diff --git a/package.json b/package.json index a1b2d36a8e..f91cb0bd9f 100644 --- a/package.json +++ b/package.json @@ -13,7 +13,7 @@ }, "packageManager": "pnpm@10.13.1", "engines": { - "node": ">=20", + "node": ">=22", "pnpm": ">=10 <11" }, "devDependencies": { @@ -23,4 +23,4 @@ "nx-cloud": "16.5.2", "vitest": "^3.1.1" } -} \ No newline at end of file +} diff --git a/packages/csv-app/src/layouts/OpenFileLayout.tsx b/packages/csv-app/src/layouts/OpenFileLayout.tsx index a2c2611dd3..00e3dfda99 100644 --- a/packages/csv-app/src/layouts/OpenFileLayout.tsx +++ b/packages/csv-app/src/layouts/OpenFileLayout.tsx @@ -273,7 +273,7 @@ const VersionDropdown = () => { onClick={async () => { const newversion = await createVersion({ lix, - commit_id: currentVersion.commit_id, + from: currentVersion, }); await switchToversion(newversion); }} diff --git a/packages/lix-docs/docs/guide/concepts/key-value.md b/packages/lix-docs/docs/guide/concepts/key-value.md new file mode 100644 index 0000000000..2e4c9dcff8 --- /dev/null +++ b/packages/lix-docs/docs/guide/concepts/key-value.md @@ -0,0 +1,101 @@ +# Key-Value + +A simple key-value store built into Lix with change control. Store any JSON value - from feature flags to UI preferences - and access it across your application. + +## Common Use Cases + +- **UI state persistence** - Sidebar positions, dismissed prompts, user preferences +- **App configuration** - Feature flags, environment settings, runtime toggles +- **Lix configuration** - Lix itself stores key values like `lix_id`, `lix_name`, or `lix_deterministic_mode` + +## Quick Start + +Always namespace your keys to avoid collisions: + +```ts +// ✅ DO: Use namespaced keys +await lix.db + .insertInto("key_value") + .values({ key: "myapp_sidebar_collapsed", value: true }) + .execute(); + +// Read it back +const sidebar = await lix.db + .selectFrom("key_value") + .where("key", "=", "myapp_sidebar_collapsed") + .executeTakeFirst(); +``` + +## Do's and Don'ts + +### ✅ DO + +- **Always use namespaces**: `myapp_feature`, `ui_sidebar`, `config_theme` +- **Store UI state as untracked**: Use `lixcol_untracked: 1` for ephemeral data + +### ❌ DON'T + +- **Never use bare keys**: Avoid `"theme"`, use `"myapp_theme"` instead + +## Real-World Examples + +### UI State Pattern + +```ts +// Store dismissed prompts per file (from md-app) +const key = `flashtype_prompt_dismissed_${activeFileId}`; +await lix.db + .insertInto("key_value") + .values({ key, value: true, lixcol_untracked: 1 }) + .execute(); +``` + +### Untracked Preferences + +```ts +// UI preferences that don't create commits +await lix.db + .insertInto("key_value_all") + .values({ + key: "ui_sidebar_width", + value: 240, + lixcol_untracked: 1, + lixcol_version_id: "global", + }) + .execute(); +``` + +## Views + +- **`key_value`** - Active version only. Your default choice. +- **`key_value_all`** - All versions. Use for untracked values or cross-version operations. +- **`key_value_history`** - Read-only audit trail. 
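+
+To make the difference between the views concrete, here is a minimal sketch of reading the same key through each of them. It sticks to the columns shown above (`key`, `value`, `lixcol_version_id`); the exact column set exposed by `key_value_history` may differ, so treat that last query as illustrative:
+
+```ts
+// Active version only (the default view)
+const current = await lix.db
+	.selectFrom("key_value")
+	.where("key", "=", "myapp_theme")
+	.selectAll()
+	.executeTakeFirst();
+
+// All versions, e.g. a value stored on the global version
+const globalValue = await lix.db
+	.selectFrom("key_value_all")
+	.where("key", "=", "myapp_theme")
+	.where("lixcol_version_id", "=", "global")
+	.selectAll()
+	.execute();
+
+// Read-only audit trail of past values (column names assumed, adjust as needed)
+const history = await lix.db
+	.selectFrom("key_value_history")
+	.where("key", "=", "myapp_theme")
+	.selectAll()
+	.execute();
+```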
+ +## Important: Booleans + +Booleans are returned as integers because SQLite's `json_extract` function (used by the views) converts JSON booleans to integers: + +- `true` → `1` +- `false` → `0` + +```ts +// Store boolean +await lix.db + .insertInto("key_value") + .values({ key: "foo_enabled", value: true }) + .execute(); + +// Read and convert +const result = await lix.db + .selectFrom("key_value") + .where("key", "=", "foo_enabled") + .executeTakeFirstOrThrow(); + +// Option 1: Use loose equality (simplest) +if (result.value == true) { + /* enabled */ +} + +// Option 2: Explicit conversion +const isEnabled = result.value === 1; +``` diff --git a/packages/lix-docs/rspress.config.ts b/packages/lix-docs/rspress.config.ts index 9c3d74c832..1237b5663f 100644 --- a/packages/lix-docs/rspress.config.ts +++ b/packages/lix-docs/rspress.config.ts @@ -125,6 +125,7 @@ export default defineConfig({ }, { text: "Versions", link: "/guide/concepts/versions" }, { text: "Discussions", link: "/guide/concepts/discussions" }, + { text: "Key-Value", link: "/guide/concepts/key-value" }, ], }, { diff --git a/packages/lix-docs/vitest.config.ts b/packages/lix-docs/vitest.config.ts new file mode 100644 index 0000000000..87d636902a --- /dev/null +++ b/packages/lix-docs/vitest.config.ts @@ -0,0 +1,8 @@ +import { defineConfig } from "vitest/config"; + +export default defineConfig({ + test: { + // increased default timeout to avoid ci/cd issues + testTimeout: 60000, + }, +}); diff --git a/packages/lix-file-manager/src/components/MergeDialog.tsx b/packages/lix-file-manager/src/components/MergeDialog.tsx index 66b0814089..85689011f3 100644 --- a/packages/lix-file-manager/src/components/MergeDialog.tsx +++ b/packages/lix-file-manager/src/components/MergeDialog.tsx @@ -15,7 +15,7 @@ import { SelectValue, } from "@/components/ui/select.js"; import { useState, useEffect } from "react"; -import { createMergeCommit } from "@lix-js/sdk"; +import { transition } from "@lix-js/sdk"; interface MergeDialogProps { open: boolean; @@ -62,10 +62,10 @@ export function MergeDialog({ if (!source || !target) return; - await createMergeCommit({ + await transition({ lix, - source: { id: source.commit_id }, - target: { id: target.commit_id }, + to: { id: source.commit_id }, + version: { id: target.id }, }); onMergeComplete(); diff --git a/packages/lix-file-manager/src/components/VersionDropdown.tsx b/packages/lix-file-manager/src/components/VersionDropdown.tsx index d0fa364e0d..d4eca4e85a 100644 --- a/packages/lix-file-manager/src/components/VersionDropdown.tsx +++ b/packages/lix-file-manager/src/components/VersionDropdown.tsx @@ -54,7 +54,7 @@ export function VersionDropdown() { const newVersion = await createVersion({ lix, - commit_id: activeVersion.commit_id, + from: activeVersion, }); await switchToVersion(newVersion); diff --git a/packages/lix-file-manager/src/state-active-file.ts b/packages/lix-file-manager/src/state-active-file.ts index 0b60dd6aaf..2e9b8d26ea 100644 --- a/packages/lix-file-manager/src/state-active-file.ts +++ b/packages/lix-file-manager/src/state-active-file.ts @@ -11,13 +11,12 @@ import { threadSearchParamsAtom, } from "./state.ts"; import { - changeSetElementIsLeafOf, - commitIsAncestorOf, - jsonArrayFrom, - Lix, - sql, - UiDiffComponentProps, - ebEntity, + commitIsAncestorOf, + jsonArrayFrom, + Lix, + sql, + UiDiffComponentProps, + ebEntity, } from "@lix-js/sdk"; import { redirect } from "react-router-dom"; @@ -130,8 +129,7 @@ export const intermediateChangesAtom = atom< "change_set_element.change_id", "change.id" ) - 
.where(changeSetElementIsLeafOf([{ id: workingChangeSetId }])) // Only get leaf changes
-		.where("change_set_element.change_set_id", "=", workingChangeSetId)
+		.where("change_set_element.change_set_id", "=", workingChangeSetId)
 		.where("change.file_id", "!=", "lix_own_change_control")
 		.select([
 			"change.id",
@@ -296,8 +294,7 @@ export const getChangeDiffs = async (
 			"change.id"
 		)
 		.where("change_set_element.change_set_id", "=", changeSetId)
-		.where(changeSetElementIsLeafOf([{ id: changeSetId }])) // Only get leaf changes
-		.where(ebEntity("change").hasLabel({ name: "checkpoint" }))
+		.where(ebEntity("change").hasLabel({ name: "checkpoint" }))
 		.selectAll("change")
 		.select(sql`json(snapshot.content)`.as("snapshot_content_after"));
@@ -326,8 +323,7 @@ export const getChangeDiffs = async (
 			"change_set_element.change_id",
 			"change.id"
 		)
-		.where(changeSetElementIsLeafOf([{ id: changeSetBeforeId }]))
-		.where("change.entity_id", "=", change.entity_id)
+		.where("change.entity_id", "=", change.entity_id)
 		.where("change.schema_key", "=", change.schema_key)
 		.where(ebEntity("change").hasLabel({ name: "checkpoint" }))
 		.select(sql`json(snapshot.content)`.as("snapshot_content_before"))
diff --git a/packages/lix-sdk/llm.md b/packages/lix-sdk/AGENTS.md
similarity index 55%
rename from packages/lix-sdk/llm.md
rename to packages/lix-sdk/AGENTS.md
index 8eb866b5fe..743074348b 100644
--- a/packages/lix-sdk/llm.md
+++ b/packages/lix-sdk/AGENTS.md
@@ -1,15 +1,13 @@
 # Rules for LLMs
 
-- Do not mock lix. Lix is a local SQLite database that does not need mocking. Test cases should always use the real lix. 
+- Do not mock lix. Lix is a local SQLite database that does not need mocking. Test cases should always use the real lix.
 
-- Lix uses Kysely to expose the the SQL API in a typesafe way https://kysely-org.github.io/kysely-apidoc/. 
+- Lix uses Kysely to expose the SQL API in a typesafe way https://kysely-org.github.io/kysely-apidoc/.
 
-- The api reference for lix can be found in [./api-docs/README.md](./api-docs/README.md)
-
-- tests for the lix sdk can be run with `pnpm exec vitest run ...` 
+- tests for the lix sdk can be run with `pnpm exec vitest run ...`
 
 - validate the types AFTER the tests pass with `pnpm exec tsc --noEmit`
 
 - always start with implementing test cases that reproduce bugs before implementing a fix to validate if the test captures the bug
 
-- do not create getter functions. isntead query sql directly via kysely. otherwise, we end up with a huge pile of wrapper functions
\ No newline at end of file
+- do not create getter functions. instead query sql directly via kysely. 
otherwise, we end up with a huge pile of wrapper functions diff --git a/packages/lix-sdk/package.json b/packages/lix-sdk/package.json index 0b36d647a1..10d7711f31 100644 --- a/packages/lix-sdk/package.json +++ b/packages/lix-sdk/package.json @@ -27,7 +27,7 @@ "_comment": "Required for tree-shaking https://webpack.js.org/guides/tree-shaking/#mark-the-file-as-side-effect-free", "sideEffects": false, "engines": { - "node": ">=18" + "node": ">=22" }, "dependencies": { "@codspeed/vitest-plugin": "^4.0.1", @@ -50,4 +50,4 @@ "typescript-eslint": "^8.9.0", "vitest": "3.2.4" } -} \ No newline at end of file +} diff --git a/packages/lix-sdk/src/change-author/schema.test.ts b/packages/lix-sdk/src/change-author/schema.test.ts index 262654b163..a99e7fbbff 100644 --- a/packages/lix-sdk/src/change-author/schema.test.ts +++ b/packages/lix-sdk/src/change-author/schema.test.ts @@ -292,7 +292,8 @@ test("should allow same author for multiple changes", async () => { ]); }); -test("change authors are accessible during a transaction", async () => { +// disabled because change author logic was moved into the commit phase +test.skip("change authors are accessible during a transaction", async () => { // Create a lix instance with an active account const lix = await openLix({ account: { diff --git a/packages/lix-sdk/src/change-proposal/create-change-proposal.ts b/packages/lix-sdk/src/change-proposal/create-change-proposal.ts index 275ea839d8..de495e656c 100644 --- a/packages/lix-sdk/src/change-proposal/create-change-proposal.ts +++ b/packages/lix-sdk/src/change-proposal/create-change-proposal.ts @@ -5,7 +5,6 @@ import type { LixChangeSet } from "../change-set/schema.js"; import type { Lix } from "../lix/open-lix.js"; import type { ChangeProposal } from "./database-schema.js"; import { createChangeSet } from "../change-set/create-change-set.js"; -import { changeSetElementInSymmetricDifference } from "../query-filter/change-set-element-in-symmetric-difference.js"; /** * Creates a change proposal that represents the symmetric difference @@ -20,14 +19,36 @@ export async function createChangeProposal(args: { targetChangeSet: Pick; }): Promise { const executeInTransaction = async (trx: Lix["db"]) => { - // Get the changes that are in the symmetric difference between the two change sets + // Compute symmetric difference of change_ids between the two change sets (inline stub) const symmetricDifferenceChanges = await trx .selectFrom("change_set_element") - .where( - changeSetElementInSymmetricDifference( - args.sourceChangeSet, - args.targetChangeSet - ) + .where((eb) => + eb.or([ + eb("change_set_element.change_id", "in", (sub) => + sub + .selectFrom("change_set_element as A") + .leftJoin("change_set_element as B", (join) => + join + .onRef("A.change_id", "=", "B.change_id") + .on("B.change_set_id", "=", args.targetChangeSet.id) + ) + .where("A.change_set_id", "=", args.sourceChangeSet.id) + .where("B.change_id", "is", null) + .select("A.change_id") + ), + eb("change_set_element.change_id", "in", (sub) => + sub + .selectFrom("change_set_element as B") + .leftJoin("change_set_element as A", (join) => + join + .onRef("B.change_id", "=", "A.change_id") + .on("A.change_set_id", "=", args.sourceChangeSet.id) + ) + .where("B.change_set_id", "=", args.targetChangeSet.id) + .where("A.change_id", "is", null) + .select("B.change_id") + ), + ]) ) .select(["change_id as id", "entity_id", "schema_key", "file_id"]) .execute(); diff --git a/packages/lix-sdk/src/change-set/apply-change-set.test.ts 
b/packages/lix-sdk/src/change-set/apply-change-set.test.ts index a76a173c9b..d50f4dfefe 100644 --- a/packages/lix-sdk/src/change-set/apply-change-set.test.ts +++ b/packages/lix-sdk/src/change-set/apply-change-set.test.ts @@ -8,7 +8,7 @@ import { } from "../plugin/mock-json-plugin.js"; import type { LixChange } from "../change/schema.js"; import type { LixKeyValue } from "../key-value/schema.js"; -import { createCheckpoint } from "../commit/create-checkpoint.js"; +import { createCheckpoint } from "../state/create-checkpoint.js"; test("it applies lix own entity changes", async () => { const lix = await openLix({}); diff --git a/packages/lix-sdk/src/change-set/apply-change-set.ts b/packages/lix-sdk/src/change-set/apply-change-set.ts index 3c8ec910bf..d467fc8024 100644 --- a/packages/lix-sdk/src/change-set/apply-change-set.ts +++ b/packages/lix-sdk/src/change-set/apply-change-set.ts @@ -76,13 +76,13 @@ export async function applyChangeSet(args: { .execute(); // Write-through cache: populate internal_state_cache for all applied changes - const changesForCache = changesResult.map(change => ({ + const changesForCache = changesResult.map((change) => ({ ...change, snapshot_content: change.snapshot_content ? JSON.stringify(change.snapshot_content) : null, })); - + updateStateCache({ lix: args.lix, changes: changesForCache, diff --git a/packages/lix-sdk/src/change/schema.test.ts b/packages/lix-sdk/src/change/schema.test.ts index bf06d34c80..94dd4f024f 100644 --- a/packages/lix-sdk/src/change/schema.test.ts +++ b/packages/lix-sdk/src/change/schema.test.ts @@ -2,6 +2,8 @@ import { expect, test } from "vitest"; import { openLix } from "../lix/open-lix.js"; import type { LixInternalDatabaseSchema } from "../database/schema.js"; import { type Kysely } from "kysely"; +import { insertTransactionState } from "../state/transaction/insert-transaction-state.js"; +import { timestamp } from "../deterministic/timestamp.js"; test("insert on the change view", async () => { const lix = await openLix({}); @@ -363,3 +365,47 @@ test("JSON null handling: change view returns SQL NULL not JSON 'null' string fo expect(change?.snapshot_content).toBe(null); expect(change?.snapshot_content).not.toBe("null"); }); + +test("untracked changes in transaction don't show up in change view after commit", async () => { + const lix = await openLix({}); + const db = lix.db as unknown as Kysely; + + // Get initial change count + const initialChanges = await lix.db + .selectFrom("change") + .selectAll() + .orderBy("id") + .execute(); + + // Insert an untracked change into the transaction state + insertTransactionState({ + lix, + timestamp: timestamp({ lix }), + data: [ + { + entity_id: "test_untracked_entity", + schema_key: "lix_log", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: "test_log_id", + key: "test_log_key", + message: "This is an untracked log entry", + level: "debug", + }), + schema_version: "1.0", + version_id: "global", + untracked: true, // This is the key - it's untracked + }, + ], + }); + + // Verify the change is in the transaction table + const changesAfter = await db + .selectFrom("change") + .selectAll() + .orderBy("id") + .execute(); + + expect(initialChanges).toEqual(changesAfter); +}); diff --git a/packages/lix-sdk/src/change/schema.ts b/packages/lix-sdk/src/change/schema.ts index d0bd128a72..f302197410 100644 --- a/packages/lix-sdk/src/change/schema.ts +++ b/packages/lix-sdk/src/change/schema.ts @@ -63,7 +63,8 @@ export function applyChangeDatabaseSchema( t.created_at, 
json(t.snapshot_content) AS snapshot_content FROM - internal_change_in_transaction AS t; + internal_change_in_transaction AS t + WHERE t.untracked = 0; CREATE TRIGGER IF NOT EXISTS change_insert INSTEAD OF INSERT ON change @@ -73,14 +74,9 @@ export function applyChangeDatabaseSchema( SELECT lix_uuid_v7(), jsonb(NEW.snapshot_content) - WHERE NEW.snapshot_content IS NOT NULL - AND NOT EXISTS ( - SELECT 1 FROM internal_snapshot - WHERE id = 'no-content' - AND NEW.snapshot_content IS NULL - ); + WHERE NEW.snapshot_content IS NOT NULL; - -- Insert the change, referencing the snapshot + -- Insert the change, referencing the last inserted snapshot (or 'no-content') INSERT INTO internal_change ( id, entity_id, @@ -99,7 +95,7 @@ export function applyChangeDatabaseSchema( NEW.plugin_key, CASE WHEN NEW.snapshot_content IS NULL THEN 'no-content' - ELSE (SELECT id FROM internal_snapshot WHERE content = jsonb(NEW.snapshot_content) ORDER BY id DESC LIMIT 1) + ELSE (SELECT id FROM internal_snapshot WHERE rowid = last_insert_rowid()) END, COALESCE(NEW.created_at, lix_timestamp()) ); diff --git a/packages/lix-sdk/src/commit/apply-commit.test.ts b/packages/lix-sdk/src/commit/apply-commit.test.ts deleted file mode 100644 index 826501ceb0..0000000000 --- a/packages/lix-sdk/src/commit/apply-commit.test.ts +++ /dev/null @@ -1,426 +0,0 @@ -import { expect, test } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { applyCommit } from "./apply-commit.js"; -import { createCommit } from "./create-commit.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { createVersion } from "../version/create-version.js"; -import { - mockJsonPlugin, - MockJsonPropertySchema, -} from "../plugin/mock-json-plugin.js"; - -test("applyCommit updates the active version's commit_id", async () => { - const lix = await openLix({ - providePlugins: [mockJsonPlugin], - }); - - // Insert the schema - await lix.db - .insertInto("stored_schema_all") - .values({ - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - // Create a file - await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode("{}"), - path: "/test.json", - }) - .execute(); - - // Create some changes - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c1", - file_id: "file1", - plugin_key: mockJsonPlugin.key, - entity_id: "e1", - schema_key: "mock_json_property", - snapshot_content: { value: "Value 1" }, - schema_version: "1.0", - }, - ]) - .returningAll() - .execute(); - - // Create a change set - const changeSet = await createChangeSet({ - lix, - lixcol_version_id: "global", - elements: changes.map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - // Create a commit - const commit = await createCommit({ - lix, - changeSet, - }); - - // Get the active version before applying - const versionBefore = await lix.db - .selectFrom("active_version") - .innerJoin("version", "version.id", "active_version.version_id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // Apply the commit - await applyCommit({ - lix, - commit, - }); - - // Get the active version after applying - const versionAfter = await lix.db - .selectFrom("active_version") - .innerJoin("version", "version.id", "active_version.version_id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // Verify the version's commit_id was updated - 
expect(versionAfter.commit_id).toBe(commit.id); - expect(versionAfter.commit_id).not.toBe(versionBefore.commit_id); - - // Verify the changes were applied to the file - const file = await lix.db - .selectFrom("file") - .where("id", "=", "file1") - .selectAll() - .executeTakeFirstOrThrow(); - - const fileData = JSON.parse(new TextDecoder().decode(file.data)); - expect(fileData).toEqual({ e1: "Value 1" }); -}); - -test("applyCommit applies to a specific version when provided", async () => { - const lix = await openLix({ - providePlugins: [mockJsonPlugin], - }); - - // Insert the schema - await lix.db - .insertInto("stored_schema_all") - .values({ - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - // Create two versions - const version1 = await createVersion({ - lix, - name: "version1", - }); - - const version2 = await createVersion({ - lix, - name: "version2", - }); - - // Create a file - await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode("{}"), - path: "/test.json", - }) - .execute(); - - // Create changes - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c1", - file_id: "file1", - plugin_key: mockJsonPlugin.key, - entity_id: "e1", - schema_key: "mock_json_property", - snapshot_content: { value: "Version 2 Value" }, - schema_version: "1.0", - }, - ]) - .returningAll() - .execute(); - - // Create a change set and commit - const changeSet = await createChangeSet({ - lix, - lixcol_version_id: "global", - elements: changes.map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - const commit = await createCommit({ - lix, - changeSet, - }); - - // Apply the commit to version2 specifically - await applyCommit({ - lix, - commit, - version: version2, - }); - - // Check that version2 was updated - const updatedVersion2 = await lix.db - .selectFrom("version") - .where("id", "=", version2.id) - .selectAll() - .executeTakeFirstOrThrow(); - - expect(updatedVersion2.commit_id).toBe(commit.id); - - // Check that version1 was NOT updated - const unchangedVersion1 = await lix.db - .selectFrom("version") - .where("id", "=", version1.id) - .selectAll() - .executeTakeFirstOrThrow(); - - expect(unchangedVersion1.commit_id).not.toBe(commit.id); -}); - -test("applyCommit works within a transaction", async () => { - const lix = await openLix({ - providePlugins: [mockJsonPlugin], - }); - - // Insert the schema - await lix.db - .insertInto("stored_schema_all") - .values({ - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - // Create a file - await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode("{}"), - path: "/test.json", - }) - .execute(); - - // Create changes - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c1", - file_id: "file1", - plugin_key: mockJsonPlugin.key, - entity_id: "e1", - schema_key: "mock_json_property", - snapshot_content: { value: "Transactional Value" }, - schema_version: "1.0", - }, - ]) - .returningAll() - .execute(); - - // Create a change set and commit - const changeSet = await createChangeSet({ - lix, - lixcol_version_id: "global", - elements: changes.map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - const commit = await createCommit({ - lix, - changeSet, - }); - - // Run applyCommit 
within a transaction - await lix.db.transaction().execute(async (trx) => { - await applyCommit({ - lix: { ...lix, db: trx }, - commit, - }); - - // Verify within the transaction - const version = await trx - .selectFrom("active_version") - .innerJoin("version", "version.id", "active_version.version_id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - expect(version.commit_id).toBe(commit.id); - }); - - // Verify outside the transaction - const version = await lix.db - .selectFrom("active_version") - .innerJoin("version", "version.id", "active_version.version_id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - expect(version.commit_id).toBe(commit.id); - - // Verify the changes were applied - const file = await lix.db - .selectFrom("file") - .where("id", "=", "file1") - .selectAll() - .executeTakeFirstOrThrow(); - - const fileData = JSON.parse(new TextDecoder().decode(file.data)); - expect(fileData).toEqual({ e1: "Transactional Value" }); -}); - -test("applyCommit throws when commit doesn't exist", async () => { - const lix = await openLix({}); - - await expect( - applyCommit({ - lix, - commit: { id: "non-existent-commit" }, - }) - ).rejects.toThrow(); -}); - -test("applyCommit throws when version doesn't exist", async () => { - const lix = await openLix({}); - - // Create a dummy change set and commit - const changeSet = await createChangeSet({ - lix, - lixcol_version_id: "global", - elements: [], - }); - - const commit = await createCommit({ - lix, - changeSet, - }); - - await expect( - applyCommit({ - lix, - commit, - version: { id: "non-existent-version" }, - }) - ).rejects.toThrow(); -}); - -test("applyCommit applies multiple changes from the commit's change set", async () => { - const lix = await openLix({ - providePlugins: [mockJsonPlugin], - }); - - // Insert the schema - await lix.db - .insertInto("stored_schema_all") - .values({ - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - // Create a file - await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode("{}"), - path: "/test.json", - }) - .execute(); - - // Create multiple changes - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c1", - file_id: "file1", - plugin_key: mockJsonPlugin.key, - entity_id: "e1", - schema_key: "mock_json_property", - snapshot_content: { value: "Value 1" }, - schema_version: "1.0", - }, - { - id: "c2", - file_id: "file1", - plugin_key: mockJsonPlugin.key, - entity_id: "e2", - schema_key: "mock_json_property", - snapshot_content: { value: "Value 2" }, - schema_version: "1.0", - }, - { - id: "c3", - file_id: "file1", - plugin_key: mockJsonPlugin.key, - entity_id: "e3", - schema_key: "mock_json_property", - snapshot_content: { value: "Value 3" }, - schema_version: "1.0", - }, - ]) - .returningAll() - .execute(); - - // Create a change set with all changes - const changeSet = await createChangeSet({ - lix, - lixcol_version_id: "global", - elements: changes.map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - const commit = await createCommit({ - lix, - changeSet, - }); - - // Apply the commit - await applyCommit({ - lix, - commit, - }); - - // Verify all changes were applied - const file = await lix.db - .selectFrom("file") - .where("id", "=", "file1") - .selectAll() - .executeTakeFirstOrThrow(); - - const fileData = JSON.parse(new TextDecoder().decode(file.data)); - 
expect(fileData).toEqual({ - e1: "Value 1", - e2: "Value 2", - e3: "Value 3", - }); -}); diff --git a/packages/lix-sdk/src/commit/apply-commit.ts b/packages/lix-sdk/src/commit/apply-commit.ts deleted file mode 100644 index eb1b4bbfe4..0000000000 --- a/packages/lix-sdk/src/commit/apply-commit.ts +++ /dev/null @@ -1,74 +0,0 @@ -import type { Lix } from "../lix/index.js"; -import type { LixCommit } from "./schema.js"; -import type { LixVersion } from "../version/schema.js"; -import { applyChangeSet } from "../change-set/apply-change-set.js"; - -/** - * Applies a commit to a version by updating the version's commit_id and applying its changes. - * - * This function: - * 1. Updates the version to point to the new commit - * 2. Applies all changes from the commit's change set - * - * @example - * ```ts - * // Apply a commit to the active version - * await applyCommit({ - * lix, - * commit: myCommit - * }); - * - * // Apply a commit to a specific version - * await applyCommit({ - * lix, - * commit: myCommit, - * version: specificVersion - * }); - * ``` - */ -export async function applyCommit(args: { - lix: Lix; - commit: Pick; - version?: Pick; -}): Promise { - const executeInTransaction = async (trx: Lix["db"]) => { - // Get the target version (use active version if not specified) - const targetVersion = args.version - ? await trx - .selectFrom("version") - .where("id", "=", args.version.id) - .selectAll() - .executeTakeFirstOrThrow() - : await trx - .selectFrom("active_version") - .innerJoin("version", "version.id", "active_version.version_id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // Get the commit details - const commit = await trx - .selectFrom("commit_all") - .where("id", "=", args.commit.id) - .where("lixcol_version_id", "=", "global") - .selectAll() - .executeTakeFirstOrThrow(); - - // Update the version to point to the new commit - await trx - .updateTable("version") - .set({ commit_id: commit.id }) - .where("id", "=", targetVersion.id) - .execute(); - - // Apply the changes from the commit's change set - // Note: We're using the internal transaction to ensure atomicity - await applyChangeSet({ - lix: { ...args.lix, db: trx }, - changeSet: { id: commit.change_set_id }, - }); - }; - - return args.lix.db.isTransaction - ? executeInTransaction(args.lix.db) - : args.lix.db.transaction().execute(executeInTransaction); -} diff --git a/packages/lix-sdk/src/commit/create-commit.ts b/packages/lix-sdk/src/commit/create-commit.ts deleted file mode 100644 index 1cd33947c9..0000000000 --- a/packages/lix-sdk/src/commit/create-commit.ts +++ /dev/null @@ -1,80 +0,0 @@ -import type { Lix } from "../lix/index.js"; -import type { LixCommit } from "./schema.js"; -import type { LixChangeSet } from "../change-set/schema.js"; -import { uuidV7 } from "../deterministic/uuid-v7.js"; - -/** - * Creates a commit that points to a change set and optionally has parent commits. - * - * This function only creates the commit record and edges - it does NOT apply the commit - * to any version. To update a version to point to this commit, use applyCommit(). 
- * - * @example - * ```ts - * // Create a commit with no parents (root commit) - * const commit = await createCommit({ - * lix, - * changeSet: myChangeSet - * }); - * - * // Create a commit with one parent - * const childCommit = await createCommit({ - * lix, - * changeSet: newChangeSet, - * parentCommits: [parentCommit] - * }); - * - * // Create a merge commit with multiple parents - * const mergeCommit = await createCommit({ - * lix, - * changeSet: mergedChangeSet, - * parentCommits: [commit1, commit2] - * }); - * ``` - */ -export async function createCommit(args: { - lix: Lix; - changeSet: Pick; - parentCommits?: Array>; -}): Promise { - const executeInTransaction = async (trx: Lix["db"]) => { - const commitId = uuidV7({ lix: args.lix }); - - // Create the commit - await trx - .insertInto("commit_all") - .values({ - id: commitId, - change_set_id: args.changeSet.id, - lixcol_version_id: "global", - }) - .execute(); - - // Create commit edges to parents - if (args.parentCommits && args.parentCommits.length > 0) { - const edges = args.parentCommits.map((parent) => ({ - parent_id: parent.id, - child_id: commitId, - lixcol_version_id: "global", - })); - - await trx.insertInto("commit_edge_all").values(edges).execute(); - } - - // Return the created commit - const commit = await trx - .selectFrom("commit_all") - .where("id", "=", commitId) - .where("lixcol_version_id", "=", "global") - .selectAll() - .executeTakeFirstOrThrow(); - - return commit; - }; - - if (args.lix.db.isTransaction) { - return executeInTransaction(args.lix.db); - } else { - return args.lix.db.transaction().execute(executeInTransaction); - } -} diff --git a/packages/lix-sdk/src/commit/create-merge-commit.test.ts b/packages/lix-sdk/src/commit/create-merge-commit.test.ts deleted file mode 100644 index a61d6bfc6b..0000000000 --- a/packages/lix-sdk/src/commit/create-merge-commit.test.ts +++ /dev/null @@ -1,268 +0,0 @@ -import { expect, test } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { createMergeCommit } from "./create-merge-commit.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { createCommit } from "./create-commit.js"; -import type { LixSchemaDefinition } from "../schema-definition/definition.js"; - -test("it should merge non-conflicting changes", async () => { - const lix = await openLix({}); - - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "test_schema", - version: "1.0", - value: { - "x-lix-key": "test_schema", - "x-lix-version": "1.0", - type: "object", - additionalProperties: false, - properties: { - id: { type: "string" }, - }, - required: ["id"], - } satisfies LixSchemaDefinition, - lixcol_version_id: "global", - }) - .execute(); - - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c0", - schema_key: "test_schema", - schema_version: "1.0", - entity_id: "e0", - file_id: "file0", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - { - id: "c1", - schema_key: "test_schema", - schema_version: "1.0", - entity_id: "e1", - file_id: "file0", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - { - id: "c2", - schema_key: "test_schema", - schema_version: "1.0", - entity_id: "e2", - file_id: "file0", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - ]) - .returningAll() - .execute(); - - const cs0 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: 
change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - elements: [changes[1]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ lix, changeSet: cs1 }); - - // simulating graph relation - const cs2 = await createChangeSet({ - lix, - elements: [changes[2]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - const merged = await createMergeCommit({ - lix, - source: commit0, - target: commit2, - }); - - const mergedElements = await lix.db - .selectFrom("change_set_element") - .where("change_set_element.change_set_id", "=", merged.change_set_id) - .selectAll() - .execute(); - - expect(mergedElements).toHaveLength(3); - expect(mergedElements.map((e) => e.change_id).sort()).toEqual( - [changes[0]!.id, changes[1]!.id, changes[2]!.id].sort() - ); -}); - -test("should handle conflicting elements with source winning (until conflicts are modeled in lix)", async () => { - const lix = await openLix({}); - - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "s1", - version: "1.0", - value: { - "x-lix-key": "s1", - "x-lix-version": "1.0", - additionalProperties: false, - type: "object", - properties: { - id: { type: "string" }, - text: { type: "string" }, - }, - required: ["id"], - } satisfies LixSchemaDefinition, - lixcol_version_id: "global", - }) - .execute(); - - // Create changes for the different states of the same entity - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c_base", - schema_key: "s1", - schema_version: "1.0", - entity_id: "e1", - file_id: "file1", - plugin_key: "mock_plugin", - snapshot_content: { text: "base" }, - }, - { - id: "c_target", - schema_key: "s1", - schema_version: "1.0", - entity_id: "e1", // Same entity as base, different content - file_id: "file1", - plugin_key: "mock_plugin", - snapshot_content: { text: "target mod" }, - }, - { - id: "c_source", - schema_key: "s1", - schema_version: "1.0", - entity_id: "e1", // Same entity as base, different content - file_id: "file1", - plugin_key: "mock_plugin", - snapshot_content: { text: "source mod" }, - }, - ]) - .returningAll() - .execute(); - - // 1. Base change set with initial content - const cs_base = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit_base = await createCommit({ - lix, - changeSet: cs_base, - }); - - // 2. Target branch - modifies e1 - const cs_target = await createChangeSet({ - lix, - elements: [changes[1]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit_target = await createCommit({ - lix, - changeSet: cs_target, - parentCommits: [commit_base], - }); - - // 3. 
Source branch - modifies e1 differently - const cs_source = await createChangeSet({ - lix, - elements: [changes[2]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit_source = await createCommit({ - lix, - changeSet: cs_source, - parentCommits: [commit_base], - }); - - // 4. Merge source into target - const merged = await createMergeCommit({ - lix, - source: commit_source, - target: commit_target, - }); - - // 5. Verify merged change set elements - const mergedElements = await lix.db - .selectFrom("change_set_element") - .where("change_set_element.change_set_id", "=", merged.change_set_id) - .selectAll() - .execute(); - - // The merge should only contain the element from the source change set due to "source wins" - expect(mergedElements).toHaveLength(1); - expect(mergedElements[0]).toEqual( - expect.objectContaining({ - change_set_id: merged.change_set_id, - change_id: changes[2]!.id, - entity_id: changes[2]!.entity_id, - schema_key: changes[2]!.schema_key, - file_id: changes[2]!.file_id, - }) - ); - - // 6. Verify graph structure - the merged commit should have edges pointing to both source and target - const edges = await lix.db - .selectFrom("commit_edge") - .where("child_id", "=", merged.id) - .selectAll() - .execute(); - - expect(edges).toHaveLength(2); - expect(edges.map((e) => e.parent_id).sort()).toEqual( - [commit_source.id, commit_target.id].sort() - ); -}); diff --git a/packages/lix-sdk/src/commit/create-merge-commit.ts b/packages/lix-sdk/src/commit/create-merge-commit.ts deleted file mode 100644 index 4eca9749fa..0000000000 --- a/packages/lix-sdk/src/commit/create-merge-commit.ts +++ /dev/null @@ -1,138 +0,0 @@ -import type { Lix } from "../lix/open-lix.js"; -import { changeSetElementInAncestryOf } from "../query-filter/change-set-element-in-ancestry-of.js"; -import { changeSetElementIsLeafOf } from "../query-filter/change-set-element-is-leaf-of.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import type { LixCommit } from "./schema.js"; -import { uuidV7 } from "../deterministic/uuid-v7.js"; - -/** - * Merges two commits using a "source wins" strategy (until lix models conflicts). - * - * Creates a new change set containing the merged result and a commit that - * points to both source and target commits. If an element (identified by - * entity_id, file_id, schema_key) exists in both the source and target - * commits (considering their respective histories), the element from the - * source commit's history takes precedence. - * - * @param args - The arguments for the merge operation. - * @param args.lix - The Lix instance. - * @param args.source - The source commit (only `id` is needed). - * @param args.target - The target commit (only `id` is needed). - * - * @returns A Promise resolving to the newly created Commit representing the merged state. 
- */ -export async function createMergeCommit(args: { - lix: Lix; - source: Pick; - target: Pick; -}): Promise { - const executeInTransaction = async (trx: Lix["db"]) => { - // --- Calculate the merged elements using "source wins" logic --- - const mergedElements = await trx - .with("SourceLeaves", (db) => - db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([args.source])) - .where(changeSetElementIsLeafOf([args.source])) - .selectAll("change_set_element") - ) - .with("TargetLeaves", (db) => - db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([args.target])) - .where(changeSetElementIsLeafOf([args.target])) - .selectAll("change_set_element") - ) - .selectFrom("SourceLeaves") // Select all source leaves (they always win) - .selectAll() - .union( - (db) => - db - .selectFrom("TargetLeaves") // Select target leaves... - .selectAll() - .where(({ not, exists, selectFrom }) => - not( - exists( - // ...that do NOT have a corresponding entity in SourceLeaves - selectFrom("SourceLeaves") - .select("SourceLeaves.entity_id") // Select something small - .whereRef( - "SourceLeaves.entity_id", - "=", - "TargetLeaves.entity_id" - ) - .whereRef( - "SourceLeaves.file_id", - "=", - "TargetLeaves.file_id" - ) - .whereRef( - "SourceLeaves.schema_key", - "=", - "TargetLeaves.schema_key" - ) - ) - ) - ) // End WHERE NOT EXISTS - ) // End UNION - .execute(); - - // Create the new merge change set record - const newChangeSet = await createChangeSet({ - lix: { ...args.lix, db: trx }, - elements: mergedElements.map((ce) => ({ - change_id: ce.change_id, - entity_id: ce.entity_id, - schema_key: ce.schema_key, - file_id: ce.file_id, - })), - lixcol_version_id: "global", - }); - - // Create a commit for the merged change set - const commitId = uuidV7({ lix: args.lix }); - - // Insert the commit - await trx - .insertInto("commit_all") - .values({ - id: commitId, - change_set_id: newChangeSet.id, - lixcol_version_id: "global", - }) - .execute(); - - // Create commit edges to both parents - await trx - .insertInto("commit_edge_all") - .values([ - { - parent_id: args.source.id, - child_id: commitId, - lixcol_version_id: "global", - }, - { - parent_id: args.target.id, - child_id: commitId, - lixcol_version_id: "global", - }, - ]) - .execute(); - - // Return the commit - const commit = await trx - .selectFrom("commit") - .where("id", "=", commitId) - .selectAll() - .executeTakeFirstOrThrow(); - - return commit; - }; - - // Restore transaction handling - if (args.lix.db.isTransaction) { - return executeInTransaction(args.lix.db); - } else { - return args.lix.db.transaction().execute(executeInTransaction); - } -} diff --git a/packages/lix-sdk/src/commit/create-transition-commit.test.ts b/packages/lix-sdk/src/commit/create-transition-commit.test.ts deleted file mode 100644 index 251e89c560..0000000000 --- a/packages/lix-sdk/src/commit/create-transition-commit.test.ts +++ /dev/null @@ -1,256 +0,0 @@ -import { expect, test } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { - mockJsonPlugin, - MockJsonPropertySchema, -} from "../plugin/mock-json-plugin.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { applyChangeSet } from "../change-set/apply-change-set.js"; -import { createTransitionCommit } from "./create-transition-commit.js"; -import { createCommit } from "./create-commit.js"; - -test.todo("it transitions state to a specific commit", async () => { - // Create a Lix instance with our plugin - const lix = await 
openLix({ - providePlugins: [mockJsonPlugin], - }); - - // const activeVersion = await lix.db - // .selectFrom("active_version") - // .innerJoin("version", "version.id", "active_version.version_id") - // .selectAll("version") - // .executeTakeFirstOrThrow(); - - // Insert the schema that the mockJsonPlugin uses - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "mock_json_property", - version: "1.0", - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - // Create a file - await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode(""), - path: "/test.json", - }) - .execute(); - - const file = await lix.db - .selectFrom("file") - .selectAll() - .where("id", "=", "file1") - .executeTakeFirstOrThrow(); - - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c0", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "l0", - schema_key: "mock_json_property", - schema_version: "1.0", - snapshot_content: { value: "Value 0" }, - }, - { - id: "c1", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "l1", - schema_key: "mock_json_property", - schema_version: "1.0", - snapshot_content: { value: "Value 1" }, - }, - { - id: "c2", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "l2", - schema_key: "mock_json_property", - schema_version: "1.0", - snapshot_content: { value: "Value 2" }, - }, - { - id: "c3", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "l2", - schema_key: "mock_json_property", - schema_version: "1.0", - snapshot_content: { value: "Value 2 Modified" }, - }, - { - id: "c4", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "l3", - schema_key: "mock_json_property", - schema_version: "1.0", - snapshot_content: { value: "Value 3" }, - }, - { - id: "c5", // Add another change/entity for complexity - entity_id: "l4", - file_id: "file1", - schema_key: "mock_json_property", - schema_version: "1.0", - plugin_key: mockJsonPlugin.key, - snapshot_content: { value: "Value 4" }, - }, - ]) - .returningAll() - .execute(); - - const cs0 = await createChangeSet({ - lix, - id: "cs0", - elements: [changes[0]!, changes[1]!, changes[2]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - id: "cs1", - elements: [changes[3]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - const cs2 = await createChangeSet({ - lix, - id: "cs2", - elements: [changes[4]!, changes[5]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - // Apply change sets in order: cs0, then cs1, then cs2 - // This demonstrates the explicit, predictable "direct" behavior - await applyChangeSet({ - lix, - changeSet: cs0, - }); - - const fileCs0Before = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - // The 
file data should now be a JSON string representing the state at cs0 - const expectedJsonStateCs0 = { - l0: "Value 0", - l1: "Value 1", - l2: "Value 2", - }; - - const actualJsonStateCs0 = JSON.parse( - new TextDecoder().decode(fileCs0Before.data) - ); - - expect(actualJsonStateCs0).toEqual(expectedJsonStateCs0); - - // Apply cs1 (which modifies l2 to "Value 2 Modified") - await applyChangeSet({ - lix, - changeSet: cs1, - }); - - // Apply cs2 (which adds l3 and l4) - await applyChangeSet({ - lix, - changeSet: cs2, - }); - - // Verify initial state - const fileAfterRestoreCs2 = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - // The file data should now be a JSON string representing the state at cs2 - const expectedJsonStateCs2 = { - l0: "Value 0", - l1: "Value 1", - l2: "Value 2 Modified", // c3 replaced c2 - l3: "Value 3", // c4 added - l4: "Value 4", // c5 added - }; - - const actualJsonStateCs2 = JSON.parse( - new TextDecoder().decode(fileAfterRestoreCs2.data) - ); - - expect(actualJsonStateCs2).toEqual(expectedJsonStateCs2); - - // Action: Create transition commit to restore to commit0 - const transitionCommit = await createTransitionCommit({ - lix, - sourceCommit: commit2, - targetCommit: commit0, - }); - - // Apply the transition commit - await applyChangeSet({ - lix, - changeSet: { id: transitionCommit.change_set_id }, - }); - - // Verify final state - // 1. Check that the transition commit has both source and target as parents - const parentCommits = await lix.db - .selectFrom("commit_edge") - .where("child_id", "=", transitionCommit.id) - .select(["parent_id"]) - .execute(); - - expect(parentCommits).toHaveLength(2); - expect(parentCommits.map((e) => e.parent_id).sort()).toEqual( - [commit2.id, commit0.id].sort() - ); - - // 2. Check if data is updated - const finalFile = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - // The file data should now be a JSON string representing the state at cs0 - const actualJsonState = JSON.parse(new TextDecoder().decode(finalFile.data)); - - expect(actualJsonState).toEqual(expectedJsonStateCs0); -}); diff --git a/packages/lix-sdk/src/commit/create-transition-commit.ts b/packages/lix-sdk/src/commit/create-transition-commit.ts deleted file mode 100644 index 60dec9ccf5..0000000000 --- a/packages/lix-sdk/src/commit/create-transition-commit.ts +++ /dev/null @@ -1,187 +0,0 @@ -import type { Lix } from "../lix/index.js"; -import type { LixCommit } from "./schema.js"; -import { changeSetElementIsLeafOf } from "../query-filter/change-set-element-is-leaf-of.js"; -import { changeSetElementInAncestryOf } from "../query-filter/change-set-element-in-ancestry-of.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { uuidV7 } from "../deterministic/uuid-v7.js"; - -/** - * Creates a commit that enables a transition from a source state - * (defined by `sourceCommit`) to a target state (defined by `targetCommit`). - * - * Applying the returned commit to the source state will result in a state - * that matches the target state. - * - * - switch between state (switching versions, checkpoints, etc.) - * - restore old state (applying the transition commit on top of current state) - */ -export async function createTransitionCommit(args: { - lix: Lix; - sourceCommit: Pick; - targetCommit: Pick; -}): Promise { - const executeInTransaction = async (trx: Lix["db"]) => { - // 1. 
Find leaf changes defining the state AT the *target* change set - const leafChangesToApply = await trx - .selectFrom("change") - .innerJoin( - "change_set_element", - "change_set_element.change_id", - "change.id" - ) - .where(changeSetElementInAncestryOf([args.targetCommit])) - .where(changeSetElementIsLeafOf([args.targetCommit])) - .select([ - "change.id", - "change.entity_id", - "change.schema_key", - "change.file_id", - ]) - .distinct() - .execute(); - - // 2. Find leaf changes that are present in the *source* state but NOT in the *target* state, - // AND whose entity is not being restored by a different change in the target state. - const leafEntitiesToDelete = await trx - .selectFrom("change") - .innerJoin( - "change_set_element", - "change_set_element.change_id", - "change.id" - ) - // Condition A: The change must be a leaf in the *source* state - .where(changeSetElementInAncestryOf([args.sourceCommit])) - .where(changeSetElementIsLeafOf([args.sourceCommit])) - // Condition B: The change must NOT be a leaf in the *target* state - .where(({ not, exists, selectFrom }) => - not( - exists( - selectFrom("change as target_leaf_check") - .innerJoin( - "change_set_element as target_cs_elem", - "target_cs_elem.change_id", - "target_leaf_check.id" - ) - .whereRef("target_leaf_check.id", "=", "change.id") - // *** Swapped target and source here relative to previous version *** - .where(changeSetElementInAncestryOf([args.targetCommit])) - .where(changeSetElementIsLeafOf([args.targetCommit])) - .select("target_leaf_check.id") - ) - ) - ) - // Condition C: No other change for the *same entity* is a leaf in the *target* state - .where(({ not, exists, selectFrom }) => - not( - exists( - selectFrom("change as restored_entity_check") - .innerJoin( - "change_set_element as restored_cs_elem", - "restored_cs_elem.change_id", - "restored_entity_check.id" - ) - .whereRef( - "restored_entity_check.entity_id", - "=", - "change.entity_id" - ) - // Check if any change for this entity is a leaf AT THE TARGET change set - // *** Swapped target and source here relative to previous version *** - .where(changeSetElementInAncestryOf([args.targetCommit])) - .where(changeSetElementIsLeafOf([args.targetCommit])) - .select("restored_entity_check.id") - ) - ) - ) - .select([ - "change.id", - "change.entity_id", - "change.plugin_key", - "change.schema_version", - "change.schema_key", - "change.file_id", - ]) - .distinct() - .execute(); - - const deleteChanges = - leafEntitiesToDelete.length > 0 - ? 
await trx - .insertInto("change") - .values( - leafEntitiesToDelete.map((c) => ({ - schema_key: c.schema_key, - schema_version: c.schema_version, - plugin_key: c.plugin_key, - entity_id: c.entity_id, - file_id: c.file_id, - snapshot_content: null, // Deletion - })) - ) - .returning(["id", "entity_id", "schema_key", "file_id"]) - .execute() - : []; - - const combinedChanges = [...leafChangesToApply, ...deleteChanges]; - - if (combinedChanges.length === 0) { - throw new Error("No changes to apply in the transition commit."); - } - - const transitionChangeSet = await createChangeSet({ - lix: { ...args.lix, db: trx }, - elements: combinedChanges.map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - - // Create a commit for the transition change set - const commitId = uuidV7({ lix: args.lix }); - - // Insert the commit - await trx - .insertInto("commit_all") - .values({ - id: commitId, - change_set_id: transitionChangeSet.id, - lixcol_version_id: "global", - }) - .execute(); - - // Create commit edges to both source and target commits - await trx - .insertInto("commit_edge_all") - .values([ - { - parent_id: args.sourceCommit.id, - child_id: commitId, - lixcol_version_id: "global", - }, - { - parent_id: args.targetCommit.id, - child_id: commitId, - lixcol_version_id: "global", - }, - ]) - .execute(); - - // Return the commit - const commit = await trx - .selectFrom("commit") - .where("id", "=", commitId) - .selectAll() - .executeTakeFirstOrThrow(); - - return commit; - }; - - if (args.lix.db.isTransaction) { - return executeInTransaction(args.lix.db); - } else { - return args.lix.db.transaction().execute(executeInTransaction); - } -} diff --git a/packages/lix-sdk/src/commit/create-undo-commit.test.ts b/packages/lix-sdk/src/commit/create-undo-commit.test.ts deleted file mode 100644 index bbd7772ae3..0000000000 --- a/packages/lix-sdk/src/commit/create-undo-commit.test.ts +++ /dev/null @@ -1,358 +0,0 @@ -import { expect, test } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { createUndoCommit } from "./create-undo-commit.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { createCommit } from "./create-commit.js"; -import { applyCommit } from "./apply-commit.js"; -import { - mockJsonPlugin, - MockJsonPropertySchema, -} from "../plugin/mock-json-plugin.js"; -import { createCheckpoint } from "./create-checkpoint.js"; - -test("it creates an undo commit that reverses the operations of the original commit", async () => { - // Create a Lix instance with the mockJsonPlugin - const lix = await openLix({ - providePlugins: [mockJsonPlugin], - }); - - // Insert the schema that the mockJsonPlugin uses - await lix.db - .insertInto("stored_schema_all") - .values({ - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - // Create a file - await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode("{}"), - path: "/test.json", - }) - .execute(); - - const file = await lix.db - .selectFrom("file") - .where("id", "=", "file1") - .selectAll() - .executeTakeFirstOrThrow(); - - // Create changes for our initial change set - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c1", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "e1", - schema_key: "mock_json_property", - snapshot_content: { value: "Value 1" }, - schema_version: "1.0", 
- }, - { - id: "c2", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "e2", - schema_key: "mock_json_property", - snapshot_content: { value: "Value 2" }, - schema_version: "1.0", - }, - ]) - .returningAll() - .execute(); - - // Create a change set with these changes - const cs0 = await createChangeSet({ - lix, - id: "cs0", - lixcol_version_id: "global", - elements: changes.map((change) => ({ - lixcol_version_id: "global", - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - // Create a commit for the change set - const commit0 = await createCommit({ - lix, - changeSet: cs0, - }); - - await applyCommit({ - lix, - commit: commit0, - }); - - // Verify the file has the expected state - const fileAfterOriginal = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - const expectedJsonStateOriginal = { - e1: "Value 1", - e2: "Value 2", - }; - - const actualJsonStateOriginal = JSON.parse( - new TextDecoder().decode(fileAfterOriginal.data) - ); - - expect(actualJsonStateOriginal).toEqual(expectedJsonStateOriginal); - - // Create the undo commit - const undoCommit = await createUndoCommit({ - lix, - commit: commit0, - }); - - // Verify the undo commit has the original commit as parent - const edges = await lix.db - .selectFrom("commit_edge") - .selectAll() - .where("child_id", "=", undoCommit.id) - .execute(); - - expect(edges).toHaveLength(1); - expect(edges[0]?.parent_id).toBe(commit0.id); - - await applyCommit({ - lix, - commit: undoCommit, - }); - - // Verify the file state after undo - should be empty again - const fileAfterUndo = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - const actualJsonStateAfterUndo = JSON.parse( - new TextDecoder().decode(fileAfterUndo.data) - ); - - expect(actualJsonStateAfterUndo).toEqual({}); -}); - -test("it correctly undoes delete operations by restoring previous state", async () => { - // Create a Lix instance with the mockJsonPlugin - const lix = await openLix({ - providePlugins: [mockJsonPlugin], - }); - - // Insert the schema that the mockJsonPlugin uses - await lix.db - .insertInto("stored_schema_all") - .values({ - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - // Create a file - await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode("{}"), - path: "/test.json", - }) - .execute(); - - const file = await lix.db - .selectFrom("file") - .where("id", "=", "file1") - .selectAll() - .executeTakeFirstOrThrow(); - - // First change set - add an entity - const initialChanges = await lix.db - .insertInto("change") - .values([ - { - id: "c1", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "e1", - schema_key: "mock_json_property", - schema_version: "1.0", - snapshot_content: { value: "Initial Value" }, - }, - ]) - .returningAll() - .execute(); - - const cs0 = await createChangeSet({ - lix, - id: "cs0", - lixcol_version_id: "global", - elements: initialChanges.map((change) => ({ - change_id: change.id, - lixcol_version_id: "global", - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - // Create a commit for cs0 - const commit0 = await createCommit({ - lix, - changeSet: cs0, - }); - - // Second change set - delete the entity - const deleteChanges = await lix.db - .insertInto("change") - .values([ - { - 
id: "c2", - file_id: file.id, - plugin_key: mockJsonPlugin.key, - entity_id: "e1", - schema_key: "mock_json_property", - schema_version: "1.0", - snapshot_content: null, // This marks it as a delete operation - }, - ]) - .returningAll() - .execute(); - - // Create cs1 with cs0 as parent (don't apply yet) - const cs1 = await createChangeSet({ - lix, - id: "cs1", - lixcol_version_id: "global", - elements: deleteChanges.map((change) => ({ - change_id: change.id, - lixcol_version_id: "global", - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - // Create a commit for cs1 - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - // Create undo commit for the delete operation - const undoDeleteCommit = await createUndoCommit({ - lix, - commit: commit1, - }); - - await applyCommit({ - lix, - commit: undoDeleteCommit, - }); - - // Verify the entity is restored to its previous state - const fileAfterUndo = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - const stateAfterUndo = JSON.parse( - new TextDecoder().decode(fileAfterUndo.data) - ); - - expect(stateAfterUndo).toEqual({ - e1: "Initial Value", - }); -}); - -test.skip("does not naively create delete changes if a previous state existed", async () => { - const lix = await openLix({ - providePlugins: [mockJsonPlugin], - }); - - // Insert the schema that the mockJsonPlugin uses - await lix.db - .insertInto("stored_schema_all") - .values({ - value: MockJsonPropertySchema, - lixcol_version_id: "global", - }) - .execute(); - - const file = await lix.db - .insertInto("file") - .values({ - id: "file1", - data: new TextEncoder().encode("{}"), - path: "/test.json", - }) - .returningAll() - .executeTakeFirstOrThrow(); - - const checkpoints: Array<{ id: string }> = []; - - // simulating an undo of peter which should restore { name: "samuel", age: 20 } - for (const state of [ - { name: "Samuel", age: 20 }, - { name: "Peter", age: 20 }, - ]) { - await lix.db - .updateTable("file") - .set({ data: new TextEncoder().encode(JSON.stringify(state)) }) - .execute(); - - checkpoints.push(await createCheckpoint({ lix })); - } - - const fileAfterEdits = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - const jsonStateAfterEdits = JSON.parse( - new TextDecoder().decode(fileAfterEdits.data) - ); - - expect(jsonStateAfterEdits).toEqual({ - name: "Peter", - age: 20, - }); - - const undoCommit = await createUndoCommit({ - lix, - commit: checkpoints[1]!, - }); - - await applyCommit({ - lix, - commit: undoCommit, - }); - - const fileAfterUndo = await lix.db - .selectFrom("file") - .where("id", "=", file.id) - .selectAll() - .executeTakeFirstOrThrow(); - - const jsonAfterUndo = JSON.parse( - new TextDecoder().decode(fileAfterUndo.data) - ); - - expect(jsonAfterUndo).toEqual({ - name: "Samuel", - age: 20, - }); -}); diff --git a/packages/lix-sdk/src/commit/create-undo-commit.ts b/packages/lix-sdk/src/commit/create-undo-commit.ts deleted file mode 100644 index e64a2bcd59..0000000000 --- a/packages/lix-sdk/src/commit/create-undo-commit.ts +++ /dev/null @@ -1,180 +0,0 @@ -import type { Lix } from "../lix/index.js"; -import type { LixCommit } from "./schema.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { createCommit } from "./create-commit.js"; -import type { LixLabel } from "../label/schema.js"; -import type { 
NewLixChange } from "../change/schema.js"; -import { uuidV7 } from "../deterministic/uuid-v7.js"; - -/** - * Creates a "reverse" commit that undoes the changes made by the specified commit. - * - * @example - * ```ts - * const undoCommit = await createUndoCommit({ - * lix, - * commit: targetCommit - * }); - * - * await applyChangeSet({ - * lix, - * changeSet: { id: undoCommit.change_set_id } - * }); - * ``` - * - * @returns The newly created commit that contains the undo operations - */ -export async function createUndoCommit(args: { - lix: Lix; - commit: Pick; - labels?: Pick[]; -}): Promise { - const executeInTransaction = async (trx: Lix["db"]) => { - // Check for multiple parents (not supported yet) - const parents = await trx - .selectFrom("commit_edge_all") - .where("lixcol_version_id", "=", "global") - .where("child_id", "=", args.commit.id) - .select("parent_id") - .execute(); - - if (parents.length > 1) { - throw new Error( - "Cannot undo commits with multiple parents (merge scenarios not yet supported)" - ); - } - - // Get the change set ID from the commit - const targetCommit = await trx - .selectFrom("commit_all") - .where("lixcol_version_id", "=", "global") - .where("id", "=", args.commit.id) - .select("change_set_id") - .executeTakeFirstOrThrow(); - - // Get all changes in the target change set (direct changes only, non-recursive) - const targetChanges = await trx - .selectFrom("change") - .innerJoin( - "change_set_element_all", - "change_set_element_all.change_id", - "change.id" - ) - .where("change_set_element_all.lixcol_version_id", "=", "global") - .where( - "change_set_element_all.change_set_id", - "=", - targetCommit.change_set_id - ) - .selectAll("change") - .execute(); - - const undoChanges: Array = []; - - for (const change of targetChanges) { - if (parents.length === 0) { - // No parent = this was the first commit, undo = delete everything - undoChanges.push({ - id: uuidV7({ lix: args.lix }), - entity_id: change.entity_id, - file_id: change.file_id, - plugin_key: change.plugin_key, - schema_key: change.schema_key, - schema_version: change.schema_version, - snapshot_content: null, // Mark as deletion - }); - } else { - // Find the previous state in the parent commit - const parentCommitId = parents[0]!.parent_id; - - // Get the parent commit's change set - const parentCommit = await trx - .selectFrom("commit_all") - .where("lixcol_version_id", "=", "global") - .where("id", "=", parentCommitId) - .select("change_set_id") - .executeTakeFirstOrThrow(); - - const previousChange = await trx - .selectFrom("change") - .innerJoin( - "change_set_element", - "change_set_element.change_id", - "change.id" - ) - .where( - "change_set_element.change_set_id", - "=", - parentCommit.change_set_id - ) - .where("change_set_element.entity_id", "=", change.entity_id) - .where("change_set_element.file_id", "=", change.file_id) - .where("change_set_element.schema_key", "=", change.schema_key) - .selectAll("change") - .executeTakeFirst(); - - if (previousChange) { - // Restore to previous state - undoChanges.push({ - id: uuidV7({ lix: args.lix }), - entity_id: change.entity_id, - file_id: change.file_id, - plugin_key: change.plugin_key, - schema_key: change.schema_key, - schema_version: change.schema_version, - snapshot_content: previousChange.snapshot_content, // Restore previous snapshot - }); - } else { - // Entity didn't exist before, so delete it - undoChanges.push({ - id: uuidV7({ lix: args.lix }), - entity_id: change.entity_id, - file_id: change.file_id, - plugin_key: 
change.plugin_key, - schema_key: change.schema_key, - schema_version: change.schema_version, - snapshot_content: null, // Mark as deletion - }); - } - } - } - - // Insert the undo changes - const createdUndoChanges = - undoChanges.length > 0 - ? await trx - .insertInto("change") - .values(undoChanges) - .returningAll() - .execute() - : []; - - // Create the undo change set - const undoChangeSet = await createChangeSet({ - lix: { ...args.lix, db: trx }, - labels: args.labels, - lixcol_version_id: "global", - elements: createdUndoChanges.map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - }); - - // Create a commit for the undo change set - const undoCommit = await createCommit({ - lix: { ...args.lix, db: trx }, - changeSet: undoChangeSet, - parentCommits: args.commit.id ? [args.commit] : [], - }); - - return undoCommit; - }; - - if (args.lix.db.isTransaction) { - return executeInTransaction(args.lix.db); - } else { - return args.lix.db.transaction().execute(executeInTransaction); - } -} diff --git a/packages/lix-sdk/src/commit/index.ts b/packages/lix-sdk/src/commit/index.ts index e534cac24e..01e0d66b9a 100644 --- a/packages/lix-sdk/src/commit/index.ts +++ b/packages/lix-sdk/src/commit/index.ts @@ -5,7 +5,3 @@ export { type LixCommitEdge, applyCommitDatabaseSchema, } from "./schema.js"; -export { createMergeCommit } from "./create-merge-commit.js"; -export { createTransitionCommit } from "./create-transition-commit.js"; -export { createCheckpoint } from "./create-checkpoint.js"; -export { createUndoCommit } from "./create-undo-commit.js"; diff --git a/packages/lix-sdk/src/database/init-db.ts b/packages/lix-sdk/src/database/init-db.ts index b6f1687009..ae636ac7b0 100644 --- a/packages/lix-sdk/src/database/init-db.ts +++ b/packages/lix-sdk/src/database/init-db.ts @@ -27,6 +27,7 @@ import { nanoId } from "../deterministic/nano-id.js"; import { applyEntityDatabaseSchema } from "../entity/schema.js"; import { applyEntityThreadDatabaseSchema } from "../entity/thread/schema.js"; import { applyFileLixcolCacheSchema } from "../file/cache/lixcol-schema.js"; +import { applyTransactionStateSchema } from "../state/transaction/schema.js"; /** * Configuration for JSON columns in database views. 
@@ -136,6 +137,7 @@ export function initDb(args: { }); // Apply all database schemas first (tables, views, triggers) + applyTransactionStateSchema(lix); applySnapshotDatabaseSchema(args.sqlite); applyChangeDatabaseSchema(args.sqlite); applyFileLixcolCacheSchema(lix); diff --git a/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.test.ts b/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.test.ts index 5a6c53eab1..60d3214bdb 100644 --- a/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.test.ts +++ b/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.test.ts @@ -402,3 +402,129 @@ test("SQL expressions in onConflict updates", async () => { }, }); }); + +test("does not coerce JSON string '1' into number in result rows", async () => { + const db = await mockDatabase(); + + // Insert a JSON string "1" and a numeric 1 into the any-json column + await db.insertInto("mock_table").values({ data: "1", other: "s" }).execute(); + // @ts-expect-error - dynamic types + await db.insertInto("mock_table").values({ data: 1, other: "n" }).execute(); + + const stringRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "s") + .executeTakeFirstOrThrow(); + const numberRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "n") + .executeTakeFirstOrThrow(); + + expect(typeof stringRow.data).toBe("string"); + // Stored as JSON string text; should not be coerced to number + expect(() => JSON.parse(stringRow.data as string)).not.toThrow(); + expect(JSON.parse(stringRow.data as string)).toBe("1"); + // And importantly, it's not a number + expect(typeof stringRow.data).not.toBe("number"); + + // For this mock table, numbers are stored via json() into TEXT; plugin doesn't parse scalars here + expect(() => JSON.parse(numberRow.data as string)).not.toThrow(); + expect(JSON.parse(numberRow.data as string)).toBe(1); +}); + +test("preserves scalar strings and JSON-looking strings; parses only JS arrays/objects or direct JSON columns", async () => { + const db = await mockDatabase(); + + // Insert array and object as strings + await db + .insertInto("mock_table") + .values({ data: "[1,2,3]", other: "arr_s" }) + .execute(); + await db + .insertInto("mock_table") + .values({ data: '{"a":1,"b":"x"}', other: "obj_s" }) + .execute(); + // Insert scalar-looking strings + await db + .insertInto("mock_table") + .values({ data: "1", other: "num_s" }) + .execute(); + await db + .insertInto("mock_table") + .values({ data: "true", other: "bool_s" }) + .execute(); + await db + .insertInto("mock_table") + .values({ data: "null", other: "null_s" }) + .execute(); + + const arrRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "arr_s") + .executeTakeFirstOrThrow(); + const objRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "obj_s") + .executeTakeFirstOrThrow(); + const numStrRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "num_s") + .executeTakeFirstOrThrow(); + const boolStrRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "bool_s") + .executeTakeFirstOrThrow(); + const nullStrRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "null_s") + .executeTakeFirstOrThrow(); + + // JSON-like strings remain strings (no implicit coercion) + expect(typeof arrRow.data).toBe("string"); + expect(JSON.parse(arrRow.data as string)).toEqual("[1,2,3]"); + expect(typeof objRow.data).toBe("string"); + 
expect(JSON.parse(objRow.data as string)).toEqual('{"a":1,"b":"x"}'); + + // Plugin preserves scalar strings + expect(typeof numStrRow.data).toBe("string"); + expect(JSON.parse(numStrRow.data as string)).toBe("1"); + expect(typeof boolStrRow.data).toBe("string"); + expect(JSON.parse(boolStrRow.data as string)).toBe("true"); + expect(typeof nullStrRow.data).toBe("string"); + expect(JSON.parse(nullStrRow.data as string)).toBe("null"); +}); + +test("arrays and objects inserted as JS values are round-tripped", async () => { + const db = await mockDatabase(); + + // Insert JS array/object + await db + .insertInto("mock_table") + .values({ data: [1, { k: "v" }], other: "arr" }) + .execute(); + await db + .insertInto("mock_table") + .values({ data: { a: [1, 2], b: { c: 3 } }, other: "obj" }) + .execute(); + + const arrRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "arr") + .executeTakeFirstOrThrow(); + const objRow = await db + .selectFrom("mock_table") + .selectAll() + .where("other", "=", "obj") + .executeTakeFirstOrThrow(); + + expect(arrRow.data).toEqual([1, { k: "v" }]); + expect(objRow.data).toEqual({ a: [1, 2], b: { c: 3 } }); +}); diff --git a/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.ts b/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.ts index 4de00cdcda..b8e1607596 100644 --- a/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.ts +++ b/packages/lix-sdk/src/database/kysely-plugin/json-column-plugin.ts @@ -29,11 +29,17 @@ export function JSONColumnPlugin( transformResult: async (args) => { for (const row of args.result.rows) { for (const col of jsonColumnNames) { - const text = row[col]; - try { - row[col] = JSON.parse(text as string); - } catch { - continue; + const val = row[col]; + // Only parse when it's a string that looks like an object/array JSON. + if (typeof val === "string") { + const trimmed = val.trim(); + if (trimmed.startsWith("{") || trimmed.startsWith("[")) { + try { + row[col] = JSON.parse(val); + } catch { + // leave as-is if parsing fails + } + } } } } diff --git a/packages/lix-sdk/src/database/schema.ts b/packages/lix-sdk/src/database/schema.ts index dd216e5849..60fa470382 100644 --- a/packages/lix-sdk/src/database/schema.ts +++ b/packages/lix-sdk/src/database/schema.ts @@ -16,9 +16,9 @@ import type { LixSchemaDefinition } from "../schema-definition/definition.js"; import { LixKeyValueSchema, type LixKeyValue } from "../key-value/schema.js"; import type { StateView, - InternalChangeInTransactionTable, StateAllView, -} from "../state/schema.js"; + StateWithTombstonesView, +} from "../state/index.js"; import type { StateHistoryView } from "../state-history/schema.js"; import { LixFileDescriptorSchema } from "../file/schema.js"; import { LixLogSchema } from "../log/schema.js"; @@ -39,6 +39,8 @@ import type { InternalResolvedStateAllView } from "../state/resolved-state-view. 
import type { InternalStateAllUntrackedTable } from "../state/untracked/schema.js"; import type { InternalFileDataCacheTable } from "../file/cache/schema.js"; import type { InternalFileLixcolCacheTable } from "../file/cache/lixcol-schema.js"; +import type { InternalChangeInTransactionTable } from "../state/transaction/schema.js"; +import type { InternalStateVTable } from "../state/vtable/vtable.js"; export const LixDatabaseSchemaJsonColumns = { snapshot: ["content"], @@ -52,6 +54,7 @@ export type LixInternalDatabaseSchema = LixDatabaseSchema & { internal_state_cache: InternalStateCacheTable; internal_state_all_untracked: InternalStateAllUntrackedTable; internal_resolved_state_all: InternalResolvedStateAllView; + internal_state_vtable: InternalStateVTable; internal_file_data_cache: InternalFileDataCacheTable; internal_file_lixcol_cache: InternalFileLixcolCacheTable; }; @@ -82,6 +85,7 @@ export type LixDatabaseSchema = { state: StateView; state_all: StateAllView; + state_with_tombstones: StateWithTombstonesView; state_history: StateHistoryView; change: ChangeView; diff --git a/packages/lix-sdk/src/deterministic/is-deterministic-mode.ts b/packages/lix-sdk/src/deterministic/is-deterministic-mode.ts index 59143b6295..37863bd69d 100644 --- a/packages/lix-sdk/src/deterministic/is-deterministic-mode.ts +++ b/packages/lix-sdk/src/deterministic/is-deterministic-mode.ts @@ -2,12 +2,13 @@ import type { Lix } from "../lix/open-lix.js"; import { executeSync } from "../database/execute-sync.js"; import { sql, type Kysely } from "kysely"; import type { LixInternalDatabaseSchema } from "../database/schema.js"; +import type { SqliteWasmDatabase } from "sqlite-wasm-kysely"; -// Cache for deterministic mode per lix instance -const deterministicModeCache = new WeakMap(); +const deterministicModeCache = new WeakMap(); -// Track which lix instances have hook listeners registered -const hookListenersRegistered = new WeakSet(); +// Track which hooks instances have a listener registered +// Using hooks object identity is stable, unlike ad-hoc { sqlite, db, hooks } wrappers +const hookListenersRegistered = new WeakSet(); /** * Checks if deterministic mode is enabled by querying the key_value table. 
@@ -24,9 +25,10 @@ const hookListenersRegistered = new WeakSet(); export function isDeterministicMode(args: { lix: Pick; }): boolean { - // Register hook listener for cache invalidation (only once per lix instance) - if (!hookListenersRegistered.has(args.lix) && args.lix.hooks) { - hookListenersRegistered.add(args.lix); + // Register hook listener for cache invalidation (only once per hooks instance) + const key = args.lix.hooks as unknown as object; + if (!hookListenersRegistered.has(key) && args.lix.hooks) { + hookListenersRegistered.add(key); args.lix.hooks.onStateCommit(({ changes }) => { // Check if any change affects lix_deterministic_mode @@ -36,7 +38,7 @@ export function isDeterministicMode(args: { change.schema_key === "lix_key_value" ) { // Invalidate cache when deterministic mode changes - deterministicModeCache.delete(args.lix); + deterministicModeCache.delete(args.lix.sqlite); break; } } @@ -44,8 +46,8 @@ export function isDeterministicMode(args: { } // Check cache first - if (deterministicModeCache.has(args.lix)) { - return deterministicModeCache.get(args.lix)!; + if (deterministicModeCache.has(args.lix.sqlite)) { + return deterministicModeCache.get(args.lix.sqlite)!; } // TODO account for active version @@ -56,6 +58,7 @@ export function isDeterministicMode(args: { .selectFrom("internal_resolved_state_all") .where("entity_id", "=", "lix_deterministic_mode") .where("schema_key", "=", "lix_key_value") + .where("snapshot_content", "is not", null) .select( sql`json_extract(snapshot_content, '$.value.enabled')`.as("enabled") ), @@ -64,7 +67,7 @@ export function isDeterministicMode(args: { const result = row?.enabled == true; // Cache the result - deterministicModeCache.set(args.lix, result); + deterministicModeCache.set(args.lix.sqlite, result); return result; } diff --git a/packages/lix-sdk/src/deterministic/nano-id.ts b/packages/lix-sdk/src/deterministic/nano-id.ts index d1a1788546..1e068ea292 100644 --- a/packages/lix-sdk/src/deterministic/nano-id.ts +++ b/packages/lix-sdk/src/deterministic/nano-id.ts @@ -66,6 +66,7 @@ export function nanoId(args: { .selectFrom("internal_resolved_state_all") .where("entity_id", "=", "lix_deterministic_mode") .where("schema_key", "=", "lix_key_value") + .where("snapshot_content", "is not", null) .select( sql`json_extract(snapshot_content, '$.value.nano_id')`.as("nano_id") ), diff --git a/packages/lix-sdk/src/deterministic/random.ts b/packages/lix-sdk/src/deterministic/random.ts index eb507c6d18..de118246b8 100644 --- a/packages/lix-sdk/src/deterministic/random.ts +++ b/packages/lix-sdk/src/deterministic/random.ts @@ -153,6 +153,7 @@ function getRngSeed(args: { lix: Pick }): string { .selectFrom("internal_resolved_state_all") .where("entity_id", "=", "lix_deterministic_mode") .where("schema_key", "=", "lix_key_value") + .where("snapshot_content", "is not", null) .select( sql`json_extract(snapshot_content, '$.value.random_seed')`.as( "random_seed" diff --git a/packages/lix-sdk/src/deterministic/sequence.ts b/packages/lix-sdk/src/deterministic/sequence.ts index 75020c5959..6e91ecb8d0 100644 --- a/packages/lix-sdk/src/deterministic/sequence.ts +++ b/packages/lix-sdk/src/deterministic/sequence.ts @@ -5,6 +5,8 @@ import type { Lix } from "../lix/open-lix.js"; import { isDeterministicMode } from "./is-deterministic-mode.js"; import { timestamp } from "./timestamp.js"; import { updateUntrackedState } from "../state/untracked/update-untracked-state.js"; +import { sql, type Kysely } from "kysely"; +import type { LixInternalDatabaseSchema } from 
"../database/schema.js"; /** State kept per SQLite connection */ type CounterState = { @@ -71,13 +73,14 @@ export function nextDeterministicSequenceNumber(args: { if (!state) { const [row] = executeSync({ lix: { sqlite: args.lix.sqlite }, - // querying from key_value_all is fine here because its a view - // that hits the internal_state_all_untracked table - query: args.lix.db - .selectFrom("key_value_all") - .where("key", "=", "lix_deterministic_sequence_number") - .where("lixcol_version_id", "=", "global") - .select("value"), + // Use internal_resolved_state_all to avoid virtual table recursion + query: (args.lix.db as unknown as Kysely) + .selectFrom("internal_resolved_state_all") + .where("entity_id", "=", "lix_deterministic_sequence_number") + .where("schema_key", "=", "lix_key_value") + .where("version_id", "=", "global") + .where("snapshot_content", "is not", null) + .select(sql`json_extract(snapshot_content, '$.value')`.as("value")), }); // The persisted value is the next counter to use diff --git a/packages/lix-sdk/src/deterministic/timestamp.ts b/packages/lix-sdk/src/deterministic/timestamp.ts index c91eaf463d..b648f1db72 100644 --- a/packages/lix-sdk/src/deterministic/timestamp.ts +++ b/packages/lix-sdk/src/deterministic/timestamp.ts @@ -59,6 +59,7 @@ export function timestamp(args: { .selectFrom("internal_resolved_state_all") .where("entity_id", "=", "lix_deterministic_mode") .where("schema_key", "=", "lix_key_value") + .where("snapshot_content", "is not", null) .select( sql`json_extract(snapshot_content, '$.value.timestamp')`.as( "timestamp" diff --git a/packages/lix-sdk/src/deterministic/uuid-v7.ts b/packages/lix-sdk/src/deterministic/uuid-v7.ts index ab58abab58..ade712f4a6 100644 --- a/packages/lix-sdk/src/deterministic/uuid-v7.ts +++ b/packages/lix-sdk/src/deterministic/uuid-v7.ts @@ -62,6 +62,7 @@ export function uuidV7(args: { .selectFrom("internal_resolved_state_all") .where("entity_id", "=", "lix_deterministic_mode") .where("schema_key", "=", "lix_key_value") + .where("snapshot_content", "is not", null) .select( sql`json_extract(snapshot_content, '$.value.uuid_v7')`.as("uuid_v7") ), diff --git a/packages/lix-sdk/src/entity-views/build-json-object-entries.test.ts b/packages/lix-sdk/src/entity-views/build-json-object-entries.test.ts new file mode 100644 index 0000000000..ea01bc0e60 --- /dev/null +++ b/packages/lix-sdk/src/entity-views/build-json-object-entries.test.ts @@ -0,0 +1,42 @@ +import { expect, test } from "vitest"; +import type { LixSchemaDefinition } from "../schema-definition/definition.js"; +import { buildJsonObjectEntries } from "./build-json-object-entries.js"; + +const TestSchema = { + "x-lix-key": "test_schema", + "x-lix-version": "1.0", + "x-lix-primary-key": ["id"], + type: "object", + properties: { + id: { type: "string" }, + name: { type: "string" }, + version: { type: "string" }, + count: { type: "number" }, + flag: { type: "boolean" }, + data: { type: "object" }, + arr: { type: "array" }, + }, + additionalProperties: false, +} as const satisfies LixSchemaDefinition; + +test("uses json_quote for string fields to prevent coercion", () => { + const sql = buildJsonObjectEntries({ + schema: TestSchema, + ref: (p) => `NEW.${p}`, + }); + expect(sql).toContain("'name', json_quote(NEW.name)"); + expect(sql).toContain("'version', json_quote(NEW.version)"); +}); + +test("uses json/json_quote combo for object/array fields", () => { + const sql = buildJsonObjectEntries({ + schema: TestSchema, + ref: (p) => `NEW.${p}`, + }); + expect(sql).toContain( + "'data', CASE 
WHEN json_valid(NEW.data) THEN json(NEW.data) ELSE json_quote(NEW.data) END" + ); + expect(sql).toContain( + "'arr', CASE WHEN json_valid(NEW.arr) THEN json(NEW.arr) ELSE json_quote(NEW.arr) END" + ); +}); diff --git a/packages/lix-sdk/src/entity-views/build-json-object-entries.ts b/packages/lix-sdk/src/entity-views/build-json-object-entries.ts new file mode 100644 index 0000000000..add93ff782 --- /dev/null +++ b/packages/lix-sdk/src/entity-views/build-json-object-entries.ts @@ -0,0 +1,37 @@ +import type { LixSchemaDefinition } from "../schema-definition/definition.js"; +import { isJsonType } from "../schema-definition/json-type.js"; + +/** + * Builds a json_object entries list for snapshot_content serialization that respects schema types. + * - For JSON-like props (object/array): accept raw JSON when valid, otherwise quote + * - For string props: always json_quote to avoid coercion (e.g. "1.0" -> 1) + * - For others (number/boolean/null): generic JSON handling + */ +export function buildJsonObjectEntries(args: { + schema: LixSchemaDefinition; + ref: (prop: string) => string; +}): string { + const properties = Object.keys((args.schema as any).properties); + + return properties + .map((prop) => { + const def: any = (args.schema as any).properties[prop]; + const ref = args.ref(prop); + const jsonLike = isJsonType(def); + const types = def?.type + ? Array.isArray(def.type) + ? def.type + : [def.type] + : []; + const isString = !jsonLike && types.includes("string"); + + if (jsonLike) { + return `'${prop}', CASE WHEN json_valid(${ref}) THEN json(${ref}) ELSE json_quote(${ref}) END`; + } + if (isString) { + return `'${prop}', json_quote(${ref})`; + } + return `'${prop}', CASE WHEN json_valid(${ref}) THEN json(${ref}) ELSE json_quote(${ref}) END`; + }) + .join(", "); +} diff --git a/packages/lix-sdk/src/entity-views/entity-state-all.test.ts b/packages/lix-sdk/src/entity-views/entity-state-all.test.ts index cb0f50fe6e..725849f8c0 100644 --- a/packages/lix-sdk/src/entity-views/entity-state-all.test.ts +++ b/packages/lix-sdk/src/entity-views/entity-state-all.test.ts @@ -578,11 +578,12 @@ describe("createEntityAllViewIfNotExists", () => { // Create parent and child versions const { createVersion } = await import("../version/create-version.js"); - await createVersion({ lix, id: "parent-version" }); + const parentVersion = await createVersion({ lix, id: "parent-version" }); + await createVersion({ lix, id: "child-version", - inherits_from_version_id: "parent-version", + inheritsFrom: parentVersion, }); // Insert entity into parent version diff --git a/packages/lix-sdk/src/entity-views/entity-state-all.ts b/packages/lix-sdk/src/entity-views/entity-state-all.ts index 923b9b1c53..5bbfbb90fd 100644 --- a/packages/lix-sdk/src/entity-views/entity-state-all.ts +++ b/packages/lix-sdk/src/entity-views/entity-state-all.ts @@ -5,6 +5,7 @@ import type { LixSchemaDefinition, } from "../schema-definition/definition.js"; import type { ValidationRule, ValidationCallbacks } from "./entity-state.js"; +import { buildJsonObjectEntries } from "./build-json-object-entries.js"; /** * Base type for _all entity views (cross-version) that include operational columns from the state table. @@ -480,6 +481,10 @@ function createSingleEntityAllView(args: { ? 
generateValidationSQL(args.validation.onDelete) : ""; + // Helper for json_object entries per schema types + const buildJsonEntries = (refExpr: (prop: string) => string): string => + buildJsonObjectEntries({ schema: args.schema, ref: refExpr }); + // Generated SQL query - set breakpoint here to inspect the generated SQL during debugging const sqlQuery = ` CREATE VIEW IF NOT EXISTS ${quoted_view_name} AS @@ -514,7 +519,7 @@ function createSingleEntityAllView(args: { '${schema_key}', ${fileId.replace(/NEW\./g, "with_default_values.")}, '${args.pluginKey}', - json_object(${properties.map((prop) => `'${prop}', with_default_values.${prop}`).join(", ")}), + json_object(${buildJsonEntries((prop) => `with_default_values.${prop}`)}), '${args.schema["x-lix-version"]}', ${versionIdReference.replace(/NEW\./g, "with_default_values.")}, COALESCE(with_default_values.lixcol_untracked, 0) @@ -531,7 +536,7 @@ function createSingleEntityAllView(args: { '${schema_key}', ${fileId}, '${args.pluginKey}', - json_object(${properties.map((prop) => `'${prop}', NEW.${prop}`).join(", ")}), + json_object(${buildJsonEntries((prop) => `NEW.${prop}`)}), '${args.schema["x-lix-version"]}', ${versionIdReference}, COALESCE(NEW.lixcol_untracked, 0) @@ -549,7 +554,7 @@ function createSingleEntityAllView(args: { schema_key = '${schema_key}', file_id = ${fileId}, plugin_key = '${args.pluginKey}', - snapshot_content = json_object(${properties.map((prop) => `'${prop}', NEW.${prop}`).join(", ")}), + snapshot_content = json_object(${buildJsonEntries((prop) => `NEW.${prop}`)}), version_id = ${versionIdReference}, untracked = NEW.lixcol_untracked WHERE diff --git a/packages/lix-sdk/src/entity-views/entity-state.ts b/packages/lix-sdk/src/entity-views/entity-state.ts index 69d55423e4..a55dc94b0c 100644 --- a/packages/lix-sdk/src/entity-views/entity-state.ts +++ b/packages/lix-sdk/src/entity-views/entity-state.ts @@ -4,6 +4,7 @@ import type { LixGenerated, LixSchemaDefinition, } from "../schema-definition/definition.js"; +import { buildJsonObjectEntries } from "./build-json-object-entries.js"; /** * Base type for regular entity views (active version only) that include operational columns from the state table. @@ -477,6 +478,9 @@ function createSingleEntityView(args: { ? 
generateValidationSQL(args.validation.onDelete) : ""; + const buildJsonEntries = (refExpr: (prop: string) => string): string => + buildJsonObjectEntries({ schema: args.schema, ref: refExpr }); + // Generated SQL query - set breakpoint here to inspect the generated SQL during debugging const sqlQuery = ` CREATE VIEW IF NOT EXISTS ${quoted_view_name} AS @@ -511,7 +515,7 @@ function createSingleEntityView(args: { '${schema_key}', ${fileId.replace(/NEW\./g, "with_default_values.")}, '${args.pluginKey}', - json_object(${properties.map((prop) => `'${prop}', with_default_values.${prop}`).join(", ")}), + json_object(${buildJsonEntries((prop) => `with_default_values.${prop}`)}), '${args.schema["x-lix-version"]}', ${versionIdReference.replace(/NEW\./g, "with_default_values.")}, COALESCE(with_default_values.lixcol_untracked, 0) @@ -527,7 +531,7 @@ function createSingleEntityView(args: { '${schema_key}', ${fileId}, '${args.pluginKey}', - json_object(${properties.map((prop) => `'${prop}', NEW.${prop}`).join(", ")}), + json_object(${buildJsonEntries((prop) => `NEW.${prop}`)}), '${args.schema["x-lix-version"]}', ${versionIdReference}, COALESCE(NEW.lixcol_untracked, 0) @@ -545,7 +549,7 @@ function createSingleEntityView(args: { schema_key = '${schema_key}', file_id = ${fileId}, plugin_key = '${args.pluginKey}', - snapshot_content = json_object(${properties.map((prop) => `'${prop}', NEW.${prop}`).join(", ")}), + snapshot_content = json_object(${buildJsonEntries((prop) => `NEW.${prop}`)}), version_id = ${versionIdReference}, untracked = NEW.lixcol_untracked WHERE diff --git a/packages/lix-sdk/src/entity/eb-entity.test.ts b/packages/lix-sdk/src/entity/eb-entity.test.ts index a1ab674707..510c3d1c6b 100644 --- a/packages/lix-sdk/src/entity/eb-entity.test.ts +++ b/packages/lix-sdk/src/entity/eb-entity.test.ts @@ -74,19 +74,29 @@ test("ebEntity.hasLabel filters entities by label id", async () => { .insertInto("file") .values([ { + id: "file0", path: "/docs/readme.md", data: new Uint8Array(Buffer.from("# README")), }, { + id: "file1", path: "/src/index.ts", data: new Uint8Array(Buffer.from("console.log('hello')")), }, - { path: "/package.json", data: new Uint8Array(Buffer.from("{}")) }, + { + id: "file2", + path: "/package.json", + data: new Uint8Array(Buffer.from("{}")), + }, ]) .execute(); // Get files from view - const files = await lix.db.selectFrom("file").selectAll().execute(); + const files = await lix.db + .selectFrom("file") + .orderBy("id") + .selectAll() + .execute(); // Label first two files for (let i = 0; i < 2; i++) { @@ -102,6 +112,7 @@ test("ebEntity.hasLabel filters entities by label id", async () => { .selectFrom("file") .where(ebEntity("file").hasLabel({ id: label.id })) .select(["path"]) + .orderBy("id") .execute(); expect(labeledFiles).toHaveLength(2); diff --git a/packages/lix-sdk/src/entity/thread/create-entity-thread.ts b/packages/lix-sdk/src/entity/thread/create-entity-thread.ts index 3f78617f9c..79df558eef 100644 --- a/packages/lix-sdk/src/entity/thread/create-entity-thread.ts +++ b/packages/lix-sdk/src/entity/thread/create-entity-thread.ts @@ -24,7 +24,7 @@ import type { LixEntity, LixEntityCanonical } from "../schema.js"; * await createEntityThread({ * lix, * entity: { - * entity_id: "row_789::column_2", + * entity_id: "row_789~column_2", * schema_key: "csv_cell", * file_id: "data.csv" * }, diff --git a/packages/lix-sdk/src/file/cache/schema.test.ts b/packages/lix-sdk/src/file/cache/schema.test.ts index 80cadeb740..ebfc9af3fb 100644 --- a/packages/lix-sdk/src/file/cache/schema.test.ts 
+++ b/packages/lix-sdk/src/file/cache/schema.test.ts @@ -78,7 +78,11 @@ test("file data cache - read-through caching", async () => { .selectAll() .executeTakeFirstOrThrow(); - expect(file.data).toEqual(testData); + // Compare JSON semantically, not byte-for-byte + // The mockJsonPlugin may reorder properties when reconstructing JSON + const fileDataParsed = JSON.parse(new TextDecoder().decode(file.data)); + const testDataParsed = JSON.parse(new TextDecoder().decode(testData)); + expect(fileDataParsed).toEqual(testDataParsed); // Now cache should be populated cachedData = getFileDataCache({ @@ -87,7 +91,9 @@ test("file data cache - read-through caching", async () => { versionId: activeVersion.version_id, }); expect(cachedData).toBeDefined(); - expect(cachedData).toEqual(testData); + // Cache contains the materialized data (which may have reordered properties) + const cachedDataParsed = JSON.parse(new TextDecoder().decode(cachedData!)); + expect(cachedDataParsed).toEqual(testDataParsed); }); test("file data cache - update invalidates and rewrites cache", async () => { @@ -124,7 +130,10 @@ test("file data cache - update invalidates and rewrites cache", async () => { .where("id", "=", "update_file") .selectAll() .executeTakeFirstOrThrow(); - expect(file.data).toEqual(initialData); + // Compare JSON semantically + expect(JSON.parse(new TextDecoder().decode(file.data))).toEqual( + JSON.parse(new TextDecoder().decode(initialData)) + ); // Verify initial cache let cachedData = getFileDataCache({ @@ -132,7 +141,9 @@ test("file data cache - update invalidates and rewrites cache", async () => { fileId: "update_file", versionId: activeVersion.version_id, }); - expect(cachedData).toEqual(initialData); + expect(JSON.parse(new TextDecoder().decode(cachedData!))).toEqual( + JSON.parse(new TextDecoder().decode(initialData)) + ); // Update the file const updatedData = new TextEncoder().encode( @@ -166,7 +177,10 @@ test("file data cache - update invalidates and rewrites cache", async () => { .selectAll() .executeTakeFirstOrThrow(); - expect(file.data).toEqual(updatedData); + // Compare JSON semantically + expect(JSON.parse(new TextDecoder().decode(file.data))).toEqual( + JSON.parse(new TextDecoder().decode(updatedData)) + ); // Now cache should be populated with updated data cachedData = getFileDataCache({ @@ -174,7 +188,9 @@ test("file data cache - update invalidates and rewrites cache", async () => { fileId: "update_file", versionId: activeVersion.version_id, }); - expect(cachedData).toEqual(updatedData); + expect(JSON.parse(new TextDecoder().decode(cachedData!))).toEqual( + JSON.parse(new TextDecoder().decode(updatedData)) + ); }); // test("file data cache - performance improvement for repeated reads", async () => { @@ -278,7 +294,10 @@ test("file data cache - cache is cleared when file is deleted", async () => { .where("id", "=", "delete_test_file") .selectAll() .executeTakeFirstOrThrow(); - expect(file.data).toEqual(testData); + // Compare JSON semantically + expect(JSON.parse(new TextDecoder().decode(file.data))).toEqual( + JSON.parse(new TextDecoder().decode(testData)) + ); // Verify cache was populated let cachedData = getFileDataCache({ @@ -287,7 +306,9 @@ test("file data cache - cache is cleared when file is deleted", async () => { versionId: activeVersion.version_id, }); expect(cachedData).toBeDefined(); - expect(cachedData).toEqual(testData); + expect(JSON.parse(new TextDecoder().decode(cachedData!))).toEqual( + JSON.parse(new TextDecoder().decode(testData)) + ); // Delete the file await lix.db 
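// The cache tests above switch from byte-for-byte comparisons to comparing
// file contents as parsed JSON, because the mock JSON plugin may reorder
// object properties when it materializes file data. A minimal sketch of that
// comparison as a reusable vitest helper — the name `expectJsonEqual` is
// illustrative and not part of this change set:
import { expect } from "vitest";

function expectJsonEqual(actual: Uint8Array, expected: Uint8Array): void {
	// Decode the UTF-8 bytes and compare the parsed values, so key order and
	// whitespace differences do not cause false negatives.
	const decode = (bytes: Uint8Array) =>
		JSON.parse(new TextDecoder().decode(bytes));
	expect(decode(actual)).toEqual(decode(expected));
}

// Usage inside a test, assuming `file.data` and `testData` are Uint8Arrays:
// expectJsonEqual(file.data, testData);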
diff --git a/packages/lix-sdk/src/file/schema.test.ts b/packages/lix-sdk/src/file/schema.test.ts index 330ca95ff1..e8bceafacd 100644 --- a/packages/lix-sdk/src/file/schema.test.ts +++ b/packages/lix-sdk/src/file/schema.test.ts @@ -1,7 +1,7 @@ import { test, expect, expectTypeOf } from "vitest"; import { openLix } from "../lix/open-lix.js"; import { createVersion } from "../version/create-version.js"; -import { createCheckpoint } from "../commit/create-checkpoint.js"; +import { createCheckpoint } from "../state/create-checkpoint.js"; import { switchVersion } from "../version/switch-version.js"; import { mockJsonPlugin } from "../plugin/mock-json-plugin.js"; import type { LixPlugin } from "../plugin/lix-plugin.js"; diff --git a/packages/lix-sdk/src/hooks/create-hooks.ts b/packages/lix-sdk/src/hooks/create-hooks.ts index 5fbbbfd702..346e7b84f2 100644 --- a/packages/lix-sdk/src/hooks/create-hooks.ts +++ b/packages/lix-sdk/src/hooks/create-hooks.ts @@ -1,5 +1,17 @@ import type { Change } from "../change/index.js"; +/** + * Change data passed to state commit hooks. + * Extends the standard Change type with tracking information. + */ +export type StateCommitChange = Change & { + /** + * Whether this change is untracked (bypasses change control). + * Untracked changes are stored directly without creating change records. + */ + untracked?: number; // 0 for tracked, 1 for untracked +}; + /** * Lix hooks system for listening to database lifecycle events. * @@ -27,7 +39,9 @@ export type LixHooks = { * unsubscribe(); * ``` */ - onStateCommit: (handler: (data: { changes: Change[] }) => void) => () => void; + onStateCommit: ( + handler: (data: { changes: StateCommitChange[] }) => void + ) => () => void; /** * Internal method for emitting events. @@ -51,7 +65,9 @@ export function createHooks(): LixHooks { const eventTarget = new EventTarget(); return { - onStateCommit(handler: (data: { changes: Change[] }) => void): () => void { + onStateCommit( + handler: (data: { changes: StateCommitChange[] }) => void + ): () => void { const wrappedHandler = (event: Event) => { const customEvent = event as CustomEvent; handler(customEvent.detail); diff --git a/packages/lix-sdk/src/index.ts b/packages/lix-sdk/src/index.ts index c8cd8ad63a..8fc3f2c855 100644 --- a/packages/lix-sdk/src/index.ts +++ b/packages/lix-sdk/src/index.ts @@ -17,6 +17,7 @@ export * from "./observe/index.js"; export * from "./plugin/index.js"; export * from "./query-filter/index.js"; export * from "./schema-definition/index.js"; +export * from "./state/index.js"; export * from "./stored-schema/index.js"; export * from "./server-protocol-handler/index.js"; export * from "./thread/index.js"; diff --git a/packages/lix-sdk/src/key-value/schema.test.ts b/packages/lix-sdk/src/key-value/schema.test.ts index 92da575312..ac8318f54c 100644 --- a/packages/lix-sdk/src/key-value/schema.test.ts +++ b/packages/lix-sdk/src/key-value/schema.test.ts @@ -62,6 +62,18 @@ test("inserts, updates, deletes are handled", async () => { ]); }); +// NOTE ON SQLITE JSON1 AND BOOLEANS +// --------------------------------- +// SQLite JSON1 does not have a native boolean type at the SQL level. +// When projecting a JSON boolean with json_extract(...), SQLite returns +// SQL-native scalars: true -> 1 and false -> 0. Objects/arrays are +// returned as JSON text unless wrapped, and strings are TEXT. +// +// Historically this test compared directly against JS booleans because +// values were stored as strings (e.g. "true") and parsed elsewhere. 
+// Now that we store proper JSON and project with json_extract, the view +// returns 1/0 for booleans. This test therefore expects 1 for true and +// 0 for false to reflect SQLite’s behavior. test("arbitrary json is allowed", async () => { const lix = await openLix({}); @@ -86,7 +98,89 @@ test("arbitrary json is allowed", async () => { ) .execute(); - expect(viewAfterInsert).toEqual(kvs); + const expected = kvs.map((kv) => ({ + key: kv.key, + value: typeof kv.value === "boolean" ? (kv.value ? 1 : 0) : kv.value, + })); + + expect(viewAfterInsert).toEqual(expected); +}); + +test("key_value insert stores proper JSON in state_all (no double encoding)", async () => { + const lix = await openLix({}); + + const kvs = [ + { key: "key0", value: { foo: "bar" } }, + { key: "key1", value: ["foo", "bar"] }, + { key: "key2", value: "foo" }, + { key: "key3", value: 42 }, + { key: "key4", value: true }, + { key: "key5", value: null }, + ]; + + await lix.db.insertInto("key_value").values(kvs).execute(); + + const rows = await lix.db + .selectFrom("state_all") + .where("schema_key", "=", "lix_key_value") + .where( + "entity_id", + "in", + kvs.map((kv) => kv.key) + ) + .select(["entity_id", sql`json(snapshot_content)`.as("snapshot_content")]) + .execute(); + + // map by key + const byKey = new Map(rows.map((r) => [r.entity_id, r as any])); + + expect(byKey.get("key0")?.snapshot_content.value).toEqual({ foo: "bar" }); + expect(byKey.get("key1")?.snapshot_content.value).toEqual(["foo", "bar"]); + expect(byKey.get("key2")?.snapshot_content.value).toBe("foo"); + expect(byKey.get("key3")?.snapshot_content.value).toBe(42); + // With json(snapshot_content), driver decodes JSON booleans to true/false + expect(byKey.get("key4")?.snapshot_content.value).toBe(true); + expect(byKey.get("key5")?.snapshot_content.value).toBeNull(); +}); + +test("boolean representation matches between key_value view and state view", async () => { + const lix = await openLix({}); + + // Insert booleans via entity view + await lix.db + .insertInto("key_value") + .values([ + { key: "bool_true", value: true }, + { key: "bool_false", value: false }, + ]) + .execute(); + + // Read from key_value view + const viewRows = await lix.db + .selectFrom("key_value") + .where("key", "in", ["bool_true", "bool_false"]) + .select(["key", "value"]) + .orderBy("key") + .execute(); + + // Read from state view (active version) and extract JSON value + const stateRows = await lix.db + .selectFrom("state") + .where("schema_key", "=", "lix_key_value") + .where("entity_id", "in", ["bool_true", "bool_false"]) + .select([ + "entity_id", + // json_extract returns SQLite-native scalars (1/0), matching the key_value view's behavior + sql`json_extract(snapshot_content, '$.value')`.as("value"), + ]) + .orderBy("entity_id") + .execute(); + + const viewMap = new Map(viewRows.map((r) => [r.key, r.value])); + const stateMap = new Map(stateRows.map((r: any) => [r.entity_id, r.value])); + + expect(viewMap.get("bool_true")).toBe(stateMap.get("bool_true")); + expect(viewMap.get("bool_false")).toBe(stateMap.get("bool_false")); }); test("view should show changes across versions", async () => { @@ -119,18 +213,12 @@ test("view should show changes across versions", async () => { }, ]); - const versionAAfterKvInsert = await lix.db - .selectFrom("version") - .where("id", "=", versionA.id) - .selectAll() - .executeTakeFirstOrThrow(); - // creating a new version from the active version const versionB = await createVersion({ lix, id: "versionB", name: "versionB", - commit_id: 
versionAAfterKvInsert.commit_id, + from: versionA, }); const kvAfterInsertInVersionB = await lix.db @@ -278,3 +366,39 @@ test("can update individual JSON properties using SQLite JSON functions", async count: 100, }); }); + +test("key_value preserves '1' as string when inserted as string", async () => { + const lix = await openLix({}); + + await lix.db + .insertInto("key_value") + .values({ key: "type_test_string", value: "1" }) + .execute(); + + const row = await lix.db + .selectFrom("key_value") + .selectAll() + .where("key", "=", "type_test_string") + .executeTakeFirstOrThrow(); + + expect(typeof row.value).toBe("string"); + expect(row.value).toBe("1"); +}); + +test("key_value preserves 1 as number when inserted as number", async () => { + const lix = await openLix({}); + + await lix.db + .insertInto("key_value") + .values({ key: "type_test_number", value: 1 }) + .execute(); + + const row = await lix.db + .selectFrom("key_value") + .selectAll() + .where("key", "=", "type_test_number") + .executeTakeFirstOrThrow(); + + expect(typeof row.value).toBe("number"); + expect(row.value).toBe(1); +}); diff --git a/packages/lix-sdk/src/lix/open-lix.bench.ts b/packages/lix-sdk/src/lix/open-lix.bench.ts new file mode 100644 index 0000000000..ac94c0bce0 --- /dev/null +++ b/packages/lix-sdk/src/lix/open-lix.bench.ts @@ -0,0 +1,21 @@ +import { bench } from "vitest"; +import { openLix } from "./open-lix.js"; + +// Measure baseline boot time for creating an in-memory Lix +bench("openLix (empty, in-memory)", async () => { + const lix = await openLix({}); + await lix.close(); +}); + +// Optional: opening with a small set of key-values to simulate common init path +bench("openLix with 1 keyValue", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + }, + ] as any, + }); + await lix.close(); +}); diff --git a/packages/lix-sdk/src/lix/open-lix.test.ts b/packages/lix-sdk/src/lix/open-lix.test.ts index a715b13e32..9a06d3df87 100644 --- a/packages/lix-sdk/src/lix/open-lix.test.ts +++ b/packages/lix-sdk/src/lix/open-lix.test.ts @@ -152,7 +152,23 @@ test("usedFileExtensions", async () => { }); test("it should open a lix in memory from a blob", async () => { - const lix1 = await openLix({}); + const lix1 = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + await lix1.db + .insertInto("key_value") + .values({ + key: "test_key", + value: "test_value", + }) + .execute(); await lix1.db .insertInto("file") @@ -164,8 +180,20 @@ test("it should open a lix in memory from a blob", async () => { .execute(); const lix2 = await openLix({ blob: await lix1.toBlob() }); + const files = await lix2.db.selectFrom("file").selectAll().execute(); + const kv = await lix2.db + .selectFrom("key_value") + .select(["key", "value"]) + .where("key", "=", "test_key") + .executeTakeFirst(); + + expect(kv).toEqual({ + key: "test_key", + value: "test_value", + }); + expect(files).toEqual([ expect.objectContaining({ id: "1", diff --git a/packages/lix-sdk/src/lix/storage/opfs.test.ts b/packages/lix-sdk/src/lix/storage/opfs.test.ts index d71c441532..d36e369047 100644 --- a/packages/lix-sdk/src/lix/storage/opfs.test.ts +++ b/packages/lix-sdk/src/lix/storage/opfs.test.ts @@ -296,9 +296,6 @@ describe("OpfsStorage", () => { expect(activeAccount2.name).toBe(account.name); }); - // TODO occasional test failures due to timing issues - // faulty state materialization might be the cause. 
- // fix after https://github.com/opral/lix-sdk/issues/308 test("only saves active accounts when they change", async () => { const path = "observer-test.lix"; const storage = new OpfsStorage({ path }); diff --git a/packages/lix-sdk/src/lix/storage/opfs.ts b/packages/lix-sdk/src/lix/storage/opfs.ts index 4d01c8ca75..34e8903011 100644 --- a/packages/lix-sdk/src/lix/storage/opfs.ts +++ b/packages/lix-sdk/src/lix/storage/opfs.ts @@ -56,6 +56,7 @@ export class OpfsStorage implements LixStorageAdapter { private pendingSave = false; private activeAccounts?: Pick[]; private activeAccountSubscription?: { unsubscribe(): void }; + private unsubscribeFromStateCommit?: () => void; /** * Creates a new OpfsStorage instance. @@ -143,6 +144,11 @@ export class OpfsStorage implements LixStorageAdapter { this.activeAccountSubscription.unsubscribe(); this.activeAccountSubscription = undefined; } + // Clean up hooks listener if registered + if (this.unsubscribeFromStateCommit) { + this.unsubscribeFromStateCommit(); + this.unsubscribeFromStateCommit = undefined; + } } /** @@ -185,7 +191,7 @@ export class OpfsStorage implements LixStorageAdapter { */ connect(args: { lix: Lix }): void { // Set up hook for database persistence - args.lix.hooks.onStateCommit(() => { + this.unsubscribeFromStateCommit = args.lix.hooks.onStateCommit(() => { this.batchedSave(); }); diff --git a/packages/lix-sdk/src/log/schema.test.ts b/packages/lix-sdk/src/log/schema.test.ts index 45b6ae178b..e154ea1540 100644 --- a/packages/lix-sdk/src/log/schema.test.ts +++ b/packages/lix-sdk/src/log/schema.test.ts @@ -8,18 +8,22 @@ test("log insert creates entries in the view", async () => { await lix.db .insertInto("log") .values({ - key: "test.log", + key: "test_log", message: "Test log message", level: "info", }) .execute(); // Verify the log appears in the view - const logs = await lix.db.selectFrom("log").selectAll().execute(); + const logs = await lix.db + .selectFrom("log") + .where("key", "=", "test_log") + .selectAll() + .execute(); expect(logs).toHaveLength(1); expect(logs[0]).toMatchObject({ - key: "test.log", + key: "test_log", message: "Test log message", level: "info", }); @@ -61,12 +65,12 @@ test("log delete removes entries from the view", async () => { .insertInto("log") .values([ { - key: "log.to.keep", + key: "test_log_to_keep", message: "Keep this log", level: "info", }, { - key: "log.to.delete", + key: "test_log_to_delete", message: "Delete this log", level: "error", }, @@ -74,20 +78,28 @@ test("log delete removes entries from the view", async () => { .execute(); // Verify both logs exist - const allLogs = await lix.db.selectFrom("log").selectAll().execute(); + const allLogs = await lix.db + .selectFrom("log") + .where("key", "in", ["test_log_to_keep", "test_log_to_delete"]) + .selectAll() + .execute(); expect(allLogs).toHaveLength(2); // Get the ID of the log to delete - const logToDelete = allLogs.find((log) => log.key === "log.to.delete"); + const logToDelete = allLogs.find((log) => log.key === "test_log_to_delete"); expect(logToDelete).toBeDefined(); // Delete one log by ID await lix.db.deleteFrom("log").where("id", "=", logToDelete!.id).execute(); // Verify only one log remains - const remainingLogs = await lix.db.selectFrom("log").selectAll().execute(); + const remainingLogs = await lix.db + .selectFrom("log") + .where("key", "in", ["test_log_to_keep", "test_log_to_delete"]) + .selectAll() + .execute(); expect(remainingLogs).toHaveLength(1); - expect(remainingLogs[0]?.key).toBe("log.to.keep"); + 
expect(remainingLogs[0]?.key).toBe("test_log_to_keep"); expect(remainingLogs[0]?.message).toBe("Keep this log"); // Verify the deleted log is gone @@ -107,24 +119,28 @@ test("multiple log inserts with unique ids", async () => { .insertInto("log") .values([ { - key: "log1", + key: "test_log1", message: "First log", level: "info", }, { - key: "log2", + key: "test_log2", message: "Second log", level: "warn", }, { - key: "log3", + key: "test_log3", message: "Third log", level: "error", }, ]) .execute(); - const logs = await lix.db.selectFrom("log").selectAll().execute(); + const logs = await lix.db + .selectFrom("log") + .where("key", "like", "test_log%") + .selectAll() + .execute(); expect(logs).toHaveLength(3); @@ -134,7 +150,13 @@ test("multiple log inserts with unique ids", async () => { expect(uniqueIds.size).toBe(3); // Verify content - expect(logs.find((log) => log.key === "log1")?.message).toBe("First log"); - expect(logs.find((log) => log.key === "log2")?.message).toBe("Second log"); - expect(logs.find((log) => log.key === "log3")?.message).toBe("Third log"); + expect(logs.find((log) => log.key === "test_log1")?.message).toBe( + "First log" + ); + expect(logs.find((log) => log.key === "test_log2")?.message).toBe( + "Second log" + ); + expect(logs.find((log) => log.key === "test_log3")?.message).toBe( + "Third log" + ); }); diff --git a/packages/lix-sdk/src/observe/determine-schema-keys.ts b/packages/lix-sdk/src/observe/determine-schema-keys.ts index 86518d128f..2964ba5607 100644 --- a/packages/lix-sdk/src/observe/determine-schema-keys.ts +++ b/packages/lix-sdk/src/observe/determine-schema-keys.ts @@ -58,8 +58,6 @@ export function determineSchemaKeys(compiledQuery: any): string[] { change: "change", // Special case for change table state: "state", // Virtual state table - could include multiple schema keys state_all: "state_all", // Virtual state_all table - includes all versions - active_version: "lix_version", // Maps to version schema - active_account: "lix_account", // Maps to account schema }; Object.assign(tableToSchemaMap, specialMappings); diff --git a/packages/lix-sdk/src/query-filter/change-set-element-in-ancestry-of.test.ts b/packages/lix-sdk/src/query-filter/change-set-element-in-ancestry-of.test.ts deleted file mode 100644 index d1181503ce..0000000000 --- a/packages/lix-sdk/src/query-filter/change-set-element-in-ancestry-of.test.ts +++ /dev/null @@ -1,440 +0,0 @@ -import { test, expect } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { createCommit } from "../commit/create-commit.js"; -import { changeSetElementInAncestryOf } from "./change-set-element-in-ancestry-of.js"; - -test("returns all elements from a single change set and its ancestors", async () => { - const lix = await openLix({}); - - // Insert required schema entry in global version - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "mock", - version: "1", - value: { - "x-lix-key": "mock", - "x-lix-version": "1", - additionalProperties: false, - properties: {}, - type: "object", - }, - lixcol_version_id: "global", - }) - .execute(); - - // Insert mock change (reused across sets) - const changes = await lix.db - .insertInto("change") - .values({ - id: "c0", - entity_id: "e1", - file_id: "f1", - plugin_key: "mock", - schema_version: "1", - schema_key: "mock", - snapshot_content: { hello: "world" }, - }) - .returningAll() - .execute(); - - // cs0 <- cs1 <- cs2 (with corresponding commits) - const cs0 = 
await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - const cs2 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - const elements = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf(commit2)) - .select("change_set_id") - .execute(); - - expect(elements.map((e) => e.change_set_id).sort()).toEqual( - [cs0.id, cs1.id, cs2.id].sort() - ); -}); - -test("respects depth limit when provided for a single target", async () => { - const lix = await openLix({}); - - // Insert required schema entry in global version - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "mock", - version: "1", - value: { - "x-lix-key": "mock", - "x-lix-version": "1", - additionalProperties: false, - properties: {}, - type: "object", - }, - lixcol_version_id: "global", - }) - .execute(); - - const changes = await lix.db - .insertInto("change") - .values({ - id: "c1", - entity_id: "e1", - file_id: "f1", - schema_version: "1", - plugin_key: "mock", - schema_key: "mock", - snapshot_content: { val: "hi" }, - }) - .returningAll() - .execute(); - - // cs0 <- cs1 <- cs2 (with corresponding commits) - const cs0 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - const cs2 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - const elements = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf(commit2, { depth: 1 })) - .select("change_set_id") - .execute(); - - expect(elements.map((e) => e.change_set_id).sort()).toEqual( - [cs1.id, cs2.id].sort() - ); -}); - -test("returns combined elements from multiple divergent change set ancestries", async () => { - const lix = await openLix({}); - - // Insert required schema entry in global version - await lix.db - .insertInto("stored_schema_all") - .values({ - key: 
"mock", - version: "1", - value: { - "x-lix-key": "mock", - "x-lix-version": "1", - additionalProperties: false, - properties: {}, - type: "object", - }, - lixcol_version_id: "global", - }) - .execute(); - - // Shared change - const changes = await lix.db - .insertInto("change") - .values({ - id: "c1", - entity_id: "e1", - file_id: "f1", - schema_version: "1", - plugin_key: "mock", - schema_key: "mock", - snapshot_content: { val: "shared" }, - }) - .returningAll() - .execute(); - - // cs0 <- cs1 <- cs2 - // \ - // <- cs3 <- cs4 - const cs0 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - const cs2 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); // Branch 1 leaf - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - const cs3 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit3 = await createCommit({ - lix, - changeSet: cs3, - parentCommits: [commit0], - }); - - const cs4 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); // Branch 2 leaf - const commit4 = await createCommit({ - lix, - changeSet: cs4, - parentCommits: [commit3], - }); - - const elements = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([commit2, commit4])) // Target both leaves - .select("change_set_id") - .distinct() - .execute(); - - // Expect all change sets from both branches, including common ancestor cs0 - expect(elements.map((e) => e.change_set_id).sort()).toEqual( - [cs0.id, cs1.id, cs2.id, cs3.id, cs4.id].sort() - ); -}); - -test("respects depth limit with multiple divergent targets", async () => { - const lix = await openLix({}); - - // Insert required schema entry in global version - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "mock", - version: "1", - value: { - "x-lix-key": "mock", - "x-lix-version": "1", - additionalProperties: false, - properties: {}, - type: "object", - }, - lixcol_version_id: "global", - }) - .execute(); - - const changes = await lix.db - .insertInto("change") - .values({ - id: "c1", - entity_id: "e1", - file_id: "f1", - schema_version: "1", - plugin_key: "mock", - schema_key: "mock", - snapshot_content: { val: "shared" }, - }) - .returningAll() - .execute(); - - // cs0 <- cs1 <- cs2 - // \ - // <- cs3 <- cs4 - const cs0 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - 
entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - const cs2 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); // Branch 1 leaf - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - const cs3 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit3 = await createCommit({ - lix, - changeSet: cs3, - parentCommits: [commit0], - }); - - const cs4 = await createChangeSet({ - lix, - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); // Branch 2 leaf - const commit4 = await createCommit({ - lix, - changeSet: cs4, - parentCommits: [commit3], - }); - - const elements = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([commit2, commit4], { depth: 1 })) // Depth 1 from targets - .select("change_set_id") - .distinct() - .execute(); - - // Expect targets and their direct parents (cs1, cs3) - expect(elements.map((e) => e.change_set_id).sort()).toEqual( - [cs1.id, cs2.id, cs3.id, cs4.id].sort() - ); -}); diff --git a/packages/lix-sdk/src/query-filter/change-set-element-in-ancestry-of.ts b/packages/lix-sdk/src/query-filter/change-set-element-in-ancestry-of.ts deleted file mode 100644 index f0921a9a02..0000000000 --- a/packages/lix-sdk/src/query-filter/change-set-element-in-ancestry-of.ts +++ /dev/null @@ -1,69 +0,0 @@ -import type { LixCommit } from "../commit/schema.js"; -import { - sql, - type ExpressionBuilder, - type ExpressionWrapper, - type SqlBool, -} from "kysely"; -import type { LixDatabaseSchema } from "../database/schema.js"; - -/** - * Filters elements that are in the ancestry of the given commit(s). - * - * @param target - A target commit object (or its id), or an array of such objects/ids. - * @param options - Optional options object (e.g., depth limit) - * @returns A Kysely ExpressionBuilder function for filtering. 
- * - * @example - * // Elements from the history of commit2 (object) - * db.selectFrom("change_set_element") - * .where(changeSetElementInAncestryOf(commit2)) - * .selectAll() - * - * // Elements from the history of commit2 (id) - * db.selectFrom("change_set_element") - * .where(changeSetElementInAncestryOf(commit2.id)) - * .selectAll() - * - * // Elements from the combined history of commit2 and commit4 (divergent branches) - * db.selectFrom("change_set_element") - * .where(changeSetElementInAncestryOf([commit2, commit4])) - * .selectAll() - */ -export function changeSetElementInAncestryOf( - target: Pick<LixCommit, "id"> | Array<Pick<LixCommit, "id">>, - options?: { depth?: number } -): ( - eb: ExpressionBuilder<LixDatabaseSchema, "change_set_element"> -) => ExpressionWrapper<LixDatabaseSchema, "change_set_element", SqlBool> { - const depthLimit = options?.depth; - const targetsArray = Array.isArray(target) ? target : [target]; - if (targetsArray.length === 0) { - throw new Error( - "changeSetElementInAncestryOf requires at least one target commit." - ); - } - const targetIds = targetsArray.map((commit) => - typeof commit === "object" && commit !== null ? commit.id : commit - ); - - return () => - sql` - change_set_element.change_set_id IN ( - WITH RECURSIVE ancestor_commits(id, depth) AS ( - -- Start with the target commits - SELECT id, 0 AS depth FROM "commit" WHERE id IN (${sql.join(targetIds.map((id) => sql.lit(id)))}) - UNION ALL - -- Recursively find parent commits - SELECT commit_edge.parent_id, ancestor_commits.depth + 1 - FROM commit_edge - JOIN ancestor_commits ON commit_edge.child_id = ancestor_commits.id - ${depthLimit !== undefined ? sql`WHERE ancestor_commits.depth < ${sql.lit(depthLimit)}` : sql``} - ) - -- Get the change_set_ids from the ancestor commits - SELECT change_set_id - FROM "commit" - WHERE id IN (SELECT id FROM ancestor_commits) - ) - ` as any; -} diff --git a/packages/lix-sdk/src/query-filter/change-set-element-in-symmetric-difference.test.ts b/packages/lix-sdk/src/query-filter/change-set-element-in-symmetric-difference.test.ts deleted file mode 100644 index 836a3767bb..0000000000 --- a/packages/lix-sdk/src/query-filter/change-set-element-in-symmetric-difference.test.ts +++ /dev/null @@ -1,410 +0,0 @@ -import { test, expect } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { changeSetElementInSymmetricDifference } from "./change-set-element-in-symmetric-difference.js"; -import type { LixSchemaDefinition } from "../schema-definition/definition.js"; -import type { LixChangeSetElement } from "../change-set/schema.js"; - -// Helper function to extract necessary fields from a Change object -function getEntityChangeFields(change: any) { - return { - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - }; -} - -test("should return the symmetric difference between two change sets", async () => { - const lix = await openLix({}); - - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - additionalProperties: false, - properties: {}, - type: "object", - }; - - // Insert test data - const changeSetB = { id: "changeSetB" }; - const changeSetA = { id: "changeSetA" }; - - await lix.db - .insertInto("stored_schema") - .values({ value: mockSchema }) - .execute(); - - await lix.db - .insertInto("change_set") - .values([changeSetA, changeSetB]) - .execute(); - - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "change1", - entity_id: "e1", - schema_key: "mock_schema", - file_id: "f1", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - 
id: "change2", - entity_id: "e2", - schema_key: "mock_schema", - file_id: "f2", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change3", - entity_id: "e3", - schema_key: "mock_schema", - file_id: "f3", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change4", - entity_id: "e4", - schema_key: "mock_schema", - file_id: "f4", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - ]) - .returningAll() - .execute(); - - const changeMap = new Map(changes.map((c) => [c.id, c])); - - // Setup: Create change set elements - const changeElementsA: LixChangeSetElement[] = [ - { - change_set_id: "changeSetA", - change_id: "change1", - ...getEntityChangeFields(changeMap.get("change1")!), - }, - { - change_set_id: "changeSetA", - change_id: "change2", - ...getEntityChangeFields(changeMap.get("change2")!), - }, - ]; - - const changeElementsB: LixChangeSetElement[] = [ - { - change_set_id: "changeSetB", - change_id: "change2", - ...getEntityChangeFields(changeMap.get("change2")!), - }, - { - change_set_id: "changeSetB", - change_id: "change3", - ...getEntityChangeFields(changeMap.get("change3")!), - }, - ]; - - await lix.db - .insertInto("change_set_element") - .values([...changeElementsA, ...changeElementsB]) - .execute(); - - const result = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInSymmetricDifference(changeSetA, changeSetB)) - .selectAll() - .execute(); - - expect(result).toEqual([ - // change 1 is in A but not in B - expect.objectContaining({ - change_set_id: "changeSetA", - change_id: "change1", - entity_id: "e1", - schema_key: "mock_schema", - file_id: "f1", - }), - // change 3 is in B but not in A - expect.objectContaining({ - change_set_id: "changeSetB", - change_id: "change3", - entity_id: "e3", - schema_key: "mock_schema", - file_id: "f3", - }), - ]); -}); - -test("should return an empty array if there are no differences", async () => { - const lix = await openLix({}); - - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - additionalProperties: false, - type: "object", - properties: {}, - }; - - // Insert test data - const changeSetA = { id: "changeSetA" }; - const changeSetB = { id: "changeSetB" }; - - await lix.db - .insertInto("stored_schema") - .values({ value: mockSchema }) - .execute(); - - await lix.db - .insertInto("change_set") - .values([changeSetA, changeSetB]) - .execute(); - - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "change1", - entity_id: "e1", - schema_key: "mock_schema", - file_id: "f1", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change2", - entity_id: "e2", - schema_key: "mock_schema", - file_id: "f2", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change3", - entity_id: "e3", - schema_key: "mock_schema", - file_id: "f3", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change4", - entity_id: "e4", - schema_key: "mock_schema", - file_id: "f4", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - ]) - .returningAll() - .execute(); - - const changeMap = new Map(changes.map((c) => [c.id, c])); - - const sharedChangeElements: LixChangeSetElement[] = [ - { - change_set_id: "changeSetA", - change_id: "change1", - 
...getEntityChangeFields(changeMap.get("change1")!), - }, - { - change_set_id: "changeSetA", - change_id: "change2", - ...getEntityChangeFields(changeMap.get("change2")!), - }, - { - change_set_id: "changeSetB", - change_id: "change1", - ...getEntityChangeFields(changeMap.get("change1")!), - }, - { - change_set_id: "changeSetB", - change_id: "change2", - ...getEntityChangeFields(changeMap.get("change2")!), - }, - ]; - - await lix.db - .insertInto("change_set_element") - .values(sharedChangeElements) - .execute(); - - const result = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInSymmetricDifference(changeSetA, changeSetB)) - .selectAll() - .execute(); - - expect(result).toEqual([]); -}); - -test("should handle empty change sets", async () => { - const lix = await openLix({}); - - // Insert test data - const changeSetA = { id: "changeSetA" }; - const changeSetB = { id: "changeSetB" }; - - await lix.db - .insertInto("change_set") - .values([changeSetA, changeSetB]) - .execute(); - - const result = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInSymmetricDifference(changeSetA, changeSetB)) - .selectAll() - .execute(); - - // Verify the results - expect(result).toEqual([]); -}); - -test("should handle disjoint change sets", async () => { - const lix = await openLix({}); - - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - type: "object", - additionalProperties: false, - properties: {}, - }; - - // Insert test data - const changeSetA = { id: "changeSetA" }; - const changeSetB = { id: "changeSetB" }; - - await lix.db - .insertInto("stored_schema") - .values({ value: mockSchema }) - .execute(); - - await lix.db - .insertInto("change_set") - .values([changeSetA, changeSetB]) - .execute(); - - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "change1", - entity_id: "e1", - schema_key: "mock_schema", - file_id: "f1", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change2", - entity_id: "e2", - schema_key: "mock_schema", - file_id: "f2", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change3", - entity_id: "e3", - schema_key: "mock_schema", - file_id: "f3", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - { - id: "change4", - entity_id: "e4", - schema_key: "mock_schema", - file_id: "f4", - plugin_key: "test-plugin", - schema_version: "1.0", - snapshot_content: null, - }, - ]) - .returningAll() - .execute(); - - const changeMap = new Map(changes.map((c) => [c.id, c])); - - const disjointChangeElementsA: LixChangeSetElement[] = [ - { - change_set_id: "changeSetA", - change_id: "change1", - ...getEntityChangeFields(changeMap.get("change1")!), - }, - { - change_set_id: "changeSetA", - change_id: "change2", - ...getEntityChangeFields(changeMap.get("change2")!), - }, - ]; - const disjointChangeElementsB: LixChangeSetElement[] = [ - { - change_set_id: "changeSetB", - change_id: "change3", - ...getEntityChangeFields(changeMap.get("change3")!), - }, - { - change_set_id: "changeSetB", - change_id: "change4", - ...getEntityChangeFields(changeMap.get("change4")!), - }, - ]; - - await lix.db - .insertInto("change_set_element") - .values([...disjointChangeElementsA, ...disjointChangeElementsB]) - .execute(); - - const result = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInSymmetricDifference(changeSetA, changeSetB)) - 
.selectAll() - .execute(); - - // Expected result: Symmetric difference includes all elements as sets are disjoint - expect(result).toEqual([ - expect.objectContaining({ - change_set_id: "changeSetA", - change_id: "change1", - entity_id: "e1", - schema_key: "mock_schema", - file_id: "f1", - }), - expect.objectContaining({ - change_set_id: "changeSetA", - change_id: "change2", - entity_id: "e2", - schema_key: "mock_schema", - file_id: "f2", - }), - expect.objectContaining({ - change_set_id: "changeSetB", - change_id: "change3", - entity_id: "e3", - schema_key: "mock_schema", - file_id: "f3", - }), - expect.objectContaining({ - change_set_id: "changeSetB", - change_id: "change4", - entity_id: "e4", - schema_key: "mock_schema", - file_id: "f4", - }), - ]); -}); diff --git a/packages/lix-sdk/src/query-filter/change-set-element-in-symmetric-difference.ts b/packages/lix-sdk/src/query-filter/change-set-element-in-symmetric-difference.ts deleted file mode 100644 index b91c1b3eef..0000000000 --- a/packages/lix-sdk/src/query-filter/change-set-element-in-symmetric-difference.ts +++ /dev/null @@ -1,53 +0,0 @@ -import type { ExpressionBuilder, ExpressionWrapper, SqlBool } from "kysely"; -import type { LixDatabaseSchema } from "../database/schema.js"; -import type { LixChangeSet } from "../change-set/schema.js"; - -/** - * Returns the symmetric difference between two change sets. - * - * The symmetric difference is the set of changes - * that exist in either one version but not both. - * Modeled after https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set/symmetricDifference - * - * @example - * ```ts - * await lix.db.selectFrom("change_set_element") - * .where(changeSetElementInSymmetricDifference(a: changeSetA, b: changeSetB)) - * .selectAll() - * .execute(); - * ``` - */ -export function changeSetElementInSymmetricDifference( - a: Pick<LixChangeSet, "id">, - b: Pick<LixChangeSet, "id"> -) { - return ( - eb: ExpressionBuilder<LixDatabaseSchema, "change_set_element"> - ): ExpressionWrapper<LixDatabaseSchema, "change_set_element", SqlBool> => - eb.or([ - eb("change_set_element.change_id", "in", (subquery) => - subquery - .selectFrom("change_set_element as A") - .leftJoin("change_set_element as B", (join) => - join - .onRef("A.change_id", "=", "B.change_id") - .on("B.change_set_id", "=", b.id) - ) - .where("A.change_set_id", "=", a.id) - .where("B.change_id", "is", null) - .select("A.change_id") - ), - eb("change_set_element.change_id", "in", (subquery) => - subquery - .selectFrom("change_set_element as B") - .leftJoin("change_set_element as A", (join) => - join - .onRef("B.change_id", "=", "A.change_id") - .on("A.change_set_id", "=", a.id) - ) - .where("B.change_set_id", "=", b.id) - .where("A.change_id", "is", null) - .select("B.change_id") - ), - ]); -} diff --git a/packages/lix-sdk/src/query-filter/change-set-element-is-leaf-of.test.ts b/packages/lix-sdk/src/query-filter/change-set-element-is-leaf-of.test.ts deleted file mode 100644 index bf529505a0..0000000000 --- a/packages/lix-sdk/src/query-filter/change-set-element-is-leaf-of.test.ts +++ /dev/null @@ -1,623 +0,0 @@ -import { test, expect } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { createChangeSet } from "../change-set/create-change-set.js"; -import { createCommit } from "../commit/create-commit.js"; -import { changeSetElementInAncestryOf } from "./change-set-element-in-ancestry-of.js"; -import { changeSetElementIsLeafOf } from "./change-set-element-is-leaf-of.js"; - -test("returns only leaf change_set_elements per entity", async () => { - const lix = await openLix({}); - - // Insert required schema entry in global 
version - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "mock_schema", - version: "1.0", - value: { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - additionalProperties: false, - properties: {}, - type: "object", - }, - lixcol_version_id: "global", - }) - .execute(); - - // Insert 3 snapshots for the same entity - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c0", - entity_id: "e0", - file_id: "f1", - schema_version: "1.0", - schema_key: "mock_schema", - plugin_key: "p", - snapshot_content: { val: "0" }, - }, - { - id: "c1", - entity_id: "e1", - file_id: "f1", - schema_version: "1.0", - schema_key: "mock_schema", - plugin_key: "p", - snapshot_content: { val: "1" }, - }, - { - id: "c2", - entity_id: "e1", - file_id: "f1", - schema_version: "1.0", - schema_key: "mock_schema", - plugin_key: "p", - snapshot_content: { val: "2" }, - }, - ]) - .returningAll() - .execute(); - - const cs0 = await createChangeSet({ - lix, - id: "cs0", - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - id: "cs1", - elements: [changes[1]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - const cs2 = await createChangeSet({ - lix, - id: "cs2", - elements: [changes[2]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - // Only c2 should be the leaf (latest definition in the ancestry) - const leafChanges = await lix.db - .selectFrom("change_set_element") - .where("change_set_element.change_set_id", "in", [cs0.id, cs1.id, cs2.id]) - .where(changeSetElementIsLeafOf(commit2)) - .select(["change_set_element.change_id", "change_set_element.entity_id"]) - .execute(); - - expect(leafChanges).toEqual([ - { change_id: "c0", entity_id: "e0" }, - { change_id: "c2", entity_id: "e1" }, - ]); -}); - -test("correctly identifies leaves at different points in history", async () => { - const lix = await openLix({}); - - // Insert required schema entry in global version - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "mock_schema", - version: "1.0", - value: { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - additionalProperties: false, - properties: {}, - type: "object", - }, - lixcol_version_id: "global", - }) - .execute(); - - // Create a scenario similar to the restore-change-set test - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c0", - entity_id: "l0", - schema_version: "1.0", - file_id: "file1", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { text: "Line 0" }, - }, - { - id: "c1", - entity_id: "l1", - schema_version: "1.0", - file_id: "file1", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { text: "Line 1" }, - }, - { - id: "c2", - entity_id: "l2", - file_id: "file1", - schema_version: "1.0", - schema_key: "mock_schema", - plugin_key: 
"mock_plugin", - snapshot_content: { val: "2" }, - }, - { - id: "c3", - entity_id: "l2", // Same entity as c2, but newer version - file_id: "file1", - schema_version: "1.0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { text: "Line 2 Modified" }, - }, - { - id: "c4", - entity_id: "l3", // New entity - file_id: "file1", - schema_version: "1.0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { text: "Line 3" }, - }, - ]) - .returningAll() - .execute(); - - // Create a more complex change set graph: - // cs0 (base) <- cs1 (modifies l2) <- cs2 (adds l3) - // \<- cs3 (alternative version) - - // Base change set with initial content - const cs0 = await createChangeSet({ - lix, - id: "cs0", - elements: [changes[0]!, changes[1]!, changes[2]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - // First modification - updates line 2 - const cs1 = await createChangeSet({ - lix, - id: "cs1", - elements: [changes[3]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - // Second modification - adds line 3 - const cs2 = await createChangeSet({ - lix, - id: "cs2", - elements: [changes[4]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - // Alternative branch - used to demonstrate branching in the graph - const cs3 = await createChangeSet({ - lix, - id: "cs3", - elements: [], - lixcol_version_id: "global", - }); - - // Use cs3 in a query to avoid the lint error - await lix.db - .selectFrom("change_set") - .where("id", "=", cs3.id) - .select(["id"]) - .executeTakeFirst(); - - // Visualize the graph structure (optional, for debugging) - // console.log("Change Set Graph Structure:"); - // console.log("cs0 (c0,c1,c2) -> cs1 (c3) -> cs2 (c4)"); - // console.log(" -> cs3 ()"); - - // Test 1: Using changeSetElementIsLeaf with ancestry of cs2 - // This should return c0, c1, c3, c4 (c3 replaces c2 since it's the same entity) - const leafChangesCs2 = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([commit2])) - .where(changeSetElementIsLeafOf([commit2])) - .select(["change_set_element.change_id", "change_set_element.entity_id"]) - .orderBy("change_set_element.change_id") - .execute(); - - // console.log( - // "Leaf changes in ancestry of cs2:", - // leafChangesCs2.map((c) => `${c.change_id} (${c.entity_id})`) - // ); - - expect(leafChangesCs2.map((c) => c.change_id).sort()).toEqual([ - "c0", - "c1", - "c3", - "c4", - ]); - - // Test 2: Using only changeSetElementInAncestryOf without leaf filter for cs2 - // This should return all changes in the ancestry: c0, c1, c2, c3, c4 - const allChangesCs2 = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([commit2])) - .select(["change_set_element.change_id", "change_set_element.entity_id"]) - .orderBy("change_set_element.change_id") - .execute(); - - // console.log( - // "All changes in 
ancestry of cs2:", - // allChangesCs2.map((c) => `${c.change_id} (${c.entity_id})`) - // ); - - expect(allChangesCs2.map((c) => c.change_id).sort()).toEqual([ - "c0", - "c1", - "c2", - "c3", - "c4", - ]); - - // Test 3: Simulating the restore scenario for cs1 - // This is why recursive mode is important - we need changes from cs0 too - const directChangesCs1 = await lix.db - .selectFrom("change_set_element") - .where("change_set_element.change_set_id", "=", cs1.id) - .select(["change_set_element.change_id", "change_set_element.entity_id"]) - .orderBy("change_set_element.change_id") - .execute(); - - // console.log( - // "Direct changes in cs1:", - // directChangesCs1.map((c) => `${c.change_id} (${c.entity_id})`) - // ); - - // This only has c3, but to restore cs1 we also need c0 and c1 from cs0 - expect(directChangesCs1.map((c) => c.change_id).sort()).toEqual(["c3"]); - - // Test 4: Demonstrating why we need recursive mode but without the leaf filter - // To restore cs1, we need c0, c1 from cs0 and c3 from cs1 - const restoreChangesCs1Recursive = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([commit1])) - // No leaf filter here - .select(["change_set_element.change_id", "change_set_element.entity_id"]) - .orderBy("change_set_element.change_id") - .execute(); - - // console.log( - // "All changes in ancestry of cs1 (needed for restore):", - // restoreChangesCs1Recursive.map((c) => `${c.change_id} (${c.entity_id})`) - // ); - - // This includes c2, which would be replaced by c3 - expect(restoreChangesCs1Recursive.map((c) => c.change_id).sort()).toEqual([ - "c0", - "c1", - "c2", - "c3", - ]); - - // Test 5: Demonstrating the issue - when using ancestry + leaf filter - // This correctly includes c3 instead of c2, but for restore we'd need to handle this differently - const leafChangesCs1 = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([commit1])) - .where(changeSetElementIsLeafOf([commit1])) - .select(["change_set_element.change_id", "change_set_element.entity_id"]) - .orderBy("change_set_element.change_id") - .execute(); - - // console.log( - // "Leaf changes in ancestry of cs1:", - // leafChangesCs1.map((c) => `${c.change_id} (${c.entity_id})`) - // ); - - expect(leafChangesCs1.map((c) => c.change_id).sort()).toEqual([ - "c0", - "c1", - "c3", - ]); - - // Test 6: Demonstrating the issue with cs0 restoration - // When restoring to cs0 but using ancestry + regular leaf filter - // This incorrectly filters out c2 because c3 is the leaf for entity l2 - const restoreChangesCs0WithLeafAtPoint = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementInAncestryOf([commit0])) - .where(changeSetElementIsLeafOf([commit0])) - .select(["change_set_element.change_id", "change_set_element.entity_id"]) - .orderBy("change_set_element.change_id") - .execute(); - - // console.log( - // "Restore changes for cs0 with leaf-at-point filter:", - // restoreChangesCs0WithLeafAtPoint.map((c) => `${c.change_id} (${c.entity_id})`) - // ); - - // This should PASS because c2 is included - it's the leaf at the point of cs0 - expect( - restoreChangesCs0WithLeafAtPoint.map((c) => c.change_id).sort() - ).toEqual(["c0", "c1", "c2"]); -}); - -test("returns combined leaves from multiple target change sets", async () => { - const lix = await openLix({}); - - // Insert required schema entry in global version - await lix.db - .insertInto("stored_schema_all") - .values({ - key: "mock_schema", - version: "1.0", - value: { - 
"x-lix-key": "mock_schema", - "x-lix-version": "1.0", - additionalProperties: false, - properties: {}, - type: "object", - }, - lixcol_version_id: "global", - }) - .execute(); - - // Create changes - const changes = await lix.db - .insertInto("change") - .values([ - { - id: "c0", - entity_id: "entity3", - schema_version: "1.0", - file_id: "file3", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - { - id: "c1", - entity_id: "entity1", - schema_version: "1.0", - file_id: "file1", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - { - id: "c2", - entity_id: "entity2", - schema_version: "1.0", - file_id: "file2", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - { - // Same entity as c2 - id: "c3", - entity_id: "entity2", - schema_version: "1.0", - file_id: "file2", - plugin_key: "mock_plugin", - snapshot_content: null, - schema_key: "mock_schema", - }, - { - id: "c4", - entity_id: "entity4", - schema_version: "1.0", - file_id: "file4", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - { - id: "c5", - entity_id: "entity5", - schema_version: "1.0", - file_id: "file5", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - { - // Index 6 - New change for entity3 - id: "c6", - entity_id: "entity3", // Same entity as c0 - file_id: "file3", - schema_version: "1.0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: null, - }, - ]) - .returningAll() - .execute(); - - // Create change sets with the new history: - // cs0 <- cs1 <- cs2 - // \ - // <- cs3 <- cs4 - const cs0 = await createChangeSet({ - lix, - id: "cs0", - elements: [changes[0]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit0 = await createCommit({ lix, changeSet: cs0 }); - - const cs1 = await createChangeSet({ - lix, - id: "cs1", - elements: [changes[1]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit1 = await createCommit({ - lix, - changeSet: cs1, - parentCommits: [commit0], - }); - - const cs2 = await createChangeSet({ - lix, - id: "cs2", - elements: [changes[2]!, changes[6]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit2 = await createCommit({ - lix, - changeSet: cs2, - parentCommits: [commit1], - }); - - const cs3 = await createChangeSet({ - lix, - id: "cs3", - elements: [changes[3]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit3 = await createCommit({ - lix, - changeSet: cs3, - parentCommits: [commit0], - }); - - const cs4 = await createChangeSet({ - lix, - id: "cs4", - elements: [changes[4]!, changes[5]!].map((change) => ({ - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - })), - lixcol_version_id: "global", - }); - const commit4 = await createCommit({ - lix, - changeSet: cs4, - parentCommits: [commit3], - }); - - // Test 1: Leaves in cs2 (Ancestry: cs0 -> cs1 -> 
cs2) - const leavesCs2 = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementIsLeafOf([commit2])) - .selectAll() - .execute(); - // Expected leaves: c1, c2, c6 (c0 is superseded by c6) - expect(leavesCs2.map((c) => c.change_id).sort()).toEqual(["c1", "c2", "c6"]); - - // Test 2: Leaves in cs4 branch (Ancestry: cs0 -> cs3 -> cs4) - const leavesCs4 = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementIsLeafOf([commit4])) - .selectAll() - .execute(); - // Expected leaves: c0, c3, c4, c5 - expect(leavesCs4.map((c) => c.change_id).sort()).toEqual([ - "c0", - "c3", - "c4", - "c5", - ]); - - // Test 3: Combined leaves from both cs2 and cs4 branches - const combinedLeaves = await lix.db - .selectFrom("change_set_element") - .where(changeSetElementIsLeafOf([commit2, commit4])) // Target heads are commit2 and commit4 - .selectAll() - .execute(); - - // Expected combined leaves: - // c0 is NOT included as it's superseded by c6 in the cs2 branch - // - c1 (entity1): leaf in cs2 ancestry - // - c2 (entity2): leaf in cs2 ancestry (diverged from c3, BOTH LEAVES) - // - c3 (entity2): leaf in cs4 ancestry (diverged from c2, BOTH LEAVES) - // - c4 (entity4): leaf in cs4 ancestry - // - c5 (entity5): leaf in cs4 ancestry - // - c6 (entity3): leaf in cs2 ancestry, supersedes c0 - expect(combinedLeaves.map((c) => c.change_id).sort()).toEqual([ - "c1", - "c2", - "c3", - "c4", - "c5", - "c6", // <-- Supersedes c0 - ]); - expect(combinedLeaves).toHaveLength(6); -}); diff --git a/packages/lix-sdk/src/query-filter/change-set-element-is-leaf-of.ts b/packages/lix-sdk/src/query-filter/change-set-element-is-leaf-of.ts deleted file mode 100644 index 6162e79ac1..0000000000 --- a/packages/lix-sdk/src/query-filter/change-set-element-is-leaf-of.ts +++ /dev/null @@ -1,132 +0,0 @@ -import type { LixCommit } from "../commit/schema.js"; -import { - sql, - type ExpressionBuilder, - type ExpressionWrapper, - type SqlBool, -} from "kysely"; -import type { LixDatabaseSchema } from "../database/schema.js"; - -/** - * Filters the leaves of the given commit(s). - * - * An element is considered a **leaf** if no other element in the combined ancestry - * of the target commits redefines the same entity at a later point in the graph. - * - * @param target - A target commit object (or its id), or an array of such objects/ids. - * - * @example - * // Find leaves relative to a single commit (object) - * db.selectFrom("change_set_element") - * .where(changeSetElementIsLeafOf(commit)) - * .selectAll() - * - * // Find leaves relative to a single commit (id) - * db.selectFrom("change_set_element") - * .where(changeSetElementIsLeafOf(commit.id)) - * .selectAll() - * - * // Find leaves relative to multiple commits - * db.selectFrom("change_set_element") - * .where(changeSetElementIsLeafOf([commit_source, commit_target])) - * .selectAll() - */ -export function changeSetElementIsLeafOf( - target: Pick<LixCommit, "id"> | Array<Pick<LixCommit, "id">> -): ( - eb: ExpressionBuilder<LixDatabaseSchema, "change_set_element"> -) => ExpressionWrapper<LixDatabaseSchema, "change_set_element", SqlBool> { - // Normalize to array - const targetsArray = Array.isArray(target) ? target : [target]; - if (targetsArray.length === 0) { - throw new Error( - "changeSetElementIsLeafOf requires at least one target commit." - ); - } - - // Convert to ids if needed - const ids = targetsArray.map((commit) => - typeof commit === "object" && commit !== null ? commit.id : commit - ); - - // Generate SELECT statements for each target head for UNION ALL - const commitIds = ids - .map((id) => sql`SELECT ${sql.lit(id)} as id`) - .reduce((acc, curr) => (acc ? 
sql`${acc} UNION ALL ${curr}` : curr)); - - return () => - sql` - -- Element must exist within the combined ancestry - change_set_element.change_set_id IN ( - WITH RECURSIVE - -- First get all ancestor commits - combined_ancestry_commits(id) AS ( - ${commitIds} - UNION -- Use UNION here to combine heads with recursive parent lookup (deduplicates) - SELECT ce.parent_id - FROM commit_edge ce - JOIN combined_ancestry_commits a ON ce.child_id = a.id - WHERE ce.parent_id IS NOT NULL - ) - -- Then get the change_set_ids from those commits - SELECT change_set_id - FROM "commit" - WHERE id IN (SELECT id FROM combined_ancestry_commits) - ) - AND - -- And it must be a leaf within that combined ancestry - NOT EXISTS ( - WITH RECURSIVE - -- Combined Ancestry: All commits from ALL target commits upwards - combined_ancestry_commits(id) AS ( - ${commitIds} - UNION -- Use UNION here to combine heads with recursive parent lookup (deduplicates) - SELECT ce.parent_id - FROM commit_edge ce - JOIN combined_ancestry_commits a ON ce.child_id = a.id - WHERE ce.parent_id IS NOT NULL - ), - -- Get change_set_ids from the ancestor commits - combined_ancestry_change_sets(id) AS ( - SELECT change_set_id as id - FROM "commit" - WHERE id IN (SELECT id FROM combined_ancestry_commits) - ), - -- Get the commit for the current element's change set - current_commit(id) AS ( - SELECT id - FROM "commit" - WHERE change_set_id = change_set_element.change_set_id - ), - -- Descendants: All commits from the current element's commit downwards - descendant_commits(id) AS ( - SELECT id FROM current_commit - UNION ALL - SELECT ce.child_id - FROM commit_edge ce - JOIN descendant_commits d ON ce.parent_id = d.id - ), - -- Get change_set_ids from descendant commits - descendant_change_sets(id) AS ( - SELECT change_set_id as id - FROM "commit" - WHERE id IN (SELECT id FROM descendant_commits) - ) - -- Check for a newer element defining the same entity - SELECT 1 - FROM change_set_element AS newer_cse - WHERE - -- Same entity definition - newer_cse.entity_id = change_set_element.entity_id - AND newer_cse.file_id = change_set_element.file_id - AND newer_cse.schema_key = change_set_element.schema_key - -- Different element instance - AND (newer_cse.change_set_id != change_set_element.change_set_id - OR newer_cse.change_id != change_set_element.change_id) - -- Newer element must be in the combined ancestry of the target commits - AND newer_cse.change_set_id IN (SELECT id FROM combined_ancestry_change_sets) - -- Newer element's change set must be a descendant of the current element's change set - AND newer_cse.change_set_id IN (SELECT id FROM descendant_change_sets) - ) - ` as any; -} diff --git a/packages/lix-sdk/src/query-filter/index.ts b/packages/lix-sdk/src/query-filter/index.ts index 4b804be124..81d4783d30 100644 --- a/packages/lix-sdk/src/query-filter/index.ts +++ b/packages/lix-sdk/src/query-filter/index.ts @@ -1,5 +1,2 @@ -export { changeSetElementInAncestryOf } from "./change-set-element-in-ancestry-of.js"; -export { changeSetElementIsLeafOf } from "./change-set-element-is-leaf-of.js"; -export { changeSetElementInSymmetricDifference } from "./change-set-element-in-symmetric-difference.js"; export { commitIsAncestorOf } from "./commit-is-ancestor-of.js"; export { commitIsDescendantOf } from "./commit-is-descendant-of.js"; diff --git a/packages/lix-sdk/src/snapshot/schema.ts b/packages/lix-sdk/src/snapshot/schema.ts index ea733833cc..73f0ff54ac 100644 --- a/packages/lix-sdk/src/snapshot/schema.ts +++ 
b/packages/lix-sdk/src/snapshot/schema.ts @@ -20,6 +20,10 @@ export function applySnapshotDatabaseSchema( INSERT OR IGNORE INTO internal_snapshot (id, content) VALUES ('no-content', NULL); + + -- Index on id (explicit, though PRIMARY KEY already provides one) + CREATE INDEX IF NOT EXISTS idx_internal_snapshot_id + ON internal_snapshot(id); `); } diff --git a/packages/lix-sdk/src/state-history/schema.test.ts b/packages/lix-sdk/src/state-history/schema.test.ts index 9a9b9a855d..709aa195b5 100644 --- a/packages/lix-sdk/src/state-history/schema.test.ts +++ b/packages/lix-sdk/src/state-history/schema.test.ts @@ -2,7 +2,7 @@ import { test, expect } from "vitest"; import { openLix } from "../lix/open-lix.js"; import type { LixSchemaDefinition } from "../schema-definition/definition.js"; -import { createCheckpoint } from "../commit/create-checkpoint.js"; +import { createCheckpoint } from "../state/create-checkpoint.js"; import { commitIsDescendantOf, commitIsAncestorOf, diff --git a/packages/lix-sdk/src/state-history/schema.ts b/packages/lix-sdk/src/state-history/schema.ts index d92a625b92..19ae8de810 100644 --- a/packages/lix-sdk/src/state-history/schema.ts +++ b/packages/lix-sdk/src/state-history/schema.ts @@ -107,7 +107,8 @@ export function applyStateHistoryDatabaseSchema( lix.sqlite.exec(STATE_HISTORY_VIEW_SQL); } -// Optimized to use materialized commit_edge_all and change_set_element_all from global version +// Optimized to keep the generic history view, but add a fast path for depth=0 +// to avoid whole-graph recursion for common queries like "WHERE depth = 0". export const STATE_HISTORY_VIEW_SQL = ` CREATE VIEW IF NOT EXISTS state_history AS WITH @@ -122,13 +123,34 @@ WITH FROM internal_change ic LEFT JOIN internal_snapshot s ON ic.snapshot_id = s.id ), - -- For state_history, we work with any commit_id, not just version heads + + -- Fast path for depth = 0 (no recursion, direct commit join) + depth0_entity_states AS ( + SELECT + chg.entity_id, + chg.schema_key, + chg.file_id, + chg.plugin_key, + chg.snapshot_content, + chg.schema_version, + cse.change_id AS target_change_id, + c.id AS origin_commit_id, + c.id AS root_commit_id, + 0 AS commit_depth + FROM change_set_element_all cse + JOIN commit_all c + ON cse.change_set_id = c.change_set_id + AND c.lixcol_version_id = 'global' + JOIN all_changes_with_snapshots chg + ON chg.id = cse.change_id + WHERE cse.lixcol_version_id = 'global' + ), + + -- General path for depth > 0 (recursive, ancestors of requested commits) requested_commits AS ( SELECT DISTINCT c.id as commit_id FROM commit_all c - -- This will be filtered by the WHERE clause in queries ), - -- Find all commits reachable from requested ones (including ancestors) reachable_commits_from_requested(id, root_commit_id, depth) AS ( SELECT commit_id, commit_id as root_commit_id, 0 as depth FROM requested_commits @@ -138,7 +160,6 @@ WITH JOIN reachable_commits_from_requested r ON ce.child_id = r.id WHERE ce.lixcol_version_id = 'global' ), - -- Get change set IDs for each commit commit_changesets AS ( SELECT c.id as commit_id, @@ -149,7 +170,6 @@ WITH JOIN reachable_commits_from_requested rc ON c.id = rc.id WHERE c.lixcol_version_id = 'global' ), - -- Find all change set elements in reachable commits cse_in_reachable_commits AS ( SELECT cse.entity_id AS target_entity_id, cse.file_id AS target_file_id, @@ -164,7 +184,6 @@ WITH JOIN commit_changesets cc ON cse.change_set_id = cc.change_set_id WHERE cse.lixcol_version_id = 'global' ), - -- For each entity at each depth, find the latest change 
within that depth's commit latest_change_per_entity_per_depth AS ( SELECT r.target_entity_id, @@ -178,8 +197,7 @@ WITH INNER JOIN all_changes_with_snapshots target_change ON r.target_change_id = target_change.id GROUP BY r.target_entity_id, r.target_file_id, r.target_schema_key, r.root_commit_id, r.commit_depth ), - -- Get the actual changes for each entity at each depth - entity_states_at_depths AS ( + depthN_entity_states AS ( SELECT target_change.entity_id, target_change.schema_key, @@ -203,19 +221,25 @@ WITH r.target_change_id = target_change.id AND target_change.created_at = latest.latest_created_at ) + WHERE latest.commit_depth > 0 ) + SELECT - esad.entity_id, - esad.schema_key, - esad.file_id, - esad.plugin_key, - esad.snapshot_content, - esad.schema_version, - esad.target_change_id as change_id, - esad.origin_commit_id as commit_id, - esad.root_commit_id as root_commit_id, - esad.commit_depth as depth -FROM entity_states_at_depths esad -WHERE esad.snapshot_content IS NOT NULL -- Exclude deletions for now -ORDER BY esad.entity_id, esad.commit_depth; + es.entity_id, + es.schema_key, + es.file_id, + es.plugin_key, + es.snapshot_content, + es.schema_version, + es.target_change_id as change_id, + es.origin_commit_id as commit_id, + es.root_commit_id as root_commit_id, + es.commit_depth as depth +FROM ( + SELECT * FROM depth0_entity_states + UNION ALL + SELECT * FROM depthN_entity_states +) AS es +WHERE es.snapshot_content IS NOT NULL -- Exclude deletions for now +ORDER BY es.entity_id, es.commit_depth; `; diff --git a/packages/lix-sdk/src/state/cache/clear-state-cache.ts b/packages/lix-sdk/src/state/cache/clear-state-cache.ts index f32ef7ee90..e07bdb0050 100644 --- a/packages/lix-sdk/src/state/cache/clear-state-cache.ts +++ b/packages/lix-sdk/src/state/cache/clear-state-cache.ts @@ -2,15 +2,46 @@ import type { Lix } from "../../lix/open-lix.js"; import { markStateCacheAsStale } from "./mark-state-cache-as-stale.js"; /** - * Clears the internal state cache. + * Clears all state cache tables. + * + * This function: + * 1. Marks the cache as stale to prevent repopulation during delete + * 2. Finds ALL per-schema physical tables (not just cached ones) + * 3. 
Deletes all entries from each table + * + * @example + * clearStateCache({ lix }); + */ -export function clearStateCache(args: { lix: Lix }): void { +export function clearStateCache(args: { + lix: Pick<Lix, "sqlite">; + timestamp?: string; +}): void { // Mark the cache as stale first to prevent repopulation during delete - markStateCacheAsStale({ lix: args.lix }); + markStateCacheAsStale({ lix: args.lix as Lix, timestamp: args.timestamp }); - // Delete all entries from the cache - args.lix.sqlite.exec({ - sql: `DELETE FROM internal_state_cache`, + // Find ALL physical cache tables in the database (not just cached ones) + // This ensures we clear tables even if they weren't in our cache + // Exclude the v2 virtual table itself + const existingTables = args.lix.sqlite.exec({ + sql: `SELECT name FROM sqlite_schema + WHERE type='table' + AND name LIKE 'internal_state_cache_%' + AND name != 'internal_state_cache' + AND name != 'internal_state_cache'`, returnValue: "resultRows", - }); + }) as any[]; + + // Delete all entries from each physical table + if (existingTables) { + for (const row of existingTables) { + const tableName = row[0] as string; + // Skip virtual tables (shouldn't happen with our query, but be safe) + if (tableName === "internal_state_cache") continue; + + args.lix.sqlite.exec({ + sql: `DELETE FROM ${tableName}`, + returnValue: "resultRows", + }); + } + } } diff --git a/packages/lix-sdk/src/state/cache/create-schema-cache-table.test.ts b/packages/lix-sdk/src/state/cache/create-schema-cache-table.test.ts new file mode 100644 index 0000000000..4ac408e1fd --- /dev/null +++ b/packages/lix-sdk/src/state/cache/create-schema-cache-table.test.ts @@ -0,0 +1,58 @@ +import { test, expect } from "vitest"; +import { openLix } from "../../lix/open-lix.js"; +import { + createSchemaCacheTable, + schemaKeyToCacheTableName, +} from "./create-schema-cache-table.js"; + +test("createSchemaCacheTable creates table with core indexes and is idempotent", async () => { + const lix = await openLix({}); + + const tableName = schemaKeyToCacheTableName("lix_test_create"); + + // First call should create the table and indexes + createSchemaCacheTable({ lix, tableName }); + + // Verify table exists and WITHOUT ROWID + const tbl = lix.sqlite.exec({ + sql: `SELECT name, sql FROM sqlite_schema WHERE type='table' AND name = ?`, + bind: [tableName], + returnValue: "resultRows", + rowMode: "object", + }) as any[]; + + expect(tbl?.[0]?.name).toBe(tableName); + expect(String(tbl?.[0]?.sql || "")).toMatch(/WITHOUT ROWID/); + + // Verify core indexes exist + const idxRows = lix.sqlite.exec({ + sql: `SELECT name, sql FROM sqlite_schema WHERE type='index' AND tbl_name = ? 
ORDER BY name`, + bind: [tableName], + returnValue: "resultRows", + rowMode: "object", + }) as { name: string; sql: string }[]; + + const names = new Set(idxRows.map((r) => r.name)); + expect(Array.from(names)).toEqual( + expect.arrayContaining([ + `idx_${tableName}_version_id`, + `idx_${tableName}_vfe`, + `idx_${tableName}_fv`, + ]) + ); + + // Second call should be a no-op (idempotent) + createSchemaCacheTable({ lix, tableName }); + + const idxRows2 = lix.sqlite.exec({ + sql: `SELECT name FROM sqlite_schema WHERE type='index' AND tbl_name = ?`, + bind: [tableName], + returnValue: "resultRows", + }) as string[][]; + + // Still contains the same three core indexes (no duplicates) + const idxCount = idxRows2.filter((r) => + String(r?.[0] || "").startsWith(`idx_${tableName}_`) + ).length; + expect(idxCount).toBeGreaterThanOrEqual(3); +}); diff --git a/packages/lix-sdk/src/state/cache/create-schema-cache-table.ts b/packages/lix-sdk/src/state/cache/create-schema-cache-table.ts new file mode 100644 index 0000000000..e80388ff6b --- /dev/null +++ b/packages/lix-sdk/src/state/cache/create-schema-cache-table.ts @@ -0,0 +1,64 @@ +import type { Lix } from "../../lix/open-lix.js"; + +/** + * Creates (or updates) a per-schema internal state cache table with core indexes. + * Idempotent: safe to call multiple times and on existing tables. + * + * Core choices: + * - STRICT + WITHOUT ROWID for compact storage and fast PK lookups + * - PK(entity_id, file_id, version_id) to reflect logical identity + * - Indexes to accelerate common access patterns used by views/benches + */ +export function createSchemaCacheTable(args: { + lix: Pick<Lix, "sqlite">; + tableName: string; +}): void { + const { lix, tableName } = args; + + // Create table if it doesn't exist + const createTableSql = ` + CREATE TABLE IF NOT EXISTS ${tableName} ( + entity_id TEXT NOT NULL, + schema_key TEXT NOT NULL, + file_id TEXT NOT NULL, + version_id TEXT NOT NULL, + plugin_key TEXT NOT NULL, + snapshot_content BLOB, + schema_version TEXT NOT NULL, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + inherited_from_version_id TEXT, + inheritance_delete_marker INTEGER DEFAULT 0, + change_id TEXT, + commit_id TEXT, + PRIMARY KEY (entity_id, file_id, version_id) + ) STRICT, WITHOUT ROWID; + `; + + lix.sqlite.exec({ sql: createTableSql }); + + // Core static indexes for common access patterns + // 1) Fast version-scoped lookups (frequent) + lix.sqlite.exec({ + sql: `CREATE INDEX IF NOT EXISTS idx_${tableName}_version_id ON ${tableName} (version_id)`, + }); + + // 2) Fast lookups by (version_id, file_id, entity_id) – complements PK order + lix.sqlite.exec({ + sql: `CREATE INDEX IF NOT EXISTS idx_${tableName}_vfe ON ${tableName} (version_id, file_id, entity_id)`, + }); + + // 3) Fast scans by file within a version + lix.sqlite.exec({ + sql: `CREATE INDEX IF NOT EXISTS idx_${tableName}_fv ON ${tableName} (file_id, version_id)`, + }); + + // Update planner stats + lix.sqlite.exec({ sql: `ANALYZE ${tableName}` }); +} + +/** Utility to sanitize a schema_key for use in a physical table name */ +export function schemaKeyToCacheTableName(schema_key: string): string { + const sanitized = schema_key.replace(/[^a-zA-Z0-9]/g, "_"); + return `internal_state_cache_${sanitized}`; +} diff --git a/packages/lix-sdk/src/state/cache/is-stale-state-cache.ts b/packages/lix-sdk/src/state/cache/is-stale-state-cache.ts index 2afb11b593..85dac0cefe 100644 --- a/packages/lix-sdk/src/state/cache/is-stale-state-cache.ts +++ b/packages/lix-sdk/src/state/cache/is-stale-state-cache.ts 
@@ -16,6 +16,7 @@ export function isStaleStateCache(args: { .where("entity_id", "=", CACHE_STALE_KEY) .where("schema_key", "=", "lix_key_value") .where("version_id", "=", "global") + .where("snapshot_content", "is not", null) .select(sql`json_extract(snapshot_content, '$.value')`.as("value")), }); diff --git a/packages/lix-sdk/src/state/cache/mark-state-cache-as-stale.ts b/packages/lix-sdk/src/state/cache/mark-state-cache-as-stale.ts index fb840d2d8a..6024c119e1 100644 --- a/packages/lix-sdk/src/state/cache/mark-state-cache-as-stale.ts +++ b/packages/lix-sdk/src/state/cache/mark-state-cache-as-stale.ts @@ -12,6 +12,8 @@ export function markStateCacheAsStale(args: { // Set the cache stale flag to "true" in untracked state const snapshotContent = JSON.stringify({ key: CACHE_STALE_KEY, value: true }); + const ts = args.timestamp ?? timestamp({ lix: args.lix }); + updateUntrackedState({ lix: args.lix, change: { @@ -21,7 +23,7 @@ export function markStateCacheAsStale(args: { plugin_key: "lix_own_entity", snapshot_content: snapshotContent, schema_version: LixKeyValueSchema["x-lix-version"], - created_at: args.timestamp ?? timestamp({ lix: args.lix }), + created_at: ts, }, version_id: "global", }); @@ -37,6 +39,8 @@ export function markStateCacheAsFresh(args: { value: false, }); + const ts = args.timestamp ?? timestamp({ lix: args.lix }); + updateUntrackedState({ lix: args.lix, change: { @@ -46,7 +50,7 @@ export function markStateCacheAsFresh(args: { plugin_key: "lix_own_entity", snapshot_content: snapshotContent, schema_version: LixKeyValueSchema["x-lix-version"], - created_at: args.timestamp ?? timestamp({ lix: args.lix }), + created_at: ts, }, version_id: "global", }); diff --git a/packages/lix-sdk/src/state/cache/populate-state-cache.test.ts b/packages/lix-sdk/src/state/cache/populate-state-cache.test.ts index 98906d8695..79952191e9 100644 --- a/packages/lix-sdk/src/state/cache/populate-state-cache.test.ts +++ b/packages/lix-sdk/src/state/cache/populate-state-cache.test.ts @@ -1,13 +1,15 @@ import { test, expect } from "vitest"; import { openLix } from "../../lix/open-lix.js"; import { populateStateCache } from "./populate-state-cache.js"; +import { updateStateCache } from "./update-state-cache.js"; +import { timestamp } from "../../deterministic/timestamp.js"; +import type { LixChangeRaw } from "../../change/schema.js"; import { clearStateCache } from "./clear-state-cache.js"; -import { sql, type Kysely } from "kysely"; -import type { LixInternalDatabaseSchema } from "../../database/schema.js"; import { createVersion } from "../../version/create-version.js"; +import { Kysely, sql } from "kysely"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; -test("should populate cache for a specific version_id", async () => { - // Test that populateStateCache({ version_id: "global" }) populates only global version +test("populates v2 cache from materializer", async () => { const lix = await openLix({ keyValues: [ { @@ -17,248 +19,518 @@ test("should populate cache for a specific version_id", async () => { ], }); - // Add some test data to the global version - await lix.db - .insertInto("key_value_all") - .values({ - key: "test-key", - value: "test-value", - lixcol_version_id: "global", - }) - .execute(); - - // Clear any existing cache - clearStateCache({ lix }); - - // Populate cache for global version only - populateStateCache(lix.sqlite, { version_id: "global" }); - - // Query cache directly for our test entity using Kysely - const cacheEntry = await ( - lix.db as unknown 
as Kysely - ) - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", "test-key") - .where("schema_key", "=", "lix_key_value") - .execute(); + const currentTimestamp = timestamp({ lix }); + + // First, insert some test data using updateStateCacheV2 + const testChanges: LixChangeRaw[] = [ + { + id: "test-change-1", + entity_id: "entity-1", + schema_key: "lix_test", + schema_version: "1.0", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-1", value: "test1" }), + created_at: currentTimestamp, + }, + { + id: "test-change-2", + entity_id: "entity-2", + schema_key: "lix_test", + schema_version: "1.0", + file_id: "file2", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-2", value: "test2" }), + created_at: currentTimestamp, + }, + { + id: "test-change-3", + entity_id: "entity-3", + schema_key: "lix_other", + schema_version: "1.0", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-3", value: "test3" }), + created_at: currentTimestamp, + }, + ]; + + // Insert data into v2 cache + updateStateCache({ + lix, + changes: testChanges, + commit_id: "test-commit-1", + version_id: "global", + }); - expect(cacheEntry).toHaveLength(1); + // Check lix_test table + const lixTestTable = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_lix_test ORDER BY entity_id`, + returnValue: "resultRows", + rowMode: "object", + }) as any[]; + + expect(lixTestTable).toHaveLength(2); + expect(lixTestTable[0].entity_id).toBe("entity-1"); + expect(lixTestTable[1].entity_id).toBe("entity-2"); + + // Check lix_other table + const lixOtherTable = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_lix_other ORDER BY entity_id`, + returnValue: "resultRows", + rowMode: "object", + }) as any[]; + + expect(lixOtherTable).toHaveLength(1); + expect(lixOtherTable[0].entity_id).toBe("entity-3"); +}); - // Should find our test data in the cache - expect(cacheEntry[0]?.entity_id).toBe("test-key"); - expect(cacheEntry[0]?.version_id).toBe("global"); - expect(cacheEntry[0]?.schema_key).toBe("lix_key_value"); +test("populates v2 cache with version filter", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + }, + ], + }); - // Verify the snapshot content contains our data - const snapshot = cacheEntry[0]!.snapshot_content! 
as any; - expect(snapshot.key).toBe("test-key"); - expect(snapshot.value).toBe("test-value"); -}); + const currentTimestamp = timestamp({ lix }); + + // Insert test data for different versions + const changes: LixChangeRaw[] = [ + { + id: "change-v1-1", + entity_id: "entity-v1", + schema_key: "lix_test", + schema_version: "1.0", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-v1", value: "v1" }), + created_at: currentTimestamp, + }, + ]; + + updateStateCache({ + lix, + changes, + commit_id: "commit-v1", + version_id: "version-1", + }); -test("should filter by entity_id when provided", async () => { - // Test that populateStateCache({ version_id: "global", entity_id: "specific-entity" }) - // only populates cache entries for that specific entity -}); + const changesV2: LixChangeRaw[] = [ + { + id: "change-v2-1", + entity_id: "entity-v2", + schema_key: "lix_test", + schema_version: "1.0", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-v2", value: "v2" }), + created_at: currentTimestamp, + }, + ]; + + updateStateCache({ + lix, + changes: changesV2, + commit_id: "commit-v2", + version_id: "version-2", + }); -test("should filter by schema_key when provided", async () => { - // Test that populateStateCache({ version_id: "global", schema_key: "lix_version" }) - // only populates cache entries for that specific schema + // Verify both versions exist + const allData = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_lix_test ORDER BY entity_id`, + returnValue: "resultRows", + rowMode: "object", + }) as any[]; + + expect(allData).toHaveLength(2); + expect(allData[0].version_id).toBe("version-1"); + expect(allData[1].version_id).toBe("version-2"); + + // Populate only version-1 + populateStateCache(lix, { version_id: "version-1" }); + + // Check that version-1 was cleared (no materializer data to re-populate) + // but version-2 remains + const afterPopulate = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_lix_test ORDER BY entity_id`, + returnValue: "resultRows", + rowMode: "object", + }) as any[]; + + expect(afterPopulate).toHaveLength(1); + expect(afterPopulate[0].version_id).toBe("version-2"); + expect(afterPopulate[0].entity_id).toBe("entity-v2"); }); -test("should filter by file_id when provided", async () => { - // Test that populateStateCache({ version_id: "global", file_id: "lix" }) - // only populates cache entries for that specific file -}); +test("clears all v2 cache tables when no filters specified", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + }, + ], + }); -test("should combine multiple filters correctly", async () => { - // Test that populateStateCache({ version_id: "global", entity_id: "x", schema_key: "y" }) - // applies all filters together with AND logic -}); + const currentTimestamp = timestamp({ lix }); + + // Insert data into multiple schema tables + const changes: LixChangeRaw[] = [ + { + id: "change-1", + entity_id: "entity-1", + schema_key: "schema_a", + schema_version: "1.0", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-1", value: "a" }), + created_at: currentTimestamp, + }, + { + id: "change-2", + entity_id: "entity-2", + schema_key: "schema_b", + schema_version: "1.0", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-2", value: "b" }), + created_at: 
currentTimestamp, + }, + { + id: "change-3", + entity_id: "entity-3", + schema_key: "schema_c", + schema_version: "1.0", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ id: "entity-3", value: "c" }), + created_at: currentTimestamp, + }, + ]; + + updateStateCache({ + lix, + changes, + commit_id: "commit-1", + version_id: "global", + }); -test("should delete existing cache entries before populating", async () => { - // Test that existing cache entries matching the filters are cleared before new ones are inserted - // This prevents duplicates -}); + // Verify data exists in all tables + const schemaA = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_schema_a`, + returnValue: "resultRows", + }); + const schemaB = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_schema_b`, + returnValue: "resultRows", + }); + const schemaC = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_schema_c`, + returnValue: "resultRows", + }); -test("should handle empty materialization results gracefully", async () => { - // Test behavior when the materializer returns no results for the given filters - // Should clear matching cache entries but not error -}); + expect(schemaA).toHaveLength(1); + expect(schemaB).toHaveLength(1); + expect(schemaC).toHaveLength(1); -test("should populate cache with correct inheritance relationships", async () => { - // Test that inherited state is correctly populated with proper inherited_from_version_id values - // Verify that inheritance_delete_marker is set to 0 (from materializer) -}); + // Populate with no filters (should clear all) + populateStateCache(lix); -test("should handle version that doesn't exist in materializer", async () => { - // Test behavior when requesting a version_id that has no materialized state - // Should clear cache entries for that version but not error -}); + // All tables should be empty now (no materializer data) + const schemaAAfter = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_schema_a`, + returnValue: "resultRows", + }); + const schemaBAfter = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_schema_b`, + returnValue: "resultRows", + }); + const schemaCAfter = lix.sqlite.exec({ + sql: `SELECT * FROM internal_state_cache_schema_c`, + returnValue: "resultRows", + }); -test("should populate cache with all required columns", async () => { - // Test that all expected columns are populated correctly: - // entity_id, schema_key, file_id, version_id, plugin_key, snapshot_content, - // schema_version, created_at, updated_at, inherited_from_version_id, - // inheritance_delete_marker, change_id, commit_id + expect(schemaAAfter).toHaveLength(0); + expect(schemaBAfter).toHaveLength(0); + expect(schemaCAfter).toHaveLength(0); }); -test("should implement copy-on-write semantics by only caching direct entries", async () => { +// This test verifies that when populating cache for a child version, +// all parent versions in the inheritance chain are also populated. +// This is necessary because the child version needs access to inherited state. 
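+//
+// A minimal usage sketch (assuming the populateStateCache signature introduced
+// in populate-state-cache.ts below; version ids are the ones created in this test):
+//
+//   populateStateCache(lix, { version_id: "version_c" });
+//
+// This should also materialize the direct rows of version_c's ancestors
+// (version_b, version_a, and ultimately global), so that
+// internal_resolved_state_all can resolve inherited entities at read time.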
+test("inheritance is queryable from the resolved view after population", async () => { const lix = await openLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, }, ], }); - // Add some test data to global version - await lix.db - .insertInto("key_value") - .values({ - key: "test-key", - value: "test-value", - }) - .execute(); + const currentTimestamp = timestamp({ lix }); - // Clear any existing cache - clearStateCache({ lix }); + // Create version hierarchy: C inherits from B, B inherits from A + const versionA = await createVersion({ + lix, + name: "Version A", + id: "version_a", + }); - // Populate cache - should only copy direct entries (copy-on-write) - populateStateCache(lix.sqlite); + const versionB = await createVersion({ + lix, + name: "Version B", + id: "version_b", + inheritsFrom: versionA, + }); - // Note: We don't check materializer entries here since internal_state_materializer - // is not part of the public schema, but we verify cache behavior + const versionC = await createVersion({ + lix, + name: "Version C", + id: "version_c", + inheritsFrom: versionB, + }); - // Check what's in the cache - should only have direct entries - const cacheEntries = await ( - lix.db as unknown as Kysely - ) - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", "test-key") - .where("schema_key", "=", "lix_key_value") + // Insert test entities directly into state_all for each version using Kysely + // Entity in version A + await lix.db + .insertInto("state_all") + .values({ + entity_id: "entity_a", + schema_key: "test_entity", + file_id: "file1", + version_id: versionA.id, + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ + id: "entity_a", + value: "from_version_a", + }) as any, + schema_version: "1.0", + created_at: currentTimestamp, + updated_at: currentTimestamp, + }) .execute(); - // Cache should only contain direct entries (inherited_from_version_id IS NULL) - expect(cacheEntries).toHaveLength(1); - expect(cacheEntries[0]?.inherited_from_version_id).toBeNull(); - - // Populate cache multiple times - should not create duplicates due to DELETE before INSERT - populateStateCache(lix.sqlite); - populateStateCache(lix.sqlite); + // Entity in version B + await lix.db + .insertInto("state_all") + .values({ + entity_id: "entity_b", + schema_key: "test_entity", + file_id: "file1", + version_id: versionB.id, + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ + id: "entity_b", + value: "from_version_b", + }) as any, + schema_version: "1.0", + created_at: currentTimestamp, + updated_at: currentTimestamp, + }) + .execute(); - const cacheEntriesAfterMultiplePopulate = await ( - lix.db as unknown as Kysely - ) - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", "test-key") - .where("schema_key", "=", "lix_key_value") + // Entity in version C + await lix.db + .insertInto("state_all") + .values({ + entity_id: "entity_c", + schema_key: "test_entity", + file_id: "file1", + version_id: versionC.id, + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ + id: "entity_c", + value: "from_version_c", + }) as any, + schema_version: "1.0", + created_at: currentTimestamp, + updated_at: currentTimestamp, + }) .execute(); - // Should still have exactly one entry (no duplicates) - 
expect(cacheEntriesAfterMultiplePopulate).toHaveLength(1); + // Clear all cache to start fresh + clearStateCache({ lix }); + + // ACT: Populate ONLY version C + populateStateCache(lix, { version_id: versionC.id }); - // Verify copy-on-write: check that all cache entries have inherited_from_version_id = null - const allCacheEntries = await ( + // ASSERT: Check what got populated in the cache + // Read from the virtual table internal_state_cache using Kysely with json function + const resolvedContents = await ( lix.db as unknown as Kysely ) - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) + .selectFrom("internal_resolved_state_all") + .select([ + "entity_id", + "schema_key", + "file_id", + "version_id", + "inherited_from_version_id", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) + .where("schema_key", "=", "test_entity") + .where("version_id", "=", versionC.id) + .orderBy("entity_id") .execute(); - for (const entry of allCacheEntries) { - expect(entry.inherited_from_version_id).toBeNull(); - } + // EXPECTED BEHAVIOR: When populating version_c, the cache should contain + // all entities that version_c can see through inheritance: + // 1. entity_a from version_a (inherited through B -> A) + // 2. entity_b from version_b (inherited from B) + // 3. entity_c from version_c (direct) + + // All three entities should be in the cache + expect(resolvedContents).toHaveLength(3); + + // Verify entity_a is cached (inherited from version_a) + // All entities are stored with version_id=version_c since that's the version viewing them + const entityA = resolvedContents.find((r: any) => r.entity_id === "entity_a"); + expect(entityA).toBeTruthy(); + expect(entityA?.version_id).toBe(versionC.id); // Stored under version_c + expect(entityA?.inherited_from_version_id).toBe(versionA.id); // But inherited from version_a + // snapshot_content is already a parsed object from the sql`json()` function + expect((entityA?.snapshot_content as any).value).toBe("from_version_a"); + + // Verify entity_b is cached (inherited from version_b) + const entityB = resolvedContents.find((r: any) => r.entity_id === "entity_b"); + expect(entityB).toBeTruthy(); + expect(entityB?.version_id).toBe(versionC.id); // Stored under version_c + expect(entityB?.inherited_from_version_id).toBe(versionB.id); // But inherited from version_b + expect((entityB?.snapshot_content as any).value).toBe("from_version_b"); + + // Verify entity_c is cached (direct from version_c) + const entityC = resolvedContents.find((r: any) => r.entity_id === "entity_c"); + expect(entityC).toBeTruthy(); + expect(entityC?.version_id).toBe(versionC.id); // Stored under version_c + expect(entityC?.inherited_from_version_id).toBeNull(); // Direct, not inherited + expect((entityC?.snapshot_content as any).value).toBe("from_version_c"); }); -test("should cache tombstones (delete markers) when they are direct entries", async () => { +test("global version entities are populated when populating child versions", async () => { const lix = await openLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, }, ], }); - // First, create an entity in the global version - await lix.db - .insertInto("key_value_all") - .values({ - key: "to-be-deleted", - value: "original-value", - lixcol_version_id: "global", - }) - .execute(); + const db = lix.db as unknown as Kysely; - await createVersion({ - lix: lix, - id: "feature-version", + // Create a 
test version that will inherit from global + const testVersion = await createVersion({ + lix, + name: "Test Version", + id: "test_version_1", }); - // Create a delete marker (tombstone) in the feature version + // Insert a test entity into state_all for global version + // This simulates entities that exist in global and should be inherited by all versions await lix.db - .deleteFrom("key_value_all") - .where("key", "=", "to-be-deleted") - .where("lixcol_version_id", "=", "feature-version") + .insertInto("state_all") + .values({ + entity_id: "global_entity_1", + schema_key: "test_entity", + file_id: "test_file", + version_id: "global", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ + id: "global_entity_1", + value: "from_global", + updated_after_merge: true, + }) as any, + schema_version: "1.0", + }) .execute(); - // Clear cache and populate for the feature version - clearStateCache({ lix }); - - populateStateCache(lix.sqlite, { version_id: "feature-version" }); - - // Check cache entries for the deleted entity in feature version - const cacheEntries = await ( - lix.db as unknown as Kysely - ) - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", "to-be-deleted") - .where("schema_key", "=", "lix_key_value") - .where("version_id", "=", "feature-version") + // Verify the test version can see this entity through inheritance before cache miss + const beforeCacheMiss = await db + .selectFrom("state_all") + .where("version_id", "=", testVersion.id) + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "global_entity_1") + .select([ + "entity_id", + "change_id", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) .execute(); - // Should have a cache entry for the tombstone since it's a direct entry in feature-version - expect(cacheEntries).toHaveLength(1); - expect(cacheEntries[0]?.inherited_from_version_id).toBeNull(); - expect(cacheEntries[0]?.version_id).toBe("feature-version"); + expect(beforeCacheMiss).toHaveLength(1); + const originalChangeId = beforeCacheMiss[0]?.change_id; + expect((beforeCacheMiss[0]?.snapshot_content as any).value).toBe( + "from_global" + ); - // The snapshot content should be null for tombstones - expect(cacheEntries[0]?.snapshot_content).toBeNull(); + // Clear all cache to simulate cache miss + clearStateCache({ lix }); - // Check resolved state for global version - should have the entity - const globalResolvedState = await lix.db + // ACT: Populate the test version's cache (simulating cache miss recovery) + populateStateCache(lix, { version_id: testVersion.id }); + + // ASSERT: After cache population, the test version should still see the global entity + const afterCachePopulation = await db .selectFrom("state_all") - .selectAll() - .where("entity_id", "=", "to-be-deleted") - .where("schema_key", "=", "lix_key_value") - .where("version_id", "=", "global") + .where("version_id", "=", testVersion.id) + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "global_entity_1") + .select([ + "entity_id", + "change_id", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) .execute(); - expect(globalResolvedState).toHaveLength(1); - expect((globalResolvedState[0]?.snapshot_content as any)?.value).toBe( - "original-value" + // Should still see the entity with the same change_id + expect(afterCachePopulation).toHaveLength(1); + expect(afterCachePopulation[0]?.change_id).toBe(originalChangeId); + 
expect((afterCachePopulation[0]?.snapshot_content as any).value).toBe( + "from_global" ); - // Check resolved state for feature-version - should NOT have the entity (it's deleted) - const featureResolvedState = await lix.db - .selectFrom("state_all") - .selectAll() - .where("entity_id", "=", "to-be-deleted") - .where("schema_key", "=", "lix_key_value") - .where("version_id", "=", "feature-version") + // Check the physical cache directly: the parent/global authored entry + // should be materialized in its own version's cache table. + const cacheEntries = await db + .selectFrom("internal_state_cache_test_entity" as any) + .where("entity_id", "=", "global_entity_1") + .select([ + "entity_id", + "change_id", + "version_id", + "inherited_from_version_id", + ]) .execute(); - // The resolved state should NOT include tombstones - they should be filtered out - // Only the cache should include tombstones for proper deletion handling - expect(featureResolvedState).toHaveLength(0); + const globalEntry = cacheEntries.find((e: any) => e.version_id === "global"); + expect(globalEntry).toBeTruthy(); + expect(globalEntry?.change_id).toBe(originalChangeId); + + // Inheritance is resolved at read time via the resolved view. + // Verify the child version sees the inherited row from global. + const resolvedInherited = await db + .selectFrom("internal_resolved_state_all") + .where("version_id", "=", testVersion.id) + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "global_entity_1") + .select([ + "entity_id", + "change_id", + "version_id", + "inherited_from_version_id", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) + .execute(); + + expect(resolvedInherited).toHaveLength(1); + expect(resolvedInherited[0]?.version_id).toBe(testVersion.id); + expect(resolvedInherited[0]?.inherited_from_version_id).toBe("global"); + expect(resolvedInherited[0]?.change_id).toBe(originalChangeId); + expect((resolvedInherited[0]?.snapshot_content as any).value).toBe( + "from_global" + ); }); diff --git a/packages/lix-sdk/src/state/cache/populate-state-cache.ts b/packages/lix-sdk/src/state/cache/populate-state-cache.ts index a3db1c1a96..89c144772c 100644 --- a/packages/lix-sdk/src/state/cache/populate-state-cache.ts +++ b/packages/lix-sdk/src/state/cache/populate-state-cache.ts @@ -1,124 +1,204 @@ import type { SqliteWasmDatabase } from "sqlite-wasm-kysely"; +import type { Lix } from "../../lix/open-lix.js"; +import { getStateCacheV2Tables } from "./schema.js"; +import { createSchemaCacheTable } from "./create-schema-cache-table.js"; -export interface PopulateStateCacheOptions { +export interface PopulateStateCacheV2Options { version_id?: string; // Optional - if not provided, all active versions are populated - entity_id?: string; // Optional - schema_key?: string; // Optional - file_id?: string; // Optional } +/** + * Populates the state cache v2 from the materializer view. + * + * This function reads from the materialized state and writes to the per-schema + * physical cache tables. If a version_id is provided, it also populates the + * cache for all ancestor versions. 
+ * + * @param lix - The Lix instance with sqlite and db + * @param options - Optional filters for selective population + */ export function populateStateCache( - sqlite: SqliteWasmDatabase, - options: PopulateStateCacheOptions = {} + lix: Pick, + options: PopulateStateCacheV2Options = {} ): void { - // Build WHERE clause based on options - const whereConditions: string[] = []; - const bindParams: any[] = []; + const { sqlite } = lix; + + let versionsToPopulate: string[]; if (options.version_id) { - whereConditions.push("m.version_id = ?"); - bindParams.push(options.version_id); + // When a specific version is requested, also include all its ancestors + // This ensures the resolved view can access inherited state + // Use the materializer's version ancestry view to find all ancestors + const ancestorRows = sqlite.exec({ + sql: ` + SELECT DISTINCT ancestor_version_id as version_id + FROM internal_materialization_version_ancestry + WHERE version_id = ? + `, + bind: [options.version_id], + returnValue: "resultRows", + rowMode: "array", + }) as [string][]; + + // The ancestry view includes the version itself and all its ancestors + versionsToPopulate = + ancestorRows.length > 0 + ? ancestorRows.map((row) => row[0]) + : [options.version_id]; } else { - // If no version_id specified, only populate active versions (with tips) - whereConditions.push(`EXISTS ( - SELECT 1 FROM internal_materialization_version_tips vt - WHERE vt.version_id = m.version_id - )`); + // If no version_id specified, populate all active versions (with tips) + const tipRows = sqlite.exec({ + sql: `SELECT version_id FROM internal_materialization_version_tips`, + returnValue: "resultRows", + rowMode: "array", + }) as [string][]; + versionsToPopulate = tipRows.map((row) => row[0]); } - if (options.entity_id) { - whereConditions.push("m.entity_id = ?"); - bindParams.push(options.entity_id); - } - if (options.schema_key) { - whereConditions.push("m.schema_key = ?"); - bindParams.push(options.schema_key); - } - if (options.file_id) { - whereConditions.push("m.file_id = ?"); - bindParams.push(options.file_id); + if (versionsToPopulate.length === 0) { + return; } - // Delete existing cache entries that match the criteria - if ( - options.version_id || - options.entity_id || - options.schema_key || - options.file_id - ) { - // Build delete conditions - only for specific filters, not for the EXISTS clause - const deleteConditions: string[] = []; - const deleteParams: any[] = []; - - if (options.version_id) { - deleteConditions.push("version_id = ?"); - deleteParams.push(options.version_id); - } - if (options.entity_id) { - deleteConditions.push("entity_id = ?"); - deleteParams.push(options.entity_id); - } - if (options.schema_key) { - deleteConditions.push("schema_key = ?"); - deleteParams.push(options.schema_key); - } - if (options.file_id) { - deleteConditions.push("file_id = ?"); - deleteParams.push(options.file_id); - } + // Clear existing cache entries for the versions being populated + const tableCache = getStateCacheV2Tables(lix); + for (const tableName of tableCache) { + if (tableName === "internal_state_cache") continue; - if (deleteConditions.length > 0) { + const tableExists = sqlite.exec({ + sql: `SELECT 1 FROM sqlite_schema WHERE type='table' AND name=?`, + bind: [tableName], + returnValue: "resultRows", + }); + + if (tableExists && tableExists.length > 0) { + const placeholders = versionsToPopulate.map(() => "?").join(","); sqlite.exec({ - sql: `DELETE FROM internal_state_cache WHERE ${deleteConditions.join(" AND ")}`, - 
bind: deleteParams, + sql: `DELETE FROM ${tableName} WHERE version_id IN (${placeholders})`, + bind: versionsToPopulate, }); } - } else { - // No specific filters - clear entire cache (populate all active versions) - sqlite.exec(`DELETE FROM internal_state_cache`); } - // Populate cache from the materializer view for the specified version - // IMPORTANT: Only copy direct entries (inherited_from_version_id IS NULL) - // Inherited state should not be stored in the cache - inheritance is handled - // by the resolved state view at query time - const insertSql = ` - INSERT INTO internal_state_cache ( - entity_id, - schema_key, - file_id, - version_id, - plugin_key, - snapshot_content, - schema_version, - created_at, - updated_at, - inherited_from_version_id, - inheritance_delete_marker, - change_id, - commit_id - ) + // Query materialized state to get changes for all required versions + const placeholders = versionsToPopulate.map(() => "?").join(","); + const selectSql = ` SELECT m.entity_id, m.schema_key, m.file_id, m.version_id, m.plugin_key, - CASE WHEN m.snapshot_content IS NULL THEN NULL ELSE jsonb(m.snapshot_content) END as snapshot_content, + m.snapshot_content, m.schema_version, m.created_at, m.updated_at, - m.inherited_from_version_id, - 0 as inheritance_delete_marker, -- No deletion markers from materializer m.change_id, - m.commit_id + m.commit_id, + m.inherited_from_version_id FROM internal_state_materializer m - WHERE ${whereConditions.join(" AND ")} - AND m.inherited_from_version_id IS NULL -- Only direct entries, no inherited state + WHERE m.version_id IN (${placeholders}) + AND m.inherited_from_version_id IS NULL `; - sqlite.exec({ - sql: insertSql, - bind: bindParams, - }); + const results = sqlite.exec({ + sql: selectSql, + bind: versionsToPopulate, + returnValue: "resultRows", + rowMode: "object", + }) as any[]; + + if (!results || results.length === 0) { + return; + } + + // Group results by schema_key for batch processing + const rowsBySchema = new Map(); + + for (const row of results) { + if (!rowsBySchema.has(row.schema_key)) { + rowsBySchema.set(row.schema_key, []); + } + rowsBySchema.get(row.schema_key)!.push(row); + } + + // Process each schema's rows directly to its physical table + for (const [schema_key, schemaRows] of rowsBySchema) { + // Sanitize schema_key for use in table name - must match update-state-cache.ts + const sanitizedSchemaKey = schema_key.replace(/[^a-zA-Z0-9]/g, "_"); + const tableName = `internal_state_cache_${sanitizedSchemaKey}`; + + // Ensure table exists (creates if needed, updates cache) + ensureTableExists(sqlite, tableName); + + // Batch insert with prepared statement + const stmt = sqlite.prepare(` + INSERT INTO ${tableName} ( + entity_id, + schema_key, + file_id, + version_id, + plugin_key, + snapshot_content, + schema_version, + created_at, + updated_at, + inherited_from_version_id, + inheritance_delete_marker, + change_id, + commit_id + ) VALUES (?, ?, ?, ?, ?, jsonb(?), ?, ?, ?, ?, ?, ?, ?) 
+ ON CONFLICT(entity_id, file_id, version_id) DO UPDATE SET + schema_key = excluded.schema_key, + plugin_key = excluded.plugin_key, + snapshot_content = excluded.snapshot_content, + schema_version = excluded.schema_version, + -- Preserve both timestamps exactly as they are from the materializer + created_at = excluded.created_at, + updated_at = excluded.updated_at, + inherited_from_version_id = excluded.inherited_from_version_id, + inheritance_delete_marker = excluded.inheritance_delete_marker, + change_id = excluded.change_id, + commit_id = excluded.commit_id + `); + + try { + for (const row of schemaRows) { + const isDeletion = + row.snapshot_content === null || row.snapshot_content === undefined; + stmt.bind([ + row.entity_id, + row.schema_key, + row.file_id, + row.version_id, + row.plugin_key, + row.snapshot_content, // jsonb() conversion happens in SQL + row.schema_version, + row.created_at, // Preserve original created_at + row.updated_at, // Preserve original updated_at + row.inherited_from_version_id, + isDeletion ? 1 : 0, // inheritance_delete_marker + row.change_id, + row.commit_id, + ]); + stmt.step(); + stmt.reset(); + } + } finally { + stmt.finalize(); + } + } +} + +/** + * Ensures a table exists and updates the cache. + * Duplicated from update-state-cache.ts to avoid circular dependency. + */ +function ensureTableExists( + sqlite: SqliteWasmDatabase, + tableName: string +): void { + // Use shared creator (idempotent) and update the cache set + createSchemaCacheTable({ lix: { sqlite } as any, tableName }); + const tableCache = getStateCacheV2Tables({ sqlite } as any); + if (!tableCache.has(tableName)) tableCache.add(tableName); } diff --git a/packages/lix-sdk/src/state/cache/schema.bench.ts b/packages/lix-sdk/src/state/cache/schema.bench.ts index f50e8db94b..7c84d1d911 100644 --- a/packages/lix-sdk/src/state/cache/schema.bench.ts +++ b/packages/lix-sdk/src/state/cache/schema.bench.ts @@ -183,7 +183,7 @@ bench("complex OR query (deletionReconciliation pattern)", async () => { for (let i = 0; i < ROW_NUM; i++) { const snapshotContent = { - entity_id: `changeset::entity-${i}`, + entity_id: `changeset~entity-${i}`, schema_key: "lix_change_set_element", file_id: "lix", change_id: `change-${i}`, @@ -194,7 +194,7 @@ bench("complex OR query (deletionReconciliation pattern)", async () => { }; rows.push({ - entity_id: `changeset::entity-${i}`, + entity_id: `changeset~entity-${i}`, schema_key: "lix_change_set_element", file_id: "lix", version_id: "global", @@ -218,7 +218,7 @@ bench("complex OR query (deletionReconciliation pattern)", async () => { const userChanges: any = []; for (let i = 0; i < ROW_NUM / 10; i++) { userChanges.push({ - entity_id: `changeset::entity-${i}`, + entity_id: `changeset~entity-${i}`, schema_key: "lix_change_set_element", file_id: "lix", }); @@ -233,7 +233,7 @@ bench("complex OR query (deletionReconciliation pattern)", async () => { sql`json_extract(snapshot_content, '$.schema_key')`.as("schema_key"), sql`json_extract(snapshot_content, '$.file_id')`.as("file_id"), ]) - .where("entity_id", "like", "changeset::%") + .where("entity_id", "like", "changeset~%") .where("schema_key", "=", "lix_change_set_element") .where("file_id", "=", "lix") .where("version_id", "=", "global") diff --git a/packages/lix-sdk/src/state/cache/schema.test.ts b/packages/lix-sdk/src/state/cache/schema.test.ts new file mode 100644 index 0000000000..050c431bba --- /dev/null +++ b/packages/lix-sdk/src/state/cache/schema.test.ts @@ -0,0 +1,111 @@ +import { test, expect } from "vitest"; +import 
{ openLix } from "../../lix/open-lix.js"; +import { type Kysely } from "kysely"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import { updateStateCache } from "./update-state-cache.js"; + +test("selecting from vtable queries per-schema physical tables", async () => { + const lix = await openLix({}); + const db = lix.db as unknown as Kysely; + + // Use the direct function to insert rows into different schemas + updateStateCache({ + lix, + changes: [ + { + id: "change1", + entity_id: "entity1", + schema_key: "schema_a", + file_id: "file1", + plugin_key: "plugin1", + snapshot_content: JSON.stringify({ data: "a1" }), + schema_version: "1.0", + created_at: "2024-01-01", + }, + { + id: "change2", + entity_id: "entity2", + schema_key: "schema_b", + file_id: "file2", + plugin_key: "plugin1", + snapshot_content: JSON.stringify({ data: "b1" }), + schema_version: "1.0", + created_at: "2024-01-01", + }, + { + id: "change3", + entity_id: "entity3", + schema_key: "schema_a", + file_id: "file3", + plugin_key: "plugin1", + snapshot_content: JSON.stringify({ data: "a2" }), + schema_version: "1.0", + created_at: "2024-01-01", + }, + ], + commit_id: "commit1", + version_id: "v1", + }); + + // Test 1: Select test rows from vtable (filter by our specific test data) + const allRows = await db + .selectFrom("internal_state_cache") + .select(["entity_id", "schema_key", "file_id"]) + .where("schema_key", "in", ["schema_a", "schema_b"]) + .orderBy("entity_id") + .execute(); + + expect(allRows).toHaveLength(3); + expect(allRows[0]).toMatchObject({ + entity_id: "entity1", + schema_key: "schema_a", + file_id: "file1", + }); + expect(allRows[1]).toMatchObject({ + entity_id: "entity2", + schema_key: "schema_b", + file_id: "file2", + }); + expect(allRows[2]).toMatchObject({ + entity_id: "entity3", + schema_key: "schema_a", + file_id: "file3", + }); + + // Test 2: Select with schema_key filter (should query single physical table) + const schemaARows = await db + .selectFrom("internal_state_cache") + .select(["entity_id", "schema_key", "file_id"]) + .where("schema_key", "=", "schema_a") + .orderBy("entity_id") + .execute(); + + expect(schemaARows).toHaveLength(2); + expect(schemaARows[0]?.entity_id).toBe("entity1"); + expect(schemaARows[1]?.entity_id).toBe("entity3"); + + // Test 3: Select with entity_id filter + const entity2Row = await db + .selectFrom("internal_state_cache") + .select(["entity_id", "schema_key", "file_id"]) + .where("entity_id", "=", "entity2") + .execute(); + + expect(entity2Row).toHaveLength(1); + expect(entity2Row[0]).toMatchObject({ + entity_id: "entity2", + schema_key: "schema_b", + file_id: "file2", + }); + + // Test 4: Select with multiple filters + const filteredRow = await db + .selectFrom("internal_state_cache") + .select(["entity_id", "schema_key", "file_id"]) + .where("schema_key", "=", "schema_a") + .where("file_id", "=", "file3") + .execute(); + + expect(filteredRow).toHaveLength(1); + expect(filteredRow[0]?.entity_id).toBe("entity3"); +}); diff --git a/packages/lix-sdk/src/state/cache/schema.ts b/packages/lix-sdk/src/state/cache/schema.ts index 168684e709..bb90bedc58 100644 --- a/packages/lix-sdk/src/state/cache/schema.ts +++ b/packages/lix-sdk/src/state/cache/schema.ts @@ -1,51 +1,527 @@ -import type { Selectable } from "kysely"; +import type { SqliteWasmDatabase } from "sqlite-wasm-kysely"; import type { Lix } from "../../lix/open-lix.js"; -export function applyStateCacheSchema(lix: Pick): void { - lix.sqlite.exec(` - CREATE TABLE IF NOT EXISTS 
internal_state_cache ( - entity_id TEXT NOT NULL, - schema_key TEXT NOT NULL, - file_id TEXT NOT NULL, - version_id TEXT NOT NULL, - plugin_key TEXT NOT NULL, - snapshot_content BLOB, -- JSONB content, NULL for deletions - schema_version TEXT NOT NULL, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - inherited_from_version_id TEXT, - inheritance_delete_marker INTEGER DEFAULT 0, -- Flag for copy-on-write deletion markers - change_id TEXT, - commit_id TEXT, -- Allow NULL until commit is created - PRIMARY KEY (entity_id, schema_key, file_id, version_id) - -- 8 = strictly JSONB - -- https://www.sqlite.org/json1.html#jvalid - CHECK (snapshot_content IS NULL OR json_valid(snapshot_content, 8)), - -- Ensure content is either NULL or a JSON object (not string, array, etc) - -- This prevents double-stringified JSON from being stored - CHECK (snapshot_content IS NULL OR json_type(snapshot_content) = 'object') - ) strict; - - -- Index for fast version_id filtering - CREATE INDEX IF NOT EXISTS idx_internal_state_cache_version_id - ON internal_state_cache (version_id); -`); -} - -export type InternalStateCacheRow = Selectable; +export type InternalStateCache = InternalStateCacheTable; +// Type definition for the cache v2 virtual table export type InternalStateCacheTable = { entity_id: string; schema_key: string; file_id: string; version_id: string; plugin_key: string; - snapshot_content: string | null; // JSON string, NULL for deletions + snapshot_content: string | null; // BLOB stored as string/JSON schema_version: string; created_at: string; updated_at: string; inherited_from_version_id: string | null; - inheritance_delete_marker: number; // 1 for copy-on-write deletion markers, 0 otherwise - change_id: string; + inheritance_delete_marker: number; // 0 or 1 + change_id: string | null; commit_id: string | null; }; +// Virtual table schema definition - matches existing internal_state_cache structure +const CACHE_VTAB_CREATE_SQL = `CREATE TABLE x( + _pk HIDDEN TEXT NOT NULL PRIMARY KEY, + entity_id TEXT NOT NULL, + schema_key TEXT NOT NULL, + file_id TEXT NOT NULL, + version_id TEXT NOT NULL, + plugin_key TEXT NOT NULL, + snapshot_content BLOB, + schema_version TEXT NOT NULL, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + inherited_from_version_id TEXT, + inheritance_delete_marker INTEGER DEFAULT 0, + change_id TEXT, + commit_id TEXT +) WITHOUT ROWID;`; + +// Cache of physical tables scoped to each Lix instance +// Using WeakMap ensures proper cleanup when Lix instances are garbage collected +const stateCacheV2TablesMap = new WeakMap>(); + +// Export a getter function to access the cache for a specific Lix instance +export function getStateCacheV2Tables(lix: Pick): Set { + let cache = stateCacheV2TablesMap.get(lix); + if (!cache) { + cache = new Set(); + stateCacheV2TablesMap.set(lix, cache); + } + return cache; +} + +export function applyStateCacheV2Schema( + lix: Pick +): void { + const { sqlite } = lix; + + // Get or create cache for this Lix instance + const tableCache = getStateCacheV2Tables(lix); + + // Initialize cache with existing tables on startup + const existingTables = sqlite.exec({ + sql: `SELECT name FROM sqlite_schema WHERE type='table' AND name LIKE 'internal_state_cache_%'`, + returnValue: "resultRows", + }) as any[]; + + if (existingTables) { + for (const row of existingTables) { + tableCache.add(row[0] as string); + } + } + + // Note: INSERT/UPDATE/DELETE operations are now handled by updateStateCacheV2() + // which writes directly to physical tables for better 
performance. + // This vtable is now read-only. + + // Create virtual table using the proper SQLite WASM API + const capi = sqlite.sqlite3.capi; + const module = new capi.sqlite3_module(); + + // Store cursor states - maps cursor pointer to state + const cursorStates = new Map(); + + module.installMethods( + { + xCreate: ( + dbHandle: any, + _pAux: any, + _argc: number, + _argv: any, + pVTab: any + ) => { + const result = capi.sqlite3_declare_vtab( + dbHandle, + CACHE_VTAB_CREATE_SQL + ); + if (result !== capi.SQLITE_OK) { + return result; + } + + sqlite.sqlite3.vtab.xVtab.create(pVTab); + return capi.SQLITE_OK; + }, + + xConnect: ( + dbHandle: any, + _pAux: any, + _argc: number, + _argv: any, + pVTab: any + ) => { + const result = capi.sqlite3_declare_vtab( + dbHandle, + CACHE_VTAB_CREATE_SQL + ); + if (result !== capi.SQLITE_OK) { + return result; + } + + sqlite.sqlite3.vtab.xVtab.create(pVTab); + return capi.SQLITE_OK; + }, + + xBestIndex: (pVTab: any, pIdxInfo: any) => { + const idxInfo = sqlite.sqlite3.vtab.xIndexInfo(pIdxInfo); + + // Track which columns have equality constraints + const usableConstraints: string[] = []; + let argIndex = 0; + + // Column mapping (matching the CREATE TABLE order) + const columnMap = [ + "_pk", // 0 (HIDDEN column) + "entity_id", // 1 + "schema_key", // 2 + "file_id", // 3 + "version_id", // 4 + "plugin_key", // 5 + "snapshot_content", // 6 + "schema_version", // 7 + "created_at", // 8 + "updated_at", // 9 + "inherited_from_version_id", // 10 + "inheritance_delete_marker", // 11 + "change_id", // 12 + "commit_id", // 13 + ]; + + // Process constraints + // @ts-expect-error - idxInfo.$nConstraint is not defined in the type + for (let i = 0; i < idxInfo.$nConstraint; i++) { + // @ts-expect-error - idxInfo.nthConstraint is not defined in the type + const constraint = idxInfo.nthConstraint(i); + + // Only handle equality constraints that are usable + if ( + constraint.$op === capi.SQLITE_INDEX_CONSTRAINT_EQ && + constraint.$usable + ) { + const columnName = columnMap[constraint.$iColumn]; + if (columnName) { + usableConstraints.push(columnName); + + // Mark this constraint as used + // @ts-expect-error - idxInfo.nthConstraintUsage is not defined in the type + idxInfo.nthConstraintUsage(i).$argvIndex = ++argIndex; + } + } + } + + const fullTableCost = 1000000; // Default cost for full table scan + const fullTableRows = 10000000; + + // Set the index string to pass column names to xFilter + if (usableConstraints.length > 0) { + const idxStr = usableConstraints.join(","); + // @ts-expect-error - idxInfo.$idxStr is not defined in the type + idxInfo.$idxStr = sqlite.sqlite3.wasm.allocCString(idxStr, false); + // @ts-expect-error - idxInfo.$needToFreeIdxStr is not defined in the type + idxInfo.$needToFreeIdxStr = 1; + + // Lower cost when we can use filters (especially schema_key) + // Schema_key is the most selective since it determines which table to query + const hasSchemaKey = usableConstraints.includes("schema_key"); + // @ts-expect-error - idxInfo.$estimatedCost is not defined in the type + idxInfo.$estimatedCost = hasSchemaKey + ? fullTableCost / 1000 + : fullTableCost / (usableConstraints.length + 1); + // @ts-expect-error - idxInfo.$estimatedRows is not defined in the type + idxInfo.$estimatedRows = hasSchemaKey + ? 
1000 + : Math.ceil(fullTableRows / (usableConstraints.length + 1)); + } else { + // @ts-expect-error - idxInfo.$needToFreeIdxStr is not defined in the type + idxInfo.$needToFreeIdxStr = 0; + + // Higher cost for full table scan + // @ts-expect-error - idxInfo.$estimatedCost is not defined in the type + idxInfo.$estimatedCost = fullTableCost; + // @ts-expect-error - idxInfo.$estimatedRows is not defined in the type + idxInfo.$estimatedRows = fullTableRows; + } + + return capi.SQLITE_OK; + }, + + xDisconnect: () => { + return capi.SQLITE_OK; + }, + + xDestroy: () => { + return capi.SQLITE_OK; + }, + + xOpen: (_pVTab: any, pCursor: any) => { + const cursor = sqlite.sqlite3.vtab.xCursor.create(pCursor); + cursorStates.set(cursor.pointer, { + tables: [], // List of tables to query + currentTableIndex: 0, // Current table being queried + currentStmt: null, // Current prepared statement + currentRows: [], // Rows from current table + currentRowIndex: 0, // Current row in current table + filters: {}, // Filters from xFilter to use in xNext + }); + return capi.SQLITE_OK; + }, + + xClose: (pCursor: any) => { + const cursorState = cursorStates.get(pCursor); + if (cursorState && cursorState.currentStmt) { + // Finalize any active statement + cursorState.currentStmt.finalize(); + } + cursorStates.delete(pCursor); + return capi.SQLITE_OK; + }, + + xFilter: ( + pCursor: any, + idxNum: number, + idxStrPtr: number, + argc: number, + argv: any + ) => { + const cursorState = cursorStates.get(pCursor); + const idxStr = sqlite.sqlite3.wasm.cstrToJs(idxStrPtr); + + // Extract filter arguments if provided + const filters: Record = {}; + if (argc > 0 && argv) { + const args = sqlite.sqlite3.capi.sqlite3_values_to_js(argc, argv); + if (idxStr) { + const columns = idxStr.split(",").filter((c) => c.length > 0); + for (let i = 0; i < Math.min(columns.length, args.length); i++) { + if (args[i] !== null) { + filters[columns[i]!] 
= args[i]; + } + } + } + } + + // Store filters in cursor state for use in xNext + cursorState.filters = filters; + + // Determine which tables to query + if (filters.schema_key) { + // Single schema_key - query single table + // Sanitize schema_key for table name - must match update-state-cache.ts + const sanitizedSchemaKey = String(filters.schema_key).replace( + /[^a-zA-Z0-9]/g, + "_" + ); + const tableName = `internal_state_cache_${sanitizedSchemaKey}`; + // Check if table exists + const tableExists = sqlite.exec({ + sql: `SELECT 1 FROM sqlite_schema WHERE type='table' AND name=?`, + bind: [tableName], + returnValue: "resultRows", + }); + + if (tableExists && tableExists.length > 0) { + cursorState.tables = [tableName]; + } else { + cursorState.tables = []; + } + } else { + // No schema_key filter - need to query all cache tables + cursorState.tables = getPhysicalTables(sqlite, tableCache); + } + + // Reset cursor state + cursorState.currentTableIndex = 0; + cursorState.currentRowIndex = 0; + cursorState.currentRows = []; + if (cursorState.currentStmt) { + cursorState.currentStmt.finalize(); + cursorState.currentStmt = null; + } + + // Load first non-empty table if available + if (cursorState.tables.length > 0) { + loadNextTable(sqlite, cursorState, filters); + // Skip empty tables at the start + while ( + cursorState.currentRows.length === 0 && + cursorState.currentTableIndex < cursorState.tables.length - 1 + ) { + cursorState.currentTableIndex++; + cursorState.currentRowIndex = 0; + loadNextTable(sqlite, cursorState, filters); + } + } + + return capi.SQLITE_OK; + }, + + xNext: (pCursor: any) => { + const cursorState = cursorStates.get(pCursor); + cursorState.currentRowIndex++; + + // Check if we need to move to next table + while ( + cursorState.currentRowIndex >= cursorState.currentRows.length && + cursorState.currentTableIndex < cursorState.tables.length + ) { + // Move to next table + cursorState.currentTableIndex++; + cursorState.currentRowIndex = 0; + cursorState.currentRows = []; + + // Finalize current statement + if (cursorState.currentStmt) { + cursorState.currentStmt.finalize(); + cursorState.currentStmt = null; + } + + // Load next table if available + if (cursorState.currentTableIndex < cursorState.tables.length) { + // Use the stored filters from xFilter + loadNextTable(sqlite, cursorState, cursorState.filters || {}); + // If the table we just loaded is also empty, continue loop + } + } + + return capi.SQLITE_OK; + }, + + xEof: (pCursor: any) => { + const cursorState = cursorStates.get(pCursor); + // Check if we've run out of tables entirely + if (cursorState.currentTableIndex >= cursorState.tables.length) { + return 1; + } + // Check if we're past the end of the current table's rows + // This handles both empty tables and exhausted tables + if (cursorState.currentRowIndex >= cursorState.currentRows.length) { + // If this is the last table and we're out of rows, we're at EOF + if (cursorState.currentTableIndex === cursorState.tables.length - 1) { + return 1; + } + // Otherwise, there might be more tables, so not EOF yet + // xNext will handle moving to the next table + return 0; + } + return 0; + }, + + xColumn: (pCursor: any, pContext: any, iCol: number) => { + const cursorState = cursorStates.get(pCursor); + const row = cursorState.currentRows[cursorState.currentRowIndex]; + + if (!row) { + capi.sqlite3_result_null(pContext); + return capi.SQLITE_OK; + } + + // Map column index to value + let value; + switch (iCol) { + case 0: { + // _pk - composite primary key (needs 
schema_key for DELETE)
+						value = `${row.entity_id}|${row.schema_key}|${row.file_id}|${row.version_id}`;
+						break;
+					}
+					case 1:
+						value = row.entity_id;
+						break;
+					case 2: // schema_key - read from row
+						value = row.schema_key;
+						break;
+					case 3:
+						value = row.file_id;
+						break;
+					case 4:
+						value = row.version_id;
+						break;
+					case 5:
+						value = row.plugin_key;
+						break;
+					case 6:
+						value = row.snapshot_content;
+						break;
+					case 7:
+						value = row.schema_version;
+						break;
+					case 8:
+						value = row.created_at;
+						break;
+					case 9:
+						value = row.updated_at;
+						break;
+					case 10:
+						value = row.inherited_from_version_id;
+						break;
+					case 11:
+						value = row.inheritance_delete_marker;
+						break;
+					case 12:
+						value = row.change_id;
+						break;
+					case 13:
+						value = row.commit_id;
+						break;
+					default:
+						value = null;
+				}
+
+				if (value === null || value === undefined) {
+					capi.sqlite3_result_null(pContext);
+				} else {
+					capi.sqlite3_result_js(pContext, value);
+				}
+
+				return capi.SQLITE_OK;
+			},
+
+			xRowid: () => {
+				// Not used - vtable doesn't use rowids
+				return capi.SQLITE_ERROR;
+			},
+
+			xUpdate: () => {
+				// All write operations should use updateStateCacheV2()
+				return capi.SQLITE_READONLY;
+			},
+		},
+		false
+	);
+
+	capi.sqlite3_create_module(
+		sqlite.pointer!,
+		"internal_state_cache_vtable",
+		module,
+		0
+	);
+
+	// Create the virtual table
+	sqlite.exec(
+		`CREATE VIRTUAL TABLE IF NOT EXISTS internal_state_cache USING internal_state_cache_vtable();`
+	);
+}
+
+// Helper function to get list of physical cache tables
+function getPhysicalTables(
+	sqlite: SqliteWasmDatabase,
+	cache: Set<string>
+): string[] {
+	// Always refresh cache from database since direct function may have created new tables
+	const existingTables = sqlite.exec({
+		sql: `SELECT name FROM sqlite_schema WHERE type='table' AND name LIKE 'internal_state_cache_%'`,
+		returnValue: "resultRows",
+	}) as any[];
+
+	if (existingTables) {
+		for (const row of existingTables) {
+			cache.add(row[0] as string);
+		}
+	}
+
+	// Convert Set to array and filter out base tables
+	return Array.from(cache).filter(
+		(name) => name !== "internal_state_cache"
+	);
+}
+
+// Helper function to load rows from next table
+function loadNextTable(
+	sqlite: SqliteWasmDatabase,
+	cursorState: any,
+	filters: Record<string, any>
+): void {
+	if (cursorState.currentTableIndex >= cursorState.tables.length) {
+		return;
+	}
+
+	const tableName = cursorState.tables[cursorState.currentTableIndex];
+
+	// Build query with filters (except schema_key which is implicit in table name)
+	let sql = `SELECT * FROM ${tableName}`;
+	const whereClauses: string[] = [];
+	const bindParams: any[] = [];
+
+	// Don't filter tombstones here - let the view decide what to filter
+	// This allows the resolved view to check for tombstones when needed
+
+	for (const [column, value] of Object.entries(filters)) {
+		if (column !== "schema_key") {
+			// Skip schema_key as it's implicit
+			whereClauses.push(`${column} = ?`);
+			bindParams.push(value);
+		}
+	}
+
+	if (whereClauses.length > 0) {
+		sql += ` WHERE ${whereClauses.join(" AND ")}`;
+	}
+
+	const result = sqlite.exec({
+		sql,
+		bind: bindParams,
+		returnValue: "resultRows",
+		rowMode: "object",
+	});
+
+	cursorState.currentRows = result || [];
+}
diff --git a/packages/lix-sdk/src/state/cache/update-state-cache.bench.ts b/packages/lix-sdk/src/state/cache/update-state-cache.bench.ts
new file mode 100644
index 0000000000..a8d8634e65
--- /dev/null
+++ b/packages/lix-sdk/src/state/cache/update-state-cache.bench.ts
@@ -0,0 +1,158 @@
+import { bench, describe } from "vitest"; +import { openLix } from "../../lix/open-lix.js"; +import { updateStateCache } from "./update-state-cache.js"; +import { timestamp } from "../../deterministic/timestamp.js"; +import type { LixChangeRaw } from "../../change/schema.js"; + +/** + * Regression benchmarks for updateStateCacheV2 performance + * + * Key regression tests: + * - Standard batch (1000 records) - baseline performance + * - Multi-schema distribution (5 schemas) - tests per-schema table handling + * - Large batch (10000 records) - tests scalability + */ + +// Helper function to generate test changes +function generateChanges( + count: number, + schemas: string[], + ts: string, + options: { + prefix?: string; + deletionRatio?: number; // 0-1, percentage of changes that are deletions + updateRatio?: number; // 0-1, percentage of non-deletions that are updates to existing entities + } = {} +): LixChangeRaw[] { + const { prefix = "entity", deletionRatio = 0, updateRatio = 0 } = options; + const changes: LixChangeRaw[] = []; + + for (let i = 0; i < count; i++) { + const schemaIndex = i % schemas.length; + const isDeleted = Math.random() < deletionRatio; + const isUpdate = !isDeleted && Math.random() < updateRatio; + + changes.push({ + id: `change-${prefix}-${i}`, + entity_id: isUpdate ? `${prefix}-${Math.floor(i / 2)}` : `${prefix}-${i}`, // Updates reuse entity IDs + schema_key: schemas[schemaIndex]!, + schema_version: "1.0", + file_id: `file-${i % 100}`, + plugin_key: "benchmark_plugin", + snapshot_content: isDeleted + ? null + : JSON.stringify({ + id: `${prefix}-${i}`, + name: `Benchmark ${i}`, + data: `Test data for entity ${i}`, + timestamp: ts, + complex_field: { + nested: { + value: i * 2, + array: [1, 2, 3, i], + metadata: `metadata-${i}`, + }, + }, + }), + created_at: ts, + }); + } + + return changes; +} + +describe("updateStateCacheV2 Regression Tests", () => { + const schemas = [ + "lix_file", + "lix_change", + "lix_discussion", + "lix_comment", + "lix_account", + ]; + + bench("Standard batch - 1000 records", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + }, + ], + }); + + const ts = timestamp({ lix }); + const changes = generateChanges(1000, schemas, ts); + + updateStateCache({ + lix, + changes, + commit_id: "commit-standard", + version_id: "v1", + }); + }); + + bench("Multi-schema - 1000 records across 5 schemas", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + }, + ], + }); + + const ts = timestamp({ lix }); + const changes = generateChanges(1000, schemas, ts); + + updateStateCache({ + lix, + changes, + commit_id: "commit-multi", + version_id: "v1", + }); + }); + + bench.skip( + "Warm cache - 1000 records with 100K pre-existing rows", + async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + }, + ], + }); + + const ts = timestamp({ lix }); + + // Pre-populate with 100K rows across 5 schemas (20K per schema) + // Process in 10K batches to avoid memory issues + for (let i = 0; i < 10; i++) { + const warmupChanges = generateChanges(10000, schemas, ts, { + prefix: `warmup-${i}`, + }); + updateStateCache({ + lix, + changes: warmupChanges, + commit_id: `warmup-${i}`, + version_id: "v0", + }); + } + + // Now benchmark 1000 changes against the warm cache with deep B-trees + const changes = 
generateChanges(1000, schemas, ts, { + prefix: "bench", + deletionRatio: 0.05, + updateRatio: 0.15, + }); + + updateStateCache({ + lix, + changes, + commit_id: "commit-warm", + version_id: "v1", + }); + } + ); +}); diff --git a/packages/lix-sdk/src/state/cache/update-state-cache.test.ts b/packages/lix-sdk/src/state/cache/update-state-cache.test.ts index 8cac115ca2..c3c23d0806 100644 --- a/packages/lix-sdk/src/state/cache/update-state-cache.test.ts +++ b/packages/lix-sdk/src/state/cache/update-state-cache.test.ts @@ -6,7 +6,7 @@ import { createVersion } from "../../version/create-version.js"; import { sql, type Kysely } from "kysely"; import type { LixInternalDatabaseSchema } from "../../database/schema.js"; import type { LixChangeRaw } from "../../change/schema.js"; -import type { InternalStateCacheRow } from "./schema.js"; +import type { InternalStateCache } from "./schema.js"; test("inserts into cache based on change", async () => { const lix = await openLix({ @@ -35,7 +35,7 @@ test("inserts into cache based on change", async () => { const commitId = "test-commit-456"; const versionId = "global"; - // Call updateStateCache + // Call updateStateCacheV2 updateStateCache({ lix, changes: [testChange], @@ -73,7 +73,7 @@ test("inserts into cache based on change", async () => { inheritance_delete_marker: 0, change_id: testChange.id, commit_id: commitId, - } satisfies InternalStateCacheRow); + } satisfies InternalStateCache); }); test("upserts cache entry on conflict", async () => { @@ -180,202 +180,13 @@ test("upserts cache entry on conflict", async () => { plugin_key: updatedChange.plugin_key, // Should be updated snapshot_content: JSON.parse(updatedChange.snapshot_content as any), // Should be updated schema_version: updatedChange.schema_version, // Should be updated - created_at: initialTimestamp, // Should remain from initial insert + created_at: initialTimestamp, // Should remain from initial insert (v2 now matches v1 behavior) updated_at: updateTimestamp, // Should be updated inherited_from_version_id: null, inheritance_delete_marker: 0, change_id: updatedChange.id, // Should be updated commit_id: updatedCommitId, // Should be updated - } satisfies InternalStateCacheRow); -}); - -test("moves cache entries to children on deletion, clears when no children remain", async () => { - const lix = await openLix({ - keyValues: [ - { - key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, - }, - ], - }); - - // Create inheritance chain: parent -> child1, child2 - await createVersion({ - lix, - id: "parent", - inherits_from_version_id: "global", - }); - await createVersion({ - lix, - id: "child1", - inherits_from_version_id: "parent", - }); - await createVersion({ - lix, - id: "child2", - inherits_from_version_id: "parent", - }); - - const initialTimestamp = timestamp({ lix }); - const testEntity = "test-entity-cleanup"; - - // 1. 
Create entity in parent - const createChange: LixChangeRaw = { - id: "create-change", - entity_id: testEntity, - schema_key: "lix_test", - schema_version: "1.0", - file_id: "lix", - plugin_key: "test_plugin", - snapshot_content: JSON.stringify({ id: testEntity, value: "parent-data" }), - created_at: initialTimestamp, - }; - - updateStateCache({ - lix, - changes: [createChange], - commit_id: "parent-commit", - version_id: "parent", - }); - - const intDb = lix.db as unknown as Kysely; - - // Verify entity exists only in parent - const initialCache = await intDb - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", testEntity) - .execute(); - - expect(initialCache).toHaveLength(1); - expect(initialCache[0]?.version_id).toBe("parent"); - - // 2. Delete from parent - should move to child1 and child2 - const deleteFromParentTimestamp = timestamp({ lix }); - const deleteFromParentChange: LixChangeRaw = { - id: "delete-from-parent", - entity_id: testEntity, - schema_key: "lix_test", - schema_version: "1.0", - file_id: "lix", - plugin_key: "test_plugin", - snapshot_content: null, // Deletion - created_at: deleteFromParentTimestamp, - }; - - updateStateCache({ - lix, - changes: [deleteFromParentChange], - commit_id: "parent-delete-commit", - version_id: "parent", - }); - - // Verify entity moved to child1 and child2, parent entry removed - // Note: We need to exclude tombstones (inheritance_delete_marker = 1) and entries without content - const cacheAfterParentDelete = await intDb - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", testEntity) - .where("inheritance_delete_marker", "=", 0) // Exclude tombstones - .where("snapshot_content", "is not", null) // Exclude null snapshots - .orderBy("version_id", "asc") - .execute(); - - expect(cacheAfterParentDelete).toHaveLength(2); - expect(cacheAfterParentDelete[0]?.version_id).toBe("child1"); - expect(cacheAfterParentDelete[1]?.version_id).toBe("child2"); - // Both should have the same original data (Kysely auto-parses JSON) - expect(cacheAfterParentDelete[0]?.snapshot_content).toEqual({ - id: testEntity, - value: "parent-data", - }); - expect(cacheAfterParentDelete[1]?.snapshot_content).toEqual({ - id: testEntity, - value: "parent-data", - }); - - // 3. 
Delete from child1 - should remove child1 entry but keep child2 - const deleteFromChild1Timestamp = timestamp({ lix }); - const deleteFromChild1Change: LixChangeRaw = { - id: "delete-from-child1", - entity_id: testEntity, - schema_key: "lix_test", - schema_version: "1.0", - file_id: "lix", - plugin_key: "test_plugin", - snapshot_content: null, // Deletion - created_at: deleteFromChild1Timestamp, - }; - - updateStateCache({ - lix, - changes: [deleteFromChild1Change], - commit_id: "child1-delete-commit", - version_id: "child1", - }); - - // Verify only child2 has the entity now - const cacheAfterChild1Delete = await intDb - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", testEntity) - .where("inheritance_delete_marker", "=", 0) // Exclude tombstones - .where("snapshot_content", "is not", null) // Exclude null snapshots - .execute(); - - expect(cacheAfterChild1Delete).toHaveLength(1); - expect(cacheAfterChild1Delete[0]?.version_id).toBe("child2"); - expect(cacheAfterChild1Delete[0]?.snapshot_content).toEqual({ - id: testEntity, - value: "parent-data", - }); - - // 4. Delete from child2 - should remove the entity entirely from cache - const deleteFromChild2Timestamp = timestamp({ lix }); - const deleteFromChild2Change: LixChangeRaw = { - id: "delete-from-child2", - entity_id: testEntity, - schema_key: "lix_test", - schema_version: "1.0", - file_id: "lix", - plugin_key: "test_plugin", - snapshot_content: null, // Deletion - created_at: deleteFromChild2Timestamp, - }; - - updateStateCache({ - lix, - changes: [deleteFromChild2Change], - commit_id: "child2-delete-commit", - version_id: "child2", - }); - - // Verify tombstones remain in cache (new behavior: tombstones are permanent) - // The important thing is that state_all queries show no active entities - const finalCache = await intDb - .selectFrom("internal_state_cache") - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .where("entity_id", "=", testEntity) - .execute(); - - // Should have 3 tombstones (one for each version where we deleted) - expect(finalCache).toHaveLength(3); - expect(finalCache.every(c => c.inheritance_delete_marker === 1)).toBe(true); - expect(finalCache.every(c => c.snapshot_content === null)).toBe(true); - - // More importantly, verify that state_all shows no active entities - const stateAllResults = await lix.db - .selectFrom("state_all") - .selectAll() - .where("entity_id", "=", testEntity) - .execute(); - - // This is what really matters - no visible entities - expect(stateAllResults).toHaveLength(0); + } satisfies InternalStateCache); }); test("handles inheritance chain deletions with tombstones", async () => { @@ -392,17 +203,17 @@ test("handles inheritance chain deletions with tombstones", async () => { await createVersion({ lix, id: "parent-version", - inherits_from_version_id: "global", + inheritsFrom: { id: "global" }, }); await createVersion({ lix, id: "child-version", - inherits_from_version_id: "parent-version", + inheritsFrom: { id: "parent-version" }, }); await createVersion({ lix, id: "subchild-version", - inherits_from_version_id: "child-version", + inheritsFrom: { id: "child-version" }, }); const baseTimestamp = timestamp({ lix }); @@ -503,121 +314,188 @@ test("handles inheritance chain deletions with tombstones", async () => { expect(subchildCacheAfterDelete).toHaveLength(0); - // 7. 
Verify state_all queries return correct results (tombstones filtered out) - const parentStateAll = await lix.db - .selectFrom("state_all") + // 7. Verify cache entries are correct (tombstones filtered out) + const parentStateAll = await intDb + .selectFrom("internal_state_cache") .selectAll() + .select(sql`json(snapshot_content)`.as("snapshot_content")) .where("entity_id", "=", testEntity) .where("version_id", "=", "parent-version") + .where("inheritance_delete_marker", "=", 0) + .where("snapshot_content", "is not", null) .execute(); - const childStateAll = await lix.db - .selectFrom("state_all") + const childStateAll = await intDb + .selectFrom("internal_state_cache") .selectAll() + .select(sql`json(snapshot_content)`.as("snapshot_content")) .where("entity_id", "=", testEntity) .where("version_id", "=", "child-version") + .where("inheritance_delete_marker", "=", 0) + .where("snapshot_content", "is not", null) .execute(); - const subchildStateAll = await lix.db - .selectFrom("state_all") + const subchildStateAll = await intDb + .selectFrom("internal_state_cache") .selectAll() + .select(sql`json(snapshot_content)`.as("snapshot_content")) .where("entity_id", "=", testEntity) .where("version_id", "=", "subchild-version") + .where("inheritance_delete_marker", "=", 0) + .where("snapshot_content", "is not", null) .execute(); - // Parent should show the entity through state_all + // Parent should show the entity expect(parentStateAll).toHaveLength(1); expect(parentStateAll[0]?.snapshot_content).toEqual({ id: testEntity, value: "parent-data", }); - // Child should show NO entity through state_all (tombstone filtered out) + // Child should show NO entity (tombstone filtered out) expect(childStateAll).toHaveLength(0); - // Subchild should show NO entity through state_all (inherits deletion from child) + // Subchild should show NO entity (inherits deletion from child) expect(subchildStateAll).toHaveLength(0); }); -test("copied entries retain original commit_id during deletion copy-down", async () => { +test("handles duplicate entity updates - last change wins", async () => { const lix = await openLix({ keyValues: [ - { key: "lix_deterministic_mode", value: { enabled: true, bootstrap: true } }, + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + }, ], }); - // Create inheritance chain: parent -> child1, child2 - await createVersion({ lix, id: "parent-cid", inherits_from_version_id: "global" }); - await createVersion({ lix, id: "child1-cid", inherits_from_version_id: "parent-cid" }); - await createVersion({ lix, id: "child2-cid", inherits_from_version_id: "parent-cid" }); - - const t1 = timestamp({ lix }); - const entityId = "entity-commit-propagation"; + // Create test changes for the same entity + const change1: LixChangeRaw = { + id: "change-1", + entity_id: "test-entity", + schema_key: "test-schema", + file_id: "test-file", + plugin_key: "test-plugin", + snapshot_content: JSON.stringify({ value: "first" }), + schema_version: "1.0", + created_at: "2024-01-01T00:00:00Z", + }; - // Create in parent with an original commit id - const createChange: LixChangeRaw = { - id: "change-create-cid", - entity_id: entityId, - schema_key: "lix_test", + const change2: LixChangeRaw = { + id: "change-2", + entity_id: "test-entity", // Same entity + schema_key: "test-schema", + file_id: "test-file", + plugin_key: "test-plugin", + snapshot_content: JSON.stringify({ value: "second" }), schema_version: "1.0", - file_id: "lix", - plugin_key: "test_plugin", - snapshot_content: 
JSON.stringify({ id: entityId, value: "data" }), - created_at: t1, + created_at: "2024-01-01T00:01:00Z", // Later timestamp }; - const originalCommitId = "original-commit-id-001"; - updateStateCache({ lix, changes: [createChange], commit_id: originalCommitId, version_id: "parent-cid" }); + // Apply first change + updateStateCache({ + lix, + changes: [change1], + commit_id: "commit-1", + version_id: "version-1", + }); - const intDb = lix.db as unknown as Kysely; + // Apply second change (should overwrite first) + updateStateCache({ + lix, + changes: [change2], + commit_id: "commit-2", + version_id: "version-1", + }); - // Sanity: parent entry has original commit id - const parentEntry = await intDb + // Query the cache to verify only the latest change is present + const result = await (lix.db as unknown as Kysely) .selectFrom("internal_state_cache") .selectAll() - .where("entity_id", "=", entityId) - .where("version_id", "=", "parent-cid") - .executeTakeFirstOrThrow(); - expect(parentEntry.commit_id).toBe(originalCommitId); + .select(sql`json(snapshot_content)`.as("snapshot_content")) + .where("entity_id", "=", "test-entity") + .where("file_id", "=", "test-file") + .where("version_id", "=", "version-1") + .execute(); - // Delete in parent with a different commit id; this should copy entries to children - const t2 = timestamp({ lix }); - const deleteChange: LixChangeRaw = { - id: "change-delete-cid", - entity_id: entityId, - schema_key: "lix_test", - schema_version: "1.0", - file_id: "lix", - plugin_key: "test_plugin", - snapshot_content: null, - created_at: t2, - }; - const deletionCommitId = "deletion-commit-id-002"; - updateStateCache({ lix, changes: [deleteChange], commit_id: deletionCommitId, version_id: "parent-cid" }); + // Should have exactly one row (latest change wins) + expect(result).toHaveLength(1); + + // Should be the second change + expect(result[0]!.change_id).toBe("change-2"); + expect(result[0]!.snapshot_content).toEqual({ value: "second" }); + expect(result[0]!.created_at).toBe("2024-01-01T00:00:00Z"); // Should preserve original created_at + expect(result[0]!.updated_at).toBe("2024-01-01T00:01:00Z"); // Should update updated_at +}); - // Verify copied entries exist in both children with the ORIGINAL commit id - const childEntries = await intDb +test("handles batch updates with duplicates - last in batch wins", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + }, + ], + }); + + // Create multiple changes for the same entity in a single batch + const changes: LixChangeRaw[] = [ + { + id: "change-1", + entity_id: "test-entity", + schema_key: "test-schema", + file_id: "test-file", + plugin_key: "test-plugin", + snapshot_content: JSON.stringify({ value: "first" }), + schema_version: "1.0", + created_at: "2024-01-01T00:00:00Z", + }, + { + id: "change-2", + entity_id: "test-entity", // Same entity + schema_key: "test-schema", + file_id: "test-file", + plugin_key: "test-plugin", + snapshot_content: JSON.stringify({ value: "second" }), + schema_version: "1.0", + created_at: "2024-01-01T00:01:00Z", + }, + { + id: "change-3", + entity_id: "test-entity", // Same entity again + schema_key: "test-schema", + file_id: "test-file", + plugin_key: "test-plugin", + snapshot_content: JSON.stringify({ value: "third" }), + schema_version: "1.0", + created_at: "2024-01-01T00:02:00Z", + }, + ]; + + // Apply all changes in a single batch + updateStateCache({ + lix, + changes, + commit_id: "commit-1", + 
version_id: "version-1", + }); + + // Query the cache to verify only the latest change is present + const result = await (lix.db as unknown as Kysely) .selectFrom("internal_state_cache") .selectAll() - .where("entity_id", "=", entityId) - .where("inheritance_delete_marker", "=", 0) - .where("snapshot_content", "is not", null) - .where("version_id", "in", ["child1-cid", "child2-cid"]) + .select(sql`json(snapshot_content)`.as("snapshot_content")) + .where("entity_id", "=", "test-entity") + .where("file_id", "=", "test-file") + .where("version_id", "=", "version-1") .execute(); - expect(childEntries).toHaveLength(2); - for (const entry of childEntries) { - expect(["child1-cid", "child2-cid"]).toContain(entry.version_id); - expect(entry.commit_id).toBe(originalCommitId); - } + // Should have exactly one row (last change in batch wins) + expect(result).toHaveLength(1); - // Tombstone in parent should have the deletion commit id - const tombstone = await intDb - .selectFrom("internal_state_cache") - .selectAll() - .where("entity_id", "=", entityId) - .where("version_id", "=", "parent-cid") - .where("inheritance_delete_marker", "=", 1) - .executeTakeFirstOrThrow(); - expect(tombstone.commit_id).toBe(deletionCommitId); + // Should be the third change (last in batch) + expect(result[0]!.change_id).toBe("change-3"); + expect(result[0]!.snapshot_content).toEqual({ value: "third" }); + expect(result[0]!.created_at).toBe("2024-01-01T00:00:00Z"); // Should preserve original created_at from first + expect(result[0]!.updated_at).toBe("2024-01-01T00:02:00Z"); // Should use updated_at from last }); diff --git a/packages/lix-sdk/src/state/cache/update-state-cache.ts b/packages/lix-sdk/src/state/cache/update-state-cache.ts index 158a308e9e..e2a2f25b24 100644 --- a/packages/lix-sdk/src/state/cache/update-state-cache.ts +++ b/packages/lix-sdk/src/state/cache/update-state-cache.ts @@ -1,184 +1,261 @@ -import { sql, type Kysely } from "kysely"; -import type { LixInternalDatabaseSchema } from "../../database/schema.js"; -import { executeSync } from "../../database/execute-sync.js"; -import type { LixChangeRaw } from "../../change/schema.js"; import type { Lix } from "../../lix/open-lix.js"; +import type { LixChangeRaw } from "../../change/schema.js"; +import type { Kysely } from "kysely"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import { getStateCacheV2Tables } from "./schema.js"; +import { createSchemaCacheTable } from "./create-schema-cache-table.js"; /** - * Updates cache entries with new commit_id for specific entities that were changed. - * Processes multiple changes in batch for better performance. + * Updates the state cache v2 directly to physical tables, bypassing the virtual table. + * + * This function writes directly to per-schema SQLite tables instead of going through + * the vtable for two critical performance reasons: + * + * 1. **Minimizes JS <-> WASM overhead**: Direct table access avoids the vtable's + * row-by-row callback mechanism that crosses the JS/WASM boundary for each row. * - * This function is the centralized entry point for all cache updates to ensure - * consistency and proper handling of duplicate entries (inherited vs direct). + * 2. **Enables efficient batching**: Vtables only support per-row logic, preventing + * batch optimizations like prepared statements, transactions, and bulk operations. + * Direct access allows us to batch hundreds of rows in a single transaction. 
* - * @param args - Update parameters - * @param args.lix - Lix instance with sqlite and db - * @param args.changes - Array of change objects containing entity information - * @param args.commit_id - New commit ID to set - * @param args.version_id - Version ID to update + * The vtable (schema.ts) remains read-only for SELECT queries, providing a unified + * query interface while mutations bypass it for ~50% better performance. + * + * This function handles: + * - Direct writes to per-schema physical tables for optimal performance + * - Batch inserting/updating cache entries + * - Deletion copy-down operations for inheritance + * - Tombstone management + * + * @example + * updateStateCache({ + * lix, + * changes: [change1, change2], + * commit_id: "commit-123", + * version_id: "v1" + * }); */ export function updateStateCache(args: { - lix: Pick; + lix: Pick; changes: LixChangeRaw[]; commit_id: string; version_id: string; }): void { - if (args.changes.length === 0) return; - - const intDb = args.lix.db as unknown as Kysely; - - // Separate changes into deletions and non-deletions for batch processing - const deletions = args.changes.filter((c) => c.snapshot_content === null); - const nonDeletions = args.changes.filter((c) => c.snapshot_content !== null); - - // ------------------------------- - // Upsert non-deletions in batch - // ------------------------------- - if (nonDeletions.length > 0) { - const values = nonDeletions.map((change) => ({ - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - version_id: args.version_id, - plugin_key: change.plugin_key, - // Keep using the existing jsonb(...) wrapper used elsewhere in the codebase - snapshot_content: sql`jsonb(${change.snapshot_content})` as any, - schema_version: change.schema_version, - created_at: change.created_at, - updated_at: change.created_at, - inherited_from_version_id: null as string | null, // Direct entry, not inherited - inheritance_delete_marker: 0, - change_id: change.id, - commit_id: args.commit_id, - })); - - executeSync({ - lix: args.lix, - query: intDb - .insertInto("internal_state_cache") - .values(values) - .onConflict((oc) => - oc - .columns(["entity_id", "schema_key", "file_id", "version_id"]) - .doUpdateSet({ - plugin_key: sql`excluded.plugin_key`, - snapshot_content: sql`excluded.snapshot_content`, - schema_version: sql`excluded.schema_version`, - updated_at: sql`excluded.updated_at`, - inheritance_delete_marker: sql`excluded.inheritance_delete_marker`, - change_id: sql`excluded.change_id`, - commit_id: sql`excluded.commit_id`, - }) - ), - }); + const { lix, changes, commit_id, version_id } = args; + const db = lix.db as unknown as Kysely; + + // Group changes by schema_key for efficient batch processing + const changesBySchema = new Map< + string, + { + inserts: LixChangeRaw[]; + deletes: LixChangeRaw[]; + } + >(); + + for (const change of changes) { + if (!changesBySchema.has(change.schema_key)) { + changesBySchema.set(change.schema_key, { + inserts: [], + deletes: [], + }); + } + + const group = changesBySchema.get(change.schema_key)!; + if (change.snapshot_content === null) { + group.deletes.push(change); + } else { + group.inserts.push(change); + } } - // ----------------------------------------- - // Handle deletions: copy-down + tombstones - // ----------------------------------------- - if (deletions.length > 0) { - // A) Perform the entire "copy-down" operation in a single, set-based query. 
- // This query finds the relevant source rows and generates all required - // child entries directly in the database, avoiding JS loops. - executeSync({ - lix: args.lix, - query: intDb - .with("children", (db) => - db - .selectFrom("internal_resolved_state_all") - .select(sql`json_extract(snapshot_content, '$.id')`.as("id")) - .where("schema_key", "=", "lix_version") - .where( - sql`json_extract(snapshot_content, '$.inherits_from_version_id')`, - "=", - args.version_id - ) - ) - .insertInto("internal_state_cache") - .columns([ - "entity_id", "schema_key", "file_id", "version_id", "plugin_key", - "snapshot_content", "schema_version", "created_at", "updated_at", - "inherited_from_version_id", "inheritance_delete_marker", - "change_id", "commit_id" - ]) - .expression((eb) => - eb - .selectFrom("internal_state_cache as src") - .innerJoin("children", (join) => join.on(sql`1`, "=", sql`1`)) // CROSS JOIN via always-true condition - .select([ - "src.entity_id", "src.schema_key", "src.file_id", - "children.id as version_id", // Set child's version ID - "src.plugin_key", "src.snapshot_content", "src.schema_version", - "src.created_at", "src.updated_at", - sql`null`.as("inherited_from_version_id"), - sql`0`.as("inheritance_delete_marker"), - "src.change_id", - // Preserve the original commit_id from the source entry when copying down - "src.commit_id", - ]) - .where("src.version_id", "=", args.version_id) - .where("src.inheritance_delete_marker", "=", 0) - .where("src.snapshot_content", "is not", null) - .where((where_eb) => where_eb.or( - deletions.map((d) => where_eb.and([ - where_eb("src.entity_id", "=", d.entity_id), - where_eb("src.schema_key", "=", d.schema_key), - where_eb("src.file_id", "=", d.file_id), - ])) - )) - ) - .onConflict((oc) => - oc.columns(["entity_id", "schema_key", "file_id", "version_id"]) - .doUpdateSet({ - // Define update rules for conflicts - plugin_key: sql`excluded.plugin_key`, - snapshot_content: sql`excluded.snapshot_content`, - schema_version: sql`excluded.schema_version`, - updated_at: sql`excluded.updated_at`, - change_id: sql`excluded.change_id`, - commit_id: sql`excluded.commit_id`, - }) - ), - }) - - // B) Upsert tombstones in the deleting version (blocks inheritance below) - const tombstoneValues = deletions.map((change) => ({ - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - version_id: args.version_id, - plugin_key: change.plugin_key, - snapshot_content: null as string | null, // Always null for deletions - schema_version: change.schema_version, - created_at: change.created_at, - updated_at: change.created_at, - inherited_from_version_id: null as string | null, - inheritance_delete_marker: 1, // mark as tombstone - change_id: change.id, - commit_id: args.commit_id, - })); - - executeSync({ - lix: args.lix, - query: intDb - .insertInto("internal_state_cache") - .values(tombstoneValues) - .onConflict((oc) => - oc - .columns(["entity_id", "schema_key", "file_id", "version_id"]) - .doUpdateSet({ - plugin_key: sql`excluded.plugin_key`, - snapshot_content: sql`excluded.snapshot_content`, - schema_version: sql`excluded.schema_version`, - updated_at: sql`excluded.updated_at`, - inheritance_delete_marker: sql`excluded.inheritance_delete_marker`, - change_id: sql`excluded.change_id`, - commit_id: sql`excluded.commit_id`, - }) - ), - }); + // Process each schema's changes directly to its physical table + for (const [schema_key, schemaChanges] of changesBySchema) { + // Sanitize schema_key for use in table name - replace 
non-alphanumeric with underscore + const sanitizedSchemaKey = schema_key.replace(/[^a-zA-Z0-9]/g, "_"); + const tableName = `internal_state_cache_${sanitizedSchemaKey}`; + + // Ensure table exists (creates if needed, updates cache) + ensureTableExists(lix, tableName); + + // Process inserts/updates for this schema + if (schemaChanges.inserts.length > 0) { + batchInsertDirectToTable({ + lix, + tableName, + changes: schemaChanges.inserts, + commit_id, + version_id, + }); + } + + // Process deletions for this schema + if (schemaChanges.deletes.length > 0) { + batchDeleteDirectFromTable({ + db, + lix, + tableName, + changes: schemaChanges.deletes, + commit_id, + version_id, + }); + } + } +} + +/** + * Ensures a table exists and updates the cache. + * Single source of truth for table creation and cache management. + */ +function ensureTableExists(lix: Pick, tableName: string): void { + // Get cache for this Lix instance + const tableCache = getStateCacheV2Tables(lix); + + // Always run idempotent creator to ensure indexes exist + createSchemaCacheTable({ lix, tableName }); - // Note: We intentionally skip any hot-path tombstone cleanup here. - // A periodic GC can safely remove unneeded tombstones if desired. + // Update cache set if newly seen + if (!tableCache.has(tableName)) { + tableCache.add(tableName); + } +} + +function batchInsertDirectToTable(args: { + lix: Pick; + tableName: string; + changes: LixChangeRaw[]; + commit_id: string; + version_id: string; +}): void { + const { lix, tableName, changes, commit_id, version_id } = args; + + // Prepare statement once for all inserts + // Use proper UPSERT with ON CONFLICT instead of INSERT OR REPLACE + // jsonb() conversion is handled directly in the SQL + const stmt = lix.sqlite.prepare(` + INSERT INTO ${tableName} ( + entity_id, + schema_key, + file_id, + version_id, + plugin_key, + snapshot_content, + schema_version, + created_at, + updated_at, + inherited_from_version_id, + inheritance_delete_marker, + change_id, + commit_id + ) VALUES (?, ?, ?, ?, ?, jsonb(?), ?, ?, ?, ?, ?, ?, ?) + ON CONFLICT(entity_id, file_id, version_id) DO UPDATE SET + schema_key = excluded.schema_key, + plugin_key = excluded.plugin_key, + snapshot_content = excluded.snapshot_content, + schema_version = excluded.schema_version, + -- Preserve original created_at, don't overwrite it + updated_at = excluded.updated_at, + inherited_from_version_id = excluded.inherited_from_version_id, + inheritance_delete_marker = excluded.inheritance_delete_marker, + change_id = excluded.change_id, + commit_id = excluded.commit_id + `); + + try { + for (const change of changes) { + stmt.bind([ + change.entity_id, + change.schema_key, // Add the original schema_key + change.file_id, + version_id, + change.plugin_key, + change.snapshot_content, // jsonb() conversion happens in SQL + change.schema_version, + change.created_at, + change.created_at, // updated_at + null, // inherited_from_version_id + 0, // inheritance_delete_marker + change.id, + commit_id, + ]); + stmt.step(); + stmt.reset(); + } + } finally { + stmt.finalize(); + } +} + +function batchDeleteDirectFromTable(args: { + db: Kysely; + lix: Pick; + tableName: string; + changes: LixChangeRaw[]; + commit_id: string; + version_id: string; +}): void { + const { lix, tableName, changes, commit_id, version_id } = args; + + for (const change of changes) { + // Get existing entry to check if it exists before deletion + const result = lix.sqlite.exec({ + sql: `SELECT * FROM ${tableName} + WHERE entity_id = ? AND file_id = ? 
AND version_id = ? + AND inheritance_delete_marker = 0 AND snapshot_content IS NOT NULL`, + bind: [change.entity_id, change.file_id, version_id], + returnValue: "resultRows", + }) as any[]; + + const existingEntry = result?.[0]; + + // Delete the entry + if (existingEntry) { + lix.sqlite.exec({ + sql: `DELETE FROM ${tableName} + WHERE entity_id = ? AND file_id = ? AND version_id = ?`, + bind: [change.entity_id, change.file_id, version_id], + }); + } + + // Insert tombstone with UPSERT to handle existing entries + lix.sqlite.exec({ + sql: `INSERT INTO ${tableName} ( + entity_id, + schema_key, + file_id, + version_id, + plugin_key, + snapshot_content, + schema_version, + created_at, + updated_at, + inherited_from_version_id, + inheritance_delete_marker, + change_id, + commit_id + ) VALUES (?, ?, ?, ?, ?, NULL, ?, ?, ?, NULL, 1, ?, ?) + ON CONFLICT(entity_id, file_id, version_id) DO UPDATE SET + schema_key = excluded.schema_key, + plugin_key = excluded.plugin_key, + snapshot_content = NULL, + schema_version = excluded.schema_version, + updated_at = excluded.updated_at, + inherited_from_version_id = NULL, + inheritance_delete_marker = 1, + change_id = excluded.change_id, + commit_id = excluded.commit_id`, + bind: [ + change.entity_id, + change.schema_key, + change.file_id, + version_id, + change.plugin_key, + change.schema_version, + change.created_at, + change.created_at, + change.id, + commit_id, + ], + }); } } diff --git a/packages/lix-sdk/src/state/commit.bench.ts b/packages/lix-sdk/src/state/commit.bench.ts deleted file mode 100644 index a01991f675..0000000000 --- a/packages/lix-sdk/src/state/commit.bench.ts +++ /dev/null @@ -1,49 +0,0 @@ -import { bench } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import { commit } from "./commit.js"; -import { insertTransactionState } from "./insert-transaction-state.js"; - -// NOTE: openLix includes database initialization overhead -// This affects all benchmarks equally and represents real-world usage patterns -// this test exists to act as baseline for commit performance -bench("commit empty transaction (baseline)", async () => { - const lix = await openLix({}); - - commit({ - lix: lix as any, - }); -}); - -bench("commit transaction with 100 rows", async () => { - const lix = await openLix({}); - - // Insert multiple transaction states in a single batch - const multipleData = []; - for (let i = 0; i < 100; i++) { - multipleData.push({ - entity_id: `commit_test_entity_${i}`, - version_id: "global", - schema_key: "commit_benchmark_entity", - file_id: `commit_file`, - plugin_key: "benchmark_plugin", - snapshot_content: JSON.stringify({ - id: `commit_test_entity_${i}`, - value: `test_data_${i}`, - metadata: { type: "commit_benchmark", index: i }, - }), - schema_version: "1.0", - untracked: false, - }); - } - insertTransactionState({ - lix: lix as any, - data: multipleData, - }); - - // Benchmark: Commit all transaction states - commit({ - lix: { sqlite: lix.sqlite, db: lix.db as any, hooks: lix.hooks }, - }); -}); - -bench.todo("commit with mixed operations (insert/update/delete)"); diff --git a/packages/lix-sdk/src/state/commit.ts b/packages/lix-sdk/src/state/commit.ts deleted file mode 100644 index 83343fb347..0000000000 --- a/packages/lix-sdk/src/state/commit.ts +++ /dev/null @@ -1,696 +0,0 @@ -import { type Kysely, sql } from "kysely"; -import { - type LixChangeSet, - type LixChangeSetElement, - LixChangeSetElementSchema, - LixChangeSetSchema, -} from "../change-set/schema.js"; -import { executeSync } from 
"../database/execute-sync.js"; -import type { LixInternalDatabaseSchema } from "../database/schema.js"; -import { LixVersionSchema, type LixVersion } from "../version/schema.js"; -import { nanoId } from "../deterministic/index.js"; -import { uuidV7 } from "../deterministic/uuid-v7.js"; -import { commitDeterministicSequenceNumber } from "../deterministic/sequence.js"; -import { timestamp } from "../deterministic/timestamp.js"; -import type { Lix } from "../lix/open-lix.js"; -import { handleStateDelete } from "./schema.js"; -import { insertTransactionState } from "./insert-transaction-state.js"; -import { commitIsAncestorOf } from "../query-filter/commit-is-ancestor-of.js"; -import type { LixCommitEdge } from "../commit/schema.js"; -import { updateStateCache } from "./cache/update-state-cache.js"; - -/** - * Commits all pending changes from the transaction stage to permanent storage. - * - * This function handles the COMMIT stage of the state mutation flow. It takes - * all changes accumulated in the transaction table (internal_change_in_transaction), - * groups them by version, creates changesets for each version, and saves - * them to permanent storage (internal_change and internal_snapshot tables). - * - * @example - * // After accumulating changes via insertTransactionState - * commit({ lix }); - * // All pending changes are now persisted - */ -export function commit(args: { - lix: Pick; -}): number { - // Create a single timestamp for the entire transaction - const transactionTimestamp = timestamp({ lix: args.lix }); - - const transactionChanges = executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .selectFrom("internal_change_in_transaction") - .select([ - "id", - "entity_id", - "schema_key", - "schema_version", - "file_id", - "plugin_key", - "version_id", - sql`json(snapshot_content)`.as("snapshot_content"), - "created_at", - ]) - .orderBy("version_id"), - }); - - // Group changes by version_id - const changesByVersion = new Map(); - for (const change of transactionChanges) { - if (!changesByVersion.has(change.version_id)) { - changesByVersion.set(change.version_id, []); - } - changesByVersion.get(change.version_id)!.push(change); - } - - // Process each version's changes to create changesets and commits - const commitIdsByVersion = new Map(); - - // First pass: Create changesets for non-global versions - for (const [version_id, versionChanges] of changesByVersion) { - if (version_id !== "global") { - // Create changeset, commit and edges for this version's transaction - const commitId = createChangesetForTransaction( - args.lix, - transactionTimestamp, - version_id, - versionChanges - ); - commitIdsByVersion.set(version_id, commitId); - } - } - - // Second pass: Handle global version - // At this point, any version updates from the first pass are in the transaction - // with version_id: "global", so we need to re-query - if (commitIdsByVersion.size > 0 || changesByVersion.has("global")) { - // Get all changes for global version (including version updates from first pass) - const globalChanges = executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .selectFrom("internal_change_in_transaction") - .select([ - "id", - "entity_id", - "schema_key", - "schema_version", - "file_id", - "plugin_key", - "version_id", - sql`json(snapshot_content)`.as("snapshot_content"), - "created_at", - ]) - .where("version_id", "=", "global"), - }); - - if (globalChanges.length > 0) { - const globalCommitId = createChangesetForTransaction( - args.lix, - 
transactionTimestamp, - "global", - globalChanges - ); - commitIdsByVersion.set("global", globalCommitId); - } - } - - // Use the same changes we already queried at the beginning - // Don't re-query the transaction table as it now contains additional changes - // created by createChangesetForTransaction (like change_author records) - - // Also need to realize the changes created by createChangesetForTransaction - const newChangesInTransaction = - transactionChanges.length > 0 - ? executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .selectFrom("internal_change_in_transaction") - .select([ - "id", - "entity_id", - "schema_key", - "schema_version", - "file_id", - "plugin_key", - "version_id", - sql`json(snapshot_content)`.as("snapshot_content"), - "created_at", - ]) - .where( - "id", - "not in", - transactionChanges.map((c) => c.id) - ), - }) - : []; - - // Combine all changes to realize - const allChangesToRealize = [ - ...transactionChanges, - ...newChangesInTransaction, - ]; - - // Batch insert all changes into the change table (instead of N+1 individual inserts) - if (allChangesToRealize.length > 0) { - const changeRows = allChangesToRealize.map((change) => ({ - id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - schema_version: change.schema_version, - file_id: change.file_id, - plugin_key: change.plugin_key, - created_at: change.created_at, - snapshot_content: change.snapshot_content, - })); - - executeSync({ - lix: args.lix, - query: args.lix.db.insertInto("change").values(changeRows), - }); - } - - // Clear the transaction table after committing - executeSync({ - lix: args.lix, - query: ( - args.lix.db as unknown as Kysely - ).deleteFrom("internal_change_in_transaction"), - }); - - // Update cache entries with the commit id only for entities that were changed - for (const [version_id, commitId] of commitIdsByVersion) { - // Get the changes for this version - // For global version, we need to use all changes in the version (including from second pass) - const changesForVersion = - version_id === "global" - ? 
allChangesToRealize.filter((c) => c.version_id === "global") - : changesByVersion.get(version_id)!; - - // Only update cache entries for entities that were actually changed - // Track files that need lixcol cache updates - const fileChanges = new Map< - string, - { change_id: string; created_at: string } - >(); - - // Batch update state cache for all changes at once - const changesForCache = changesForVersion.map(change => ({ - id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - schema_version: change.schema_version, - file_id: change.file_id, - plugin_key: change.plugin_key, - snapshot_content: change.snapshot_content, - created_at: change.created_at, - })); - - updateStateCache({ - lix: args.lix, - changes: changesForCache, - commit_id: commitId, - version_id: version_id, - }); - - // Track files that need lixcol cache updates - for (const change of changesForVersion) { - // IDEALLY WE WOULD HAVE A BEFORE_COMMIT HOOK - // THAT LIX EXPOSES TO KEEP THE LOGIC IN THE FILE STUFF - // - // - // Track the latest change for each file (excluding "lix" internal file) - if (change.file_id && change.file_id !== "lix") { - // We want the latest change for each file (by created_at) - const existing = fileChanges.get(change.file_id); - if (!existing || change.created_at > existing.created_at) { - fileChanges.set(change.file_id, { - change_id: change.id, - created_at: change.created_at, - }); - } - } - } - - // Update file lixcol cache for all files that had changes - // We have all the data we need from the commit, no need to recompute - if (fileChanges.size > 0) { - // Separate files into deletions and updates - const filesToDelete: string[] = []; - const filesToUpdate: Array<{ - file_id: string; - version_id: string; - latest_change_id: string; - latest_commit_id: string; - created_at: string; - updated_at: string; - }> = []; - - for (const [fileId, { change_id, created_at }] of fileChanges) { - // Check if this is a deletion (file descriptor with null snapshot content) - const changeData = changesForVersion.find((c) => c.id === change_id); - const isDeleted = - changeData?.schema_key === "lix_file_descriptor" && - !changeData.snapshot_content; - - if (isDeleted) { - filesToDelete.push(fileId); - } else { - filesToUpdate.push({ - file_id: fileId, - version_id: version_id, - latest_change_id: change_id, - latest_commit_id: commitId, - created_at: created_at, - updated_at: created_at, - }); - } - } - - // Delete cache entries for deleted files - if (filesToDelete.length > 0) { - executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .deleteFrom("internal_file_lixcol_cache") - .where("version_id", "=", version_id) - .where("file_id", "in", filesToDelete), - }); - } - - // Batch insert/update cache entries for existing files - if (filesToUpdate.length > 0) { - executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .insertInto("internal_file_lixcol_cache") - .values(filesToUpdate) - .onConflict((oc) => - oc.columns(["file_id", "version_id"]).doUpdateSet({ - latest_change_id: sql`excluded.latest_change_id`, - latest_commit_id: sql`excluded.latest_commit_id`, - // Don't update created_at - preserve the original - updated_at: sql`excluded.updated_at`, - }) - ), - }); - } - } - } - - commitDeterministicSequenceNumber({ - lix: args.lix, - timestamp: transactionTimestamp, - }); - - //* Emit state commit hook after transaction is successfully committed - //* must come last to ensure that subscribers see the changes - 
args.lix.hooks._emit("state_commit", { changes: allChangesToRealize }); - - return args.lix.sqlite.sqlite3.capi.SQLITE_OK; -} - -/** - * Creates a changeset and commit for all changes in a transaction and updates the version. - * - * This function: - * 1. Creates a new changeset and commit - * 2. Creates a commit edge linking the previous commit to the new one - * 3. Updates the version to point to the new commit - * 4. Creates changeset elements for each change - * 5. Updates working changeset elements for user data changes - * - * @param sqlite - SQLite database instance - * @param db - Kysely database instance - * @param _currentTime - Current timestamp (unused) - * @param version_id - The version to create the changeset for - * @param changes - Array of changes to include in the changeset - * @returns The ID of the newly created commit - */ -function createChangesetForTransaction( - lix: Pick, - _currentTime: string, - version_id: string, - changes: Pick< - { - id: string; - entity_id: string; - schema_key: string; - schema_version: string; - file_id: string; - plugin_key: string; - snapshot_id: string; - created_at: string; - snapshot_content: string | null; - }, - "id" | "entity_id" | "schema_key" | "file_id" | "snapshot_content" - >[] -): string { - const db = lix.db as unknown as Kysely; - - // Get the version record from resolved state view - const versionRows = executeSync({ - lix: lix, - query: db - .selectFrom("internal_resolved_state_all") - .where("schema_key", "=", "lix_version") - .where("entity_id", "=", version_id) - .select("snapshot_content") - .limit(1), - }); - - if (versionRows.length === 0 || !versionRows[0]?.snapshot_content) { - throw new Error(`Version with id '${version_id}' not found.`); - } - - const mutatedVersion = JSON.parse(versionRows[0].snapshot_content) as any; - const nextChangeSetId = nanoId({ - lix, - }); - - // TODO: Don't create change author for the changeset itself. - // Change authors should be associated with commit entities when implemented. 
- // See: https://github.com/opral/lix-sdk/issues/359 - - // Create a new commit that points to the new change set - const nextCommitId = uuidV7({ - lix, - }); - - // Batch create all core entities (changeset, commit, edge, version) in one call - const coreEntitiesData = [ - { - entity_id: nextChangeSetId, - schema_key: "lix_change_set", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - id: nextChangeSetId, - metadata: null, - } satisfies LixChangeSet), - schema_version: LixChangeSetSchema["x-lix-version"], - version_id: "global", - untracked: false, - }, - { - entity_id: nextCommitId, - schema_key: "lix_commit", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - id: nextCommitId, - change_set_id: nextChangeSetId, - }), - schema_version: "1.0", - version_id: "global", - untracked: false, - }, - { - entity_id: `${mutatedVersion.commit_id}~${nextCommitId}`, - schema_key: "lix_commit_edge", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - parent_id: mutatedVersion.commit_id, - child_id: nextCommitId, - } satisfies LixCommitEdge), - schema_version: "1.0", - version_id: "global", - untracked: false, - }, - { - entity_id: mutatedVersion.id, - schema_key: "lix_version", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - ...mutatedVersion, - commit_id: nextCommitId, - } satisfies LixVersion), - schema_version: LixVersionSchema["x-lix-version"], - version_id: "global", - untracked: false, - }, - ]; - - const [changeSetChange, commitChange, commitEdgeChange, versionChange] = - insertTransactionState({ - lix, - data: coreEntitiesData, - createChangeAuthors: false, - }); - - // Create changeset elements for all changes - const changesToProcess = [ - ...changes, - changeSetChange!, - commitChange!, - commitEdgeChange!, - versionChange!, - ]; - - // Batch create all changeset elements in one call (instead of N+1 individual inserts) - - const changesetElementsData = changesToProcess.map((change) => { - // Get the change ID - it may be 'id' for original changes or 'change_id' for results from insertTransactionState - const changeId = "change_id" in change ? change.change_id : change.id; - - return { - entity_id: `${nextChangeSetId}::${changeId}`, - schema_key: "lix_change_set_element", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - change_set_id: nextChangeSetId, - change_id: changeId, - schema_key: change.schema_key, - file_id: change.file_id, - entity_id: change.entity_id, - } satisfies LixChangeSetElement), - schema_version: LixChangeSetElementSchema["x-lix-version"], - version_id: "global", - untracked: false, - }; - }); - - if (changesetElementsData.length > 0) { - insertTransactionState({ - lix, - data: changesetElementsData, - createChangeAuthors: false, - }); - } - - // Create/update working change set element for user data changes - // Get the working commit and its change set - const workingCommit = executeSync({ - lix, - query: db - .selectFrom("commit") - .where("id", "=", mutatedVersion.working_commit_id) - .selectAll(), - }); - - if (workingCommit.length === 0) { - throw new Error( - `Working commit not found: ${mutatedVersion.working_commit_id}` - ); - } - - const workingChangeSetId = workingCommit[0]!.change_set_id; - - // TODO skipping lix internal entities is likely undesired. - // Skip lix internal entities (change sets, edges, etc.) 
- - // Filter out lix internal entities - const userChanges = changes.filter( - (change) => - change.schema_key !== "lix_change_set" && - change.schema_key !== "lix_change_set_edge" && - change.schema_key !== "lix_change_set_element" && - change.schema_key !== "lix_version" - ); - - if (userChanges.length > 0) { - // Separate changes into deletions and non-deletions - const deletions: typeof userChanges = []; - const nonDeletions: typeof userChanges = []; - - for (const change of userChanges) { - const parsedSnapshot = change.snapshot_content - ? JSON.parse(change.snapshot_content) - : null; - const isDeletion = - !parsedSnapshot || parsedSnapshot.snapshot_id === "no-content"; - - if (isDeletion) { - deletions.push(change); - } else { - nonDeletions.push(change); - } - } - - // Step 1: Batch check for entities at checkpoint (for deletions) - const entitiesAtCheckpoint = new Set(); - if (deletions.length > 0) { - // Get the checkpoint commit ID once - const checkpointCommitResult = executeSync({ - lix, - query: db - .selectFrom("commit") - .innerJoin("entity_label", (join) => - join - .onRef("entity_label.entity_id", "=", "commit.id") - .on("entity_label.schema_key", "=", "lix_commit") - ) - .innerJoin("label", "label.id", "entity_label.label_id") - .where("label.name", "=", "checkpoint") - .where( - commitIsAncestorOf( - { id: mutatedVersion.commit_id }, - { includeSelf: true, depth: 1 } - ) - ) - .select("commit.id") - .limit(1), - }); - - const checkpointCommitId = checkpointCommitResult[0]?.id; - - if (checkpointCommitId) { - // Batch check all deletion entities at checkpoint - const checkpointEntities = executeSync({ - lix, - query: db - .selectFrom("state_history") - .where("depth", "=", 0) - .where("commit_id", "=", checkpointCommitId) - .where((eb) => - eb.or( - deletions.map((change) => - eb.and([ - eb("entity_id", "=", change.entity_id), - eb("schema_key", "=", change.schema_key), - eb("file_id", "=", change.file_id), - ]) - ) - ) - ) - .select(["entity_id", "schema_key", "file_id"]), - }); - - // Build a set for quick lookup - for (const entity of checkpointEntities) { - entitiesAtCheckpoint.add( - `${entity.entity_id}|${entity.schema_key}|${entity.file_id}` - ); - } - } - } - - // Step 2: Batch find all existing working change set elements to delete - const existingEntities = executeSync({ - lix, - query: db - .selectFrom("internal_resolved_state_all") - .select([ - "_pk", - sql`json_extract(snapshot_content, '$.entity_id')`.as("entity_id"), - sql`json_extract(snapshot_content, '$.schema_key')`.as("schema_key"), - sql`json_extract(snapshot_content, '$.file_id')`.as("file_id"), - ]) - .where("entity_id", "like", `${workingChangeSetId}::%`) - .where("schema_key", "=", "lix_change_set_element") - .where("file_id", "=", "lix") - .where("version_id", "=", "global") - .where((eb) => - eb.or( - userChanges.map((change) => - eb.and([ - eb( - sql`json_extract(snapshot_content, '$.entity_id')`, - "=", - change.entity_id - ), - eb( - sql`json_extract(snapshot_content, '$.schema_key')`, - "=", - change.schema_key - ), - eb( - sql`json_extract(snapshot_content, '$.file_id')`, - "=", - change.file_id - ), - ]) - ) - ) - ), - }); - - // Step 3: Delete all existing working change set elements at once - for (const existing of existingEntities) { - handleStateDelete(lix, existing._pk); - } - - // Step 4: Batch create new working change set elements - const newWorkingElements: Parameters< - typeof insertTransactionState - >[0]["data"] = []; - - // Add deletions that existed at checkpoint - for 
(const deletion of deletions) { - const key = `${deletion.entity_id}|${deletion.schema_key}|${deletion.file_id}`; - if (entitiesAtCheckpoint.has(key)) { - newWorkingElements.push({ - entity_id: `${workingChangeSetId}::${deletion.id}`, - schema_key: "lix_change_set_element", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - change_set_id: workingChangeSetId, - change_id: deletion.id, - entity_id: deletion.entity_id, - schema_key: deletion.schema_key, - file_id: deletion.file_id, - } satisfies LixChangeSetElement), - schema_version: LixChangeSetElementSchema["x-lix-version"], - version_id: "global", - untracked: false, - }); - } - } - - // Add all non-deletions - for (const change of nonDeletions) { - newWorkingElements.push({ - entity_id: `${workingChangeSetId}::${change.id}`, - schema_key: "lix_change_set_element", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - change_set_id: workingChangeSetId, - change_id: change.id, - entity_id: change.entity_id, - schema_key: change.schema_key, - file_id: change.file_id, - } satisfies LixChangeSetElement), - schema_version: LixChangeSetElementSchema["x-lix-version"], - version_id: "global", - untracked: false, - }); - } - - // Batch insert all new working elements - if (newWorkingElements.length > 0) { - insertTransactionState({ - lix, - data: newWorkingElements, - createChangeAuthors: false, - }); - } - } - - return nextCommitId; -} diff --git a/packages/lix-sdk/src/commit/create-checkpoint.test.ts b/packages/lix-sdk/src/state/create-checkpoint.test.ts similarity index 95% rename from packages/lix-sdk/src/commit/create-checkpoint.test.ts rename to packages/lix-sdk/src/state/create-checkpoint.test.ts index 07fc4f024c..930c686c36 100644 --- a/packages/lix-sdk/src/commit/create-checkpoint.test.ts +++ b/packages/lix-sdk/src/state/create-checkpoint.test.ts @@ -185,11 +185,30 @@ test("creates edge from checkpoint to new working commit", async () => { }); }); -test("creating a checkpoint with no changes throws", async () => { +test("creating a checkpoint with no changes returns current head (idempotent)", async () => { const lix = await openLix({}); - // Create checkpoint without making explicit changes (should work with lix own changes) - await expect(createCheckpoint({ lix })).rejects.toThrow(); + // Capture version before + const before = await lix.db + .selectFrom("version") + .where("name", "=", "main") + .selectAll() + .executeTakeFirstOrThrow(); + + // Create checkpoint without making explicit changes → idempotent no-op + const cp = await createCheckpoint({ lix }); + + // Returns the current head commit + expect(cp.id).toBe(before.commit_id); + + // Verify version state unchanged + const after = await lix.db + .selectFrom("version") + .where("name", "=", "main") + .selectAll() + .executeTakeFirstOrThrow(); + expect(after.commit_id).toBe(before.commit_id); + expect(after.working_commit_id).toBe(before.working_commit_id); }); // we should have https://github.com/opral/lix-sdk/issues/305 before this test diff --git a/packages/lix-sdk/src/commit/create-checkpoint.ts b/packages/lix-sdk/src/state/create-checkpoint.ts similarity index 91% rename from packages/lix-sdk/src/commit/create-checkpoint.ts rename to packages/lix-sdk/src/state/create-checkpoint.ts index 8e8f654ffe..2163f28bc0 100644 --- a/packages/lix-sdk/src/commit/create-checkpoint.ts +++ b/packages/lix-sdk/src/state/create-checkpoint.ts @@ -1,4 +1,4 @@ -import type { LixCommit } from "./schema.js"; +import type { 
LixCommit } from "../commit/schema.js"; import { nanoId, uuidV7 } from "../deterministic/index.js"; import type { Lix } from "../lix/open-lix.js"; import type { State } from "../entity-views/types.js"; @@ -45,9 +45,14 @@ export async function createCheckpoint(args: { .execute(); if (workingElements.length === 0) { - throw new Error( - "No changes in working change set to create a checkpoint for." - ); + // Idempotent behavior: if working set is clean, return the current head commit + const headCommit = await trx + .selectFrom("commit_all") + .selectAll() + .where("id", "=", activeVersion.commit_id) + .where("lixcol_version_id", "=", "global") + .executeTakeFirstOrThrow(); + return headCommit; } // 1. The old working commit becomes the checkpoint commit diff --git a/packages/lix-sdk/src/state/index.ts b/packages/lix-sdk/src/state/index.ts new file mode 100644 index 0000000000..d87ccd0a61 --- /dev/null +++ b/packages/lix-sdk/src/state/index.ts @@ -0,0 +1,23 @@ +// State types +export type { + StateView, + StateRow, + NewStateRow, + StateRowUpdate, +} from "./views/state.js"; + +export type { + StateAllView, + StateAllRow, + NewStateAllRow, + StateAllRowUpdate, +} from "./views/state-all.js"; + +export type { + StateWithTombstonesView, + StateWithTombstonesRow, +} from "./views/state-with-tombstones.js"; + +// State operations +export { createCheckpoint } from "./create-checkpoint.js"; +export { transition } from "./transition.js"; diff --git a/packages/lix-sdk/src/state/insert-transaction-state.ts b/packages/lix-sdk/src/state/insert-transaction-state.ts deleted file mode 100644 index 3edc7cc547..0000000000 --- a/packages/lix-sdk/src/state/insert-transaction-state.ts +++ /dev/null @@ -1,384 +0,0 @@ -import { sql, type Kysely } from "kysely"; -import { executeSync } from "../database/execute-sync.js"; -import { timestamp, uuidV7 } from "../deterministic/index.js"; -import type { Lix } from "../lix/open-lix.js"; -import type { LixInternalDatabaseSchema } from "../database/schema.js"; -import type { NewStateAllRow, StateAllRow } from "./schema.js"; -import { LixChangeAuthorSchema } from "../change-author/schema.js"; -import { updateUntrackedState } from "./untracked/update-untracked-state.js"; - -type NewTransactionStateRow = Omit & { - snapshot_content: string | null; -}; - -export type TransactionStateRow = Omit & { - snapshot_content: string | null; -}; - -/** - * Inserts a state change into the transaction stage. - * - * This function handles the TRANSACTION stage of the state mutation flow, where - * changes are temporarily stored before being committed to permanent storage. - * It supports both tracked and untracked entities, manages the state cache for - * immediate consistency, and automatically creates change_author records to - * track who made each change. 
- * - * @param args.lix - The Lix instance with SQLite database and Kysely query builder - * @param args.data - The state data to insert, including entity details and snapshot - * @param args.timestamp - Optional timestamp to use (defaults to current time) - * @param args.createChangeAuthors - Whether to create change_author records (defaults to true) - * - * @returns The inserted state row with generated fields like change_id - * - * @example - * // Insert a new entity state - * insertTransactionState({ - * lix: { sqlite, db }, - * data: { - * entity_id: "user-123", - * schema_key: "user", - * file_id: "file1", - * plugin_key: "my-plugin", - * snapshot_content: JSON.stringify({ name: "John", email: "john@example.com" }), - * schema_version: "1.0", - * version_id: "version-abc", - * untracked: false - * } - * }); - * - * @example - * // Delete an entity (null snapshot_content) - * insertTransactionState({ - * lix: { sqlite, db }, - * data: { - * entity_id: "user-123", - * schema_key: "user", - * file_id: "file1", - * plugin_key: "my-plugin", - * snapshot_content: null, // Deletion - * schema_version: "1.0", - * version_id: "version-abc", - * untracked: false - * } - * }); - */ -export function insertTransactionState(args: { - lix: Pick; - data: NewTransactionStateRow[]; - timestamp?: string; - createChangeAuthors?: boolean; -}): TransactionStateRow[] { - const _timestamp = args.timestamp || timestamp({ lix: args.lix as any }); - - if (args.data.length === 0) { - return []; - } - - // Group data by tracked vs untracked for batch processing - const trackedData = args.data.filter((data) => data.untracked !== true); - const untrackedData = args.data.filter((data) => data.untracked === true); - - const results: TransactionStateRow[] = []; - - // Process untracked data (currently one by one - can be optimized later) - for (const data of untrackedData) { - // Use the new untracked API for all untracked operations - updateUntrackedState({ - lix: args.lix as any, - change: { - entity_id: data.entity_id, - schema_key: data.schema_key, - file_id: data.file_id, - plugin_key: data.plugin_key, - snapshot_content: data.snapshot_content, - schema_version: data.schema_version, - created_at: _timestamp, - }, - version_id: data.version_id, - }); - - results.push({ - entity_id: data.entity_id, - schema_key: data.schema_key, - file_id: data.file_id, - plugin_key: data.plugin_key, - snapshot_content: data.snapshot_content, - schema_version: data.schema_version, - version_id: data.version_id, - created_at: _timestamp, - updated_at: _timestamp, - untracked: true, - inherited_from_version_id: null, - change_id: "untracked", - commit_id: "pending", - }); - } - - // Process tracked data with batching optimization - if (trackedData.length > 0) { - // Generate change IDs for all tracked entities upfront - const dataWithChangeIds = trackedData.map((data) => ({ - ...data, - change_id: uuidV7({ lix: args.lix as any }), - })); - - // Batch delete existing untracked state for all entities - // Build a single DELETE with OR conditions for all entities - if (dataWithChangeIds.length > 0) { - executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .deleteFrom("internal_state_all_untracked") - .where((eb) => - eb.or( - dataWithChangeIds.map((data) => - eb.and([ - eb("entity_id", "=", data.entity_id), - eb("schema_key", "=", data.schema_key), - eb("file_id", "=", data.file_id), - eb("version_id", "=", data.version_id), - ]) - ) - ) - ), - }); - } - - // Batch insert into internal_change_in_transaction - 
const transactionRows = dataWithChangeIds.map((data) => ({ - id: data.change_id, - entity_id: data.entity_id, - schema_key: data.schema_key, - file_id: data.file_id, - plugin_key: data.plugin_key, - snapshot_content: data.snapshot_content - ? sql`jsonb(${data.snapshot_content})` - : null, - schema_version: data.schema_version, - version_id: data.version_id, - created_at: _timestamp, - })); - - executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .insertInto("internal_change_in_transaction") - .values(transactionRows) - .onConflict((oc) => - oc - .columns(["entity_id", "file_id", "schema_key", "version_id"]) - .doUpdateSet((eb) => ({ - id: eb.ref("excluded.id"), - plugin_key: eb.ref("excluded.plugin_key"), - snapshot_content: eb.ref("excluded.snapshot_content"), - schema_version: eb.ref("excluded.schema_version"), - created_at: eb.ref("excluded.created_at"), - })) - ), - }); - - // Batch insert/update cache - const cacheRows = dataWithChangeIds.map((data) => ({ - entity_id: data.entity_id, - schema_key: data.schema_key, - file_id: data.file_id, - plugin_key: data.plugin_key, - snapshot_content: data.snapshot_content - ? sql`jsonb(${data.snapshot_content})` - : null, - schema_version: data.schema_version, - version_id: data.version_id, - change_id: data.change_id, - inheritance_delete_marker: data.snapshot_content === null ? 1 : 0, - created_at: _timestamp, - updated_at: _timestamp, - inherited_from_version_id: null, - commit_id: "pending", - })); - - executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .insertInto("internal_state_cache") - .values(cacheRows as any) - .onConflict((oc) => - oc - .columns(["entity_id", "schema_key", "file_id", "version_id"]) - .doUpdateSet((eb) => ({ - plugin_key: eb.ref("excluded.plugin_key"), - snapshot_content: eb.ref("excluded.snapshot_content"), - schema_version: eb.ref("excluded.schema_version"), - updated_at: eb.ref("excluded.updated_at"), - change_id: eb.ref("excluded.change_id"), - inheritance_delete_marker: eb.ref( - "excluded.inheritance_delete_marker" - ), - inherited_from_version_id: eb.ref( - "excluded.inherited_from_version_id" - ), - commit_id: eb.ref("excluded.commit_id"), - })) - ), - }); - - // Handle change authors for tracked entities (if enabled) - if (args.createChangeAuthors !== false && dataWithChangeIds.length > 0) { - // Step 1: Get all active accounts once - const activeAccounts = executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .selectFrom("internal_resolved_state_all") - .where("schema_key", "=", "lix_active_account") - .where("version_id", "=", "global") - .select(["snapshot_content"]), - }); - - if (activeAccounts && activeAccounts.length > 0) { - // Extract all account IDs - const accountIds = activeAccounts.map( - (acc) => - JSON.parse(acc.snapshot_content as string).account_id as string - ); - - // Get all unique version IDs we need to check - const uniqueVersionIds = [ - ...new Set(dataWithChangeIds.map((d) => d.version_id)), - ]; - - // Step 2: Batch query to check account states across all versions - const accountStates = executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .selectFrom("internal_resolved_state_all") - .where("entity_id", "in", accountIds) - .where("schema_key", "=", "lix_account") - .where("version_id", "in", uniqueVersionIds) - .select([ - "entity_id", - "version_id", - "snapshot_content", - "untracked", - ]), - }); - - // Create a Map for quick lookups: "accountId:versionId" -> state - const 
accountStateMap = new Map(); - for (const state of accountStates) { - accountStateMap.set(`${state.entity_id}:${state.version_id}`, state); - } - - // Step 3: Identify missing/untracked accounts that need to be imported - const accountsToImport: Array<{ - accountId: string; - versionId: string; - }> = []; - - for (const versionId of uniqueVersionIds) { - for (const accountId of accountIds) { - const state = accountStateMap.get(`${accountId}:${versionId}`); - if (!state || state.untracked) { - accountsToImport.push({ accountId, versionId }); - } - } - } - - // Step 4: Batch fetch missing accounts from global version if needed - if (accountsToImport.length > 0) { - const uniqueAccountIds = [ - ...new Set(accountsToImport.map((a) => a.accountId)), - ]; - - const globalAccounts = executeSync({ - lix: args.lix, - query: (args.lix.db as unknown as Kysely) - .selectFrom("internal_resolved_state_all") - .where("entity_id", "in", uniqueAccountIds) - .where("schema_key", "=", "lix_account") - .where("version_id", "=", "global") - .select(["entity_id", "snapshot_content"]), - }); - - // Create a Map for global accounts - const globalAccountMap = new Map(); - for (const acc of globalAccounts) { - globalAccountMap.set(acc.entity_id, acc.snapshot_content!); - } - - // Step 5: Batch insert all missing accounts as tracked - const accountsToTrack = accountsToImport - .filter((item) => globalAccountMap.has(item.accountId)) - .map((item) => ({ - entity_id: item.accountId, - schema_key: "lix_account", - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: globalAccountMap.get(item.accountId)!, - schema_version: "1.0", - version_id: item.versionId, - untracked: false, - })); - - if (accountsToTrack.length > 0) { - insertTransactionState({ - lix: args.lix, - data: accountsToTrack, - timestamp: _timestamp, - createChangeAuthors: false, - }); - } - } - - // Step 6: Batch create all change_author records - const changeAuthorData = []; - for (const data of dataWithChangeIds) { - for (const accountId of accountIds) { - changeAuthorData.push({ - entity_id: `${data.change_id}~${accountId}`, - schema_key: LixChangeAuthorSchema["x-lix-key"], - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - change_id: data.change_id, - account_id: accountId, - }), - schema_version: LixChangeAuthorSchema["x-lix-version"], - version_id: data.version_id, - untracked: false, - }); - } - } - - if (changeAuthorData.length > 0) { - insertTransactionState({ - lix: args.lix, - data: changeAuthorData, - timestamp: _timestamp, - createChangeAuthors: false, - }); - } - } - } - - // Add tracked results to results array - for (const data of dataWithChangeIds) { - results.push({ - entity_id: data.entity_id, - schema_key: data.schema_key, - file_id: data.file_id, - plugin_key: data.plugin_key, - snapshot_content: data.snapshot_content, - schema_version: data.schema_version, - version_id: data.version_id, - created_at: _timestamp, - updated_at: _timestamp, - untracked: false, - inherited_from_version_id: null, - change_id: data.change_id, - commit_id: "pending", - }); - } - } - - return results; -} diff --git a/packages/lix-sdk/src/state/materialize-state.test.ts b/packages/lix-sdk/src/state/materialize-state.test.ts index e1ae353afe..209ba3e447 100644 --- a/packages/lix-sdk/src/state/materialize-state.test.ts +++ b/packages/lix-sdk/src/state/materialize-state.test.ts @@ -804,16 +804,16 @@ describe("internal_materialization_commit_graph", () => { }); // Create three versions - await createVersion({ 
lix, id: "version-cycle-a" }); - await createVersion({ + const versionA = await createVersion({ lix, id: "version-cycle-a" }); + const versionB = await createVersion({ lix, id: "version-cycle-b", - inherits_from_version_id: "version-cycle-a", + inheritsFrom: versionA, }); await createVersion({ lix, id: "version-cycle-c", - inherits_from_version_id: "version-cycle-b", + inheritsFrom: versionB, }); // Manually create a cycle by updating version A to inherit from C @@ -1438,7 +1438,7 @@ describe("internal_materialization_version_ancestry", () => { await createVersion({ lix, id: "version-b", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); // Query ancestry for version B @@ -1496,19 +1496,19 @@ describe("internal_materialization_version_ancestry", () => { await createVersion({ lix, id: "version-b", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); await createVersion({ lix, id: "version-c", - inherits_from_version_id: "version-b", + inheritsFrom: { id: "version-b" }, }); await createVersion({ lix, id: "version-d", - inherits_from_version_id: "version-c", + inheritsFrom: { id: "version-c" }, }); // Query ancestry for version D @@ -1587,20 +1587,20 @@ describe("internal_materialization_version_ancestry", () => { await createVersion({ lix, id: "version-b", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); await createVersion({ lix, id: "version-c", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); // D inherits from B (not from both B and C) await createVersion({ lix, id: "version-d", - inherits_from_version_id: "version-b", + inheritsFrom: { id: "version-b" }, }); // Query ancestry for version D @@ -1671,7 +1671,7 @@ describe("internal_materialization_version_ancestry", () => { await createVersion({ lix, id: "standalone-version", - inherits_from_version_id: null, + inheritsFrom: null, }); // Query ancestry @@ -1712,14 +1712,14 @@ describe("internal_materialization_version_ancestry", () => { await createVersion({ lix, id: "version-a", - inherits_from_version_id: null, // Start with no inheritance + inheritsFrom: null, // Start with no inheritance }); // Create version B that inherits from A await createVersion({ lix, id: "version-b", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); // Now update A to inherit from B, creating a cycle @@ -1859,7 +1859,7 @@ describe("internal_state_materializer", () => { await createVersion({ lix, id: "child-version", - inherits_from_version_id: "parent-version", + inheritsFrom: { id: "parent-version" }, }); // Add entity to parent version @@ -1921,7 +1921,7 @@ describe("internal_state_materializer", () => { await createVersion({ lix, id: "child-version", - inherits_from_version_id: "parent-version", + inheritsFrom: { id: "parent-version" }, }); // Add entity to parent version @@ -1991,19 +1991,19 @@ describe("internal_state_materializer", () => { await createVersion({ lix, id: "version-b", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); await createVersion({ lix, id: "version-c", - inherits_from_version_id: "version-b", + inheritsFrom: { id: "version-b" }, }); // Add entity to version A await lix.db .insertInto("key_value_all") .values({ - key: "deep-key", + key: "key-from-a", value: "value-from-a", lixcol_version_id: "version-a", }) @@ -2025,7 +2025,7 @@ describe("internal_state_materializer", () => { .selectAll() .where("version_id", "=", "version-c") 
.where("schema_key", "=", "lix_key_value") - .where("entity_id", "in", ["deep-key", "b-only-key"]) + .where("entity_id", "in", ["key-from-a", "b-only-key"]) .orderBy("entity_id") .execute(); @@ -2033,15 +2033,17 @@ describe("internal_state_materializer", () => { expect(materializedStates).toHaveLength(2); // Check inherited from A (through B) - const deepKey = materializedStates.find( - (s: any) => s.entity_id === "deep-key" + const keyFromA = materializedStates.find( + (s: any) => s.entity_id === "key-from-a" ); - expect(deepKey).toBeDefined(); - expect(deepKey!.snapshot_content).toEqual({ - key: "deep-key", + expect(keyFromA).toBeDefined(); + expect(keyFromA!.snapshot_content).toEqual({ + key: "key-from-a", value: "value-from-a", }); - expect(deepKey!.inherited_from_version_id).toBe("version-a"); + expect(keyFromA!.inherited_from_version_id).toBe("version-a"); + // CRITICAL: version_id should be version-c (the viewing version), not version-a + expect(keyFromA!.version_id).toBe("version-c"); // Check inherited from B const bKey = materializedStates.find( @@ -2053,6 +2055,8 @@ describe("internal_state_materializer", () => { value: "value-from-b", }); expect(bKey!.inherited_from_version_id).toBe("version-b"); + // CRITICAL: version_id should be version-c (the viewing version), not version-b + expect(bKey!.version_id).toBe("version-c"); }, { simulations: [normalSimulation, outOfOrderSequenceSimulation], @@ -2138,7 +2142,7 @@ describe("internal_state_materializer", () => { await createVersion({ lix, id: "child-version", - inherits_from_version_id: "parent-version", + inheritsFrom: { id: "parent-version" }, }); // Add entity to parent version @@ -2237,17 +2241,17 @@ describe("internal_state_materializer", () => { await createVersion({ lix, id: "version-b", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); await createVersion({ lix, id: "version-c", - inherits_from_version_id: "version-a", + inheritsFrom: { id: "version-a" }, }); await createVersion({ lix, id: "version-d", - inherits_from_version_id: "version-b", + inheritsFrom: { id: "version-b" }, }); // Add entity to root version A diff --git a/packages/lix-sdk/src/state/primary-key.test.ts b/packages/lix-sdk/src/state/primary-key.test.ts deleted file mode 100644 index e871b054f8..0000000000 --- a/packages/lix-sdk/src/state/primary-key.test.ts +++ /dev/null @@ -1,158 +0,0 @@ -import { test, expect } from "vitest"; -import { - serializeStatePk, - parseStatePk, - type StatePkTag, -} from "./primary-key.js"; - -test("makePk creates correct composite key for simple values", () => { - const pk = serializeStatePk("U", "file1", "entity1", "version1"); - // No special characters, so no encoding needed - expect(pk).toBe("U~file1~entity1~version1"); -}); - -test("makePk handles special characters in fields", () => { - const pk = serializeStatePk( - "C", - "file|with|pipes", - "entity-123", - "ver/sion:1" - ); - // Special characters should be percent-encoded - expect(pk).toBe("C~file%7Cwith%7Cpipes~entity-123~ver%2Fsion%3A1"); -}); - -test("parsePk correctly parses simple composite key", () => { - const result = parseStatePk("UI~file1~entity1~version1"); - expect(result).toEqual({ - tag: "UI", - fileId: "file1", - entityId: "entity1", - versionId: "version1", - }); -}); - -test("parsePk handles fields with special characters", () => { - // Test with encoded values - const result = parseStatePk( - "CI~file%7Cwith%7Cpipes~entity-123~ver%2Fsion%3A1" - ); - expect(result).toEqual({ - tag: "CI", - fileId: "file|with|pipes", 
- entityId: "entity-123", - versionId: "ver/sion:1", - }); -}); - -test("roundtrip encoding and decoding preserves data", () => { - const testCases: Array<[StatePkTag, string, string, string]> = [ - ["U", "simple", "test", "v1"], - ["UI", "file-with-dash", "entity", "version"], - ["C", "file", "entity_with_underscore", "version"], - ["CI", "file", "entity", "version.with.dot"], - ["U", "a|b", "c:d", "e/f"], // various special chars - ["C", "---", "test", "v1"], // multiple dashes - ["UI", "", "", ""], // empty strings - ["CI", "file-", "-entity", "-version-"], // dashes at edges - ]; - - for (const [tag, fileId, entityId, versionId] of testCases) { - const pk = serializeStatePk(tag, fileId, entityId, versionId); - const parsed = parseStatePk(pk); - - expect(parsed).toEqual({ - tag, - fileId, - entityId, - versionId, - }); - } -}); - -test("parsePk handles edge cases", () => { - // Empty strings are valid values - const result1 = parseStatePk("U~~~"); - expect(result1).toEqual({ - tag: "U", - fileId: "", - entityId: "", - versionId: "", - }); - - // Complex field values - const result2 = parseStatePk( - "C~file/path/to/resource~com.example.entity~v1.2.3" - ); - expect(result2).toEqual({ - tag: "C", - fileId: "file/path/to/resource", - entityId: "com.example.entity", - versionId: "v1.2.3", - }); -}); - -test("all tag types are handled", () => { - const tags: StatePkTag[] = ["U", "UI", "C", "CI"]; - - for (const tag of tags) { - const pk = serializeStatePk(tag, "file", "entity", "version"); - const parsed = parseStatePk(pk); - expect(parsed.tag).toBe(tag); - } -}); - -test("makePk and parsePk handle Unicode correctly", () => { - const pk = serializeStatePk( - "U", - "文件-file", - "エンティティ-entity", - "版本-version" - ); - const parsed = parseStatePk(pk); - - expect(parsed).toEqual({ - tag: "U", - fileId: "文件-file", - entityId: "エンティティ-entity", - versionId: "版本-version", - }); -}); - -test("handles entity IDs with tildes", () => { - // This is the critical test - entity ID contains a tilde - const pk = serializeStatePk("C", "test_file", "cat1~id1", "version1"); - // The tilde in entity ID should be encoded as %7E - expect(pk).toBe("C~test_file~cat1%7Eid1~version1"); - - // And it should parse back correctly - const parsed = parseStatePk(pk); - expect(parsed).toEqual({ - tag: "C", - fileId: "test_file", - entityId: "cat1~id1", - versionId: "version1", - }); -}); - -test("parsePk throws on malformed input", () => { - // Missing parts - expect(() => parseStatePk("U")).toThrow( - "Invalid composite key: U - expected 4 parts" - ); - - // Only tag and one field - expect(() => parseStatePk("C~file")).toThrow( - "Invalid composite key: C~file - expected 4 parts" - ); - - // Two fields - expect(() => parseStatePk("UI~file~entity")).toThrow( - "Invalid composite key: UI~file~entity - expected 4 parts" - ); - - // Too many fields - expect(() => parseStatePk("CI~file~entity~version~extra")).toThrow( - "Invalid composite key: CI~file~entity~version~extra - expected 4 parts" - ); -}); diff --git a/packages/lix-sdk/src/state/resolved-state-view.test.ts b/packages/lix-sdk/src/state/resolved-state-view.test.ts index ba2ad0d4eb..d41d25ab2e 100644 --- a/packages/lix-sdk/src/state/resolved-state-view.test.ts +++ b/packages/lix-sdk/src/state/resolved-state-view.test.ts @@ -2,8 +2,26 @@ import { test, expect } from "vitest"; import { openLix } from "../lix/open-lix.js"; import { sql, type Kysely } from "kysely"; import type { LixInternalDatabaseSchema } from "../database/schema.js"; -import { serializeStatePk, 
parseStatePk } from "./primary-key.js"; +import { serializeStatePk, parseStatePk } from "./vtable/primary-key.js"; import { timestamp } from "../deterministic/timestamp.js"; +import { createVersion } from "../version/create-version.js"; + +/** + * Strips the internal vtable primary key column `_pk` from result rows. + * + * Why: internal_resolved_state_all exposes an implementation detail `_pk` used + * for efficient row identification across merged sources (txn, untracked, cache). + * Public views like state_all should be compared against resolved state without + * this internal column. Use when asserting equality between state_all and + * internal_resolved_state_all results. + */ +function filterPkCol>(rows: T[]): T[] { + return rows.map((r) => { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { _pk, ...rest } = r || ({} as any); + return rest as T; + }); +} test("resolved state view should return same results as state_all for a tracked entity", async () => { const lix = await openLix({}); @@ -25,8 +43,6 @@ test("resolved state view should return same results as state_all for a tracked .where("entity_id", "=", "test-key") .where("schema_key", "=", "lix_key_value") .selectAll() - // @ts-expect-error - internal state_all has a hidden _pk column - .select("_pk") .execute(); const resolvedStateResults = await lixInternalDb @@ -36,7 +52,7 @@ test("resolved state view should return same results as state_all for a tracked .selectAll() .execute(); - expect(stateAllResults).toEqual(resolvedStateResults); + expect(stateAllResults).toEqual(filterPkCol(resolvedStateResults)); }); test("resolved state view should return same results as state_all for an untracked entity", async () => { @@ -60,8 +76,6 @@ test("resolved state view should return same results as state_all for an untrack .where("entity_id", "=", "cache_stale") .where("schema_key", "=", "lix_key_value") .selectAll() - // @ts-expect-error - internal state_all has a hidden _pk column - .select("_pk") .execute(); const resolvedStateResults = await lixInternalDb @@ -71,7 +85,7 @@ test("resolved state view should return same results as state_all for an untrack .selectAll() .execute(); - expect(stateAllResults).toEqual(resolvedStateResults); + expect(stateAllResults).toEqual(filterPkCol(resolvedStateResults)); // Verify it's marked as untracked expect(stateAllResults[0]?.untracked).toBe(1); @@ -105,8 +119,6 @@ test("resolved state view should handle version inheritance", async () => { .where("entity_id", "=", "inherited-key") .where("version_id", "=", activeVersion!.version_id) .selectAll() - // @ts-expect-error - internal state_all has a hidden _pk column - .select("_pk") .execute(); const resolvedStateResults = await lixInternalDb @@ -121,7 +133,7 @@ test("resolved state view should handle version inheritance", async () => { expect(resolvedStateResults).toHaveLength(1); // Results should match - expect(stateAllResults).toEqual(resolvedStateResults); + expect(stateAllResults).toEqual(filterPkCol(resolvedStateResults)); // Verify it's marked as inherited from global expect(stateAllResults[0]?.inherited_from_version_id).toBe("global"); @@ -155,8 +167,6 @@ test("resolved state view should handle inherited untracked entities", async () .selectFrom("state_all") .where("entity_id", "=", "inherited-untracked-key") .where("version_id", "=", activeVersion!.version_id) - // @ts-expect-error - internal state_all has a hidden _pk column - .select("_pk") .selectAll() .execute(); @@ -172,7 +182,7 @@ test("resolved state view should 
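// The hidden `_pk` column that filterPkCol strips is a serialized composite key.
// A minimal round-trip sketch using the internal helpers at their new location
// (paths assume a file living in src/state, like these tests):
import { serializeStatePk, parseStatePk } from "./vtable/primary-key.js";

// tag ~ file_id ~ entity_id ~ version_id, joined with "~".
const pk = serializeStatePk("U", "file1", "entity1", "version1");
// "U~file1~entity1~version1"

// Special characters (including "~") are percent-encoded, so parsing is lossless.
const parsed = parseStatePk(pk);
// { tag: "U", fileId: "file1", entityId: "entity1", versionId: "version1" }

// Malformed keys throw, e.g. parseStatePk("U~file")
// -> "Invalid composite key: U~file - expected 4 parts".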
handle inherited untracked entities", async () expect(resolvedStateResults).toHaveLength(1); // Results should match - expect(stateAllResults).toEqual(resolvedStateResults); + expect(stateAllResults).toEqual(filterPkCol(resolvedStateResults)); // Verify it's marked as inherited from global and untracked expect(stateAllResults[0]?.inherited_from_version_id).toBe("global"); @@ -253,6 +263,11 @@ test("resolved state view generates correct composite keys", async () => { const lix = await openLix({}); const lixInternalDb = lix.db as unknown as Kysely; + // Import updateStateCacheV2 at the top of the test + const { updateStateCache: updateStateCacheV2 } = await import( + "./cache/update-state-cache.js" + ); + // Insert some test data into untracked state const now = timestamp({ lix }); await lixInternalDb @@ -272,25 +287,24 @@ test("resolved state view generates correct composite keys", async () => { }) .execute(); - // Insert some test data into state cache - await lixInternalDb - .insertInto("internal_state_cache") - .values({ - entity_id: "entity2", - schema_key: "test_schema", - file_id: "file2", - plugin_key: "test_plugin", - snapshot_content: sql`jsonb(${JSON.stringify({ test: "data2" })})`, - schema_version: "1.0", - version_id: "version2", - change_id: "change1", - inheritance_delete_marker: 0, - inherited_from_version_id: null, - commit_id: "changeset1", - created_at: timestamp({ lix }), - updated_at: timestamp({ lix }), - }) - .execute(); + // Insert some test data into state cache using updateStateCacheV2 + updateStateCacheV2({ + lix, + changes: [ + { + id: "change1", + entity_id: "entity2", + file_id: "file2", + schema_key: "test_schema", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ test: "data2" }), + schema_version: "1.0", + created_at: timestamp({ lix }), + }, + ], + commit_id: "changeset1", + version_id: "version2", + }); // Query the resolved state view const results = await lixInternalDb @@ -362,73 +376,159 @@ test("resolved state view generates correct composite keys", async () => { }); }); +test("resolved state view should handle transitive inheritance (A->B->C)", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + }, + ], + }); + const lixInternalDb = lix.db as unknown as Kysely; + const currentTimestamp = timestamp({ lix }); + + // Create version hierarchy: C inherits from B, B inherits from A + const versionA = await createVersion({ + lix, + name: "Version A", + id: "version_a", + }); + + const versionB = await createVersion({ + lix, + name: "Version B", + id: "version_b", + inheritsFrom: versionA, + }); + + const versionC = await createVersion({ + lix, + name: "Version C", + id: "version_c", + inheritsFrom: versionB, + }); + + // Insert an entity only in version A + await lix.db + .insertInto("state_all") + .values({ + entity_id: "entity_a", + schema_key: "test_schema", + file_id: "file1", + version_id: versionA.id, + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ + id: "entity_a", + value: "from_version_a", + }) as any, + schema_version: "1.0", + created_at: currentTimestamp, + updated_at: currentTimestamp, + }) + .execute(); + + // Query resolved state for version C (should see entity_a through transitive inheritance) + const resolvedForC = await lixInternalDb + .selectFrom("internal_resolved_state_all") + .select([ + "entity_id", + "schema_key", + "file_id", + "version_id", + "inherited_from_version_id", + sql`json(snapshot_content)`.as("snapshot_content"), + 
]) + .where("schema_key", "=", "test_schema") + .where("version_id", "=", versionC.id) + .execute(); + + // Version C should see entity_a inherited from version_a through version_b + expect(resolvedForC).toHaveLength(1); + + const entityA = resolvedForC[0]; + expect(entityA?.entity_id).toBe("entity_a"); + expect(entityA?.version_id).toBe(versionC.id); + expect(entityA?.inherited_from_version_id).toBe(versionA.id); + expect((entityA?.snapshot_content as any).value).toBe("from_version_a"); + + // Also verify version B sees entity_a inherited from A + const resolvedForB = await lixInternalDb + .selectFrom("internal_resolved_state_all") + .select(["entity_id", "version_id", "inherited_from_version_id"]) + .where("schema_key", "=", "test_schema") + .where("version_id", "=", versionB.id) + .execute(); + + expect(resolvedForB).toHaveLength(1); + expect(resolvedForB[0]?.entity_id).toBe("entity_a"); + expect(resolvedForB[0]?.inherited_from_version_id).toBe(versionA.id); +}); + test("resolved state view generates correct composite keys for inherited state", async () => { const lix = await openLix({}); const lixInternalDb = lix.db as unknown as Kysely; + // Import updateStateCacheV2 at the top of the test + const { updateStateCache: updateStateCacheV2 } = await import( + "./cache/update-state-cache.js" + ); + // Create parent and child versions const parentVersionId = "parent_version"; const childVersionId = "child_version"; - // Insert version records - await lixInternalDb - .insertInto("internal_state_cache") - .values([ + // Insert version records using updateStateCacheV2 + const versionTimestamp = timestamp({ lix }); + updateStateCacheV2({ + lix, + changes: [ { + id: "change1", entity_id: parentVersionId, schema_key: "lix_version", file_id: "lix", plugin_key: "lix", - snapshot_content: sql`jsonb(${JSON.stringify({ id: parentVersionId })})`, + snapshot_content: JSON.stringify({ id: parentVersionId }), schema_version: "1.0", - version_id: "global", - change_id: "change1", - inheritance_delete_marker: 0, - inherited_from_version_id: null, - commit_id: "changeset1", - created_at: timestamp({ lix }), - updated_at: timestamp({ lix }), + created_at: versionTimestamp, }, { + id: "change2", entity_id: childVersionId, schema_key: "lix_version", file_id: "lix", plugin_key: "lix", - snapshot_content: sql`jsonb(${JSON.stringify({ + snapshot_content: JSON.stringify({ id: childVersionId, inherits_from_version_id: parentVersionId, - })})`, + }), schema_version: "1.0", - version_id: "global", - change_id: "change2", - inheritance_delete_marker: 0, - inherited_from_version_id: null, - commit_id: "changeset2", - created_at: timestamp({ lix }), - updated_at: timestamp({ lix }), + created_at: versionTimestamp, }, - ]) - .execute(); + ], + commit_id: "changeset1", + version_id: "global", + }); - // Insert data in parent version (cached) - await lixInternalDb - .insertInto("internal_state_cache") - .values({ - entity_id: "inherited_entity", - schema_key: "test_schema", - file_id: "file3", - plugin_key: "test_plugin", - snapshot_content: sql`jsonb(${JSON.stringify({ test: "inherited_data" })})`, - schema_version: "1.0", - version_id: parentVersionId, - change_id: "change3", - inheritance_delete_marker: 0, - inherited_from_version_id: null, - commit_id: "changeset3", - created_at: timestamp({ lix }), - updated_at: timestamp({ lix }), - }) - .execute(); + // Insert data in parent version (cached) using updateStateCacheV2 + updateStateCacheV2({ + lix, + changes: [ + { + id: "change3", + entity_id: "inherited_entity", + 
schema_key: "test_schema", + file_id: "file3", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ test: "inherited_data" }), + schema_version: "1.0", + created_at: timestamp({ lix }), + }, + ], + commit_id: "changeset3", + version_id: parentVersionId, + }); // Insert data in parent version (untracked) const untrackedTimestamp = timestamp({ lix }); diff --git a/packages/lix-sdk/src/state/resolved-state-view.ts b/packages/lix-sdk/src/state/resolved-state-view.ts index 7b6ef582d5..911a734e71 100644 --- a/packages/lix-sdk/src/state/resolved-state-view.ts +++ b/packages/lix-sdk/src/state/resolved-state-view.ts @@ -1,5 +1,5 @@ -import type { StateAllView } from "./schema.js"; -import { encodeStatePkPart } from "./primary-key.js"; +import type { StateAllView } from "./views/state-all.js"; +import { encodeStatePkPart } from "./vtable/primary-key.js"; import type { Lix } from "../index.js"; /** @@ -8,8 +8,9 @@ import type { Lix } from "../index.js"; * issues when operations like lix_timestamp() need to query state. * * The "resolved state" combines: - * - Untracked state (highest priority) - * - Tracked state from cache + * - Transaction state (highest priority - pending changes) + * - Untracked state (second priority) + * - Tracked state from cache (third priority) * - Inherited state (resolved from parent versions) * * IMPORTANT: This view assumes that the cache is fresh. It does not check @@ -32,62 +33,101 @@ export function applyResolvedStateView( return encodeStatePkPart(String(part)); }, }); - // Create the view that provides resolved state by combining cache and untracked state + // Create the view that provides resolved state by combining transaction, cache and untracked state lix.sqlite.exec(` - CREATE VIEW IF NOT EXISTS internal_resolved_state_all AS - SELECT * FROM ( - -- 1. Untracked state (highest priority) - SELECT - 'U' || '~' || lix_encode_pk_part(file_id) || '~' || lix_encode_pk_part(entity_id) || '~' || lix_encode_pk_part(version_id) as _pk, - entity_id, - schema_key, - file_id, - plugin_key, - json(snapshot_content) as snapshot_content, - schema_version, - version_id, - created_at, - updated_at, - NULL as inherited_from_version_id, - 'untracked' as change_id, - 1 as untracked, - 'untracked' as commit_id - FROM internal_state_all_untracked - WHERE inheritance_delete_marker = 0 -- Hide tombstones - AND snapshot_content IS NOT NULL -- Hide deleted entries + CREATE VIEW IF NOT EXISTS internal_resolved_state_all AS + SELECT * FROM ( + -- 1. Transaction state (highest priority) - pending changes + SELECT + 'T' || '~' || lix_encode_pk_part(file_id) || '~' || lix_encode_pk_part(entity_id) || '~' || lix_encode_pk_part(version_id) as _pk, + entity_id, + schema_key, + file_id, + plugin_key, + json(snapshot_content) as snapshot_content, + schema_version, + version_id, + created_at, + created_at as updated_at, + NULL as inherited_from_version_id, + id as change_id, + untracked, + 'pending' as commit_id + FROM internal_change_in_transaction + -- Include both live rows and deletion tombstones (NULL snapshot_content) UNION ALL - -- 2. 
Tracked state from cache (second priority) - only if no untracked exists - SELECT - 'C' || '~' || lix_encode_pk_part(file_id) || '~' || lix_encode_pk_part(entity_id) || '~' || lix_encode_pk_part(version_id) as _pk, - entity_id, - schema_key, - file_id, - plugin_key, - json(snapshot_content) as snapshot_content, - schema_version, - version_id, - created_at, - updated_at, - inherited_from_version_id, - change_id, - 0 as untracked, - commit_id - FROM internal_state_cache - WHERE inheritance_delete_marker = 0 -- Hide copy-on-write deletions - AND snapshot_content IS NOT NULL -- Hide tombstones (deleted entries) - AND NOT EXISTS ( - SELECT 1 FROM internal_state_all_untracked unt - WHERE unt.entity_id = internal_state_cache.entity_id - AND unt.schema_key = internal_state_cache.schema_key - AND unt.file_id = internal_state_cache.file_id - AND unt.version_id = internal_state_cache.version_id - ) + -- 2. Untracked state (second priority) - only if no transaction exists + SELECT + 'U' || '~' || lix_encode_pk_part(file_id) || '~' || lix_encode_pk_part(entity_id) || '~' || lix_encode_pk_part(version_id) as _pk, + entity_id, + schema_key, + file_id, + plugin_key, + json(snapshot_content) as snapshot_content, + schema_version, + version_id, + created_at, + updated_at, + NULL as inherited_from_version_id, + 'untracked' as change_id, + 1 as untracked, + 'untracked' as commit_id + FROM internal_state_all_untracked + WHERE ( + (inheritance_delete_marker = 0 AND snapshot_content IS NOT NULL) -- live + OR (inheritance_delete_marker = 1 AND snapshot_content IS NULL) -- tombstone + ) + AND NOT EXISTS ( + SELECT 1 FROM internal_change_in_transaction txn + WHERE txn.entity_id = internal_state_all_untracked.entity_id + AND txn.schema_key = internal_state_all_untracked.schema_key + AND txn.file_id = internal_state_all_untracked.file_id + AND txn.version_id = internal_state_all_untracked.version_id + ) UNION ALL - -- 3. Inherited tracked state (lower priority) - only if no untracked or tracked exists + -- 3. Tracked state from cache (third priority) - only if no transaction or untracked exists + SELECT + 'C' || '~' || lix_encode_pk_part(file_id) || '~' || lix_encode_pk_part(entity_id) || '~' || lix_encode_pk_part(version_id) as _pk, + entity_id, + schema_key, + file_id, + plugin_key, + json(snapshot_content) as snapshot_content, + schema_version, + version_id, + created_at, + updated_at, + inherited_from_version_id, + change_id, + 0 as untracked, + commit_id + FROM internal_state_cache + WHERE ( + (inheritance_delete_marker = 0 AND snapshot_content IS NOT NULL) -- live + OR (inheritance_delete_marker = 1 AND snapshot_content IS NULL) -- tombstone + ) + AND NOT EXISTS ( + SELECT 1 FROM internal_change_in_transaction txn + WHERE txn.entity_id = internal_state_cache.entity_id + AND txn.schema_key = internal_state_cache.schema_key + AND txn.file_id = internal_state_cache.file_id + AND txn.version_id = internal_state_cache.version_id + ) + AND NOT EXISTS ( + SELECT 1 FROM internal_state_all_untracked unt + WHERE unt.entity_id = internal_state_cache.entity_id + AND unt.schema_key = internal_state_cache.schema_key + AND unt.file_id = internal_state_cache.file_id + AND unt.version_id = internal_state_cache.version_id + ) + + UNION ALL + + -- 4. 
Inherited tracked state (fourth priority) - only if no transaction, untracked or tracked exists SELECT 'CI' || '~' || lix_encode_pk_part(isc.file_id) || '~' || lix_encode_pk_part(isc.entity_id) || '~' || lix_encode_pk_part(vi.version_id) as _pk, isc.entity_id, @@ -99,23 +139,46 @@ export function applyResolvedStateView( vi.version_id, -- Return child version_id isc.created_at, isc.updated_at, - vi.parent_version_id as inherited_from_version_id, + isc.version_id as inherited_from_version_id, -- The actual version containing the entity isc.change_id, 0 as untracked, isc.commit_id FROM ( - -- Get version inheritance relationships from cache - SELECT DISTINCT - json_extract(isc_v.snapshot_content, '$.id') AS version_id, - json_extract(isc_v.snapshot_content, '$.inherits_from_version_id') AS parent_version_id - FROM internal_state_cache isc_v - WHERE isc_v.schema_key = 'lix_version' + -- Get all ancestor versions using recursive CTE for transitive inheritance + WITH RECURSIVE version_inheritance AS ( + -- Base case: direct inheritance relationships + SELECT + json_extract(v.snapshot_content, '$.id') AS version_id, + json_extract(v.snapshot_content, '$.inherits_from_version_id') AS ancestor_version_id + FROM internal_state_cache v + WHERE v.schema_key = 'lix_version' + AND json_extract(v.snapshot_content, '$.inherits_from_version_id') IS NOT NULL + + UNION + + -- Recursive case: follow the inheritance chain + SELECT + vi.version_id, + json_extract(v.snapshot_content, '$.inherits_from_version_id') AS ancestor_version_id + FROM version_inheritance vi + JOIN internal_state_cache v ON v.schema_key = 'lix_version' + AND json_extract(v.snapshot_content, '$.id') = vi.ancestor_version_id + WHERE json_extract(v.snapshot_content, '$.inherits_from_version_id') IS NOT NULL + ) + SELECT DISTINCT version_id, ancestor_version_id + FROM version_inheritance ) vi - JOIN internal_state_cache isc ON isc.version_id = vi.parent_version_id - WHERE vi.parent_version_id IS NOT NULL - -- Only inherit entities that exist (not deleted) in parent - AND isc.inheritance_delete_marker = 0 - AND isc.snapshot_content IS NOT NULL -- Don't inherit tombstones + JOIN internal_state_cache isc ON isc.version_id = vi.ancestor_version_id + WHERE isc.inheritance_delete_marker = 0 -- Only inherit entities that exist (not deleted) + AND isc.snapshot_content IS NOT NULL -- Don't inherit tombstones + -- Don't inherit if child has transaction state + AND NOT EXISTS ( + SELECT 1 FROM internal_change_in_transaction txn + WHERE txn.version_id = vi.version_id + AND txn.entity_id = isc.entity_id + AND txn.schema_key = isc.schema_key + AND txn.file_id = isc.file_id + ) -- Don't inherit if child has tracked state AND NOT EXISTS ( SELECT 1 FROM internal_state_cache child_isc @@ -135,7 +198,7 @@ export function applyResolvedStateView( UNION ALL - -- 4. Inherited untracked state (lowest priority) - only if no untracked or tracked exists + -- 5. 
Inherited untracked state (lowest priority) - only if no transaction, untracked or tracked exists SELECT 'UI' || '~' || lix_encode_pk_part(unt.file_id) || '~' || lix_encode_pk_part(unt.entity_id) || '~' || lix_encode_pk_part(vi.version_id) as _pk, unt.entity_id, @@ -147,10 +210,81 @@ export function applyResolvedStateView( vi.version_id, -- Return child version_id unt.created_at, unt.updated_at, - vi.parent_version_id as inherited_from_version_id, + unt.version_id as inherited_from_version_id, -- The actual version containing the entity 'untracked' as change_id, 1 as untracked, 'untracked' as commit_id + FROM ( + -- Get all ancestor versions using recursive CTE for transitive inheritance + WITH RECURSIVE version_inheritance AS ( + -- Base case: direct inheritance relationships + SELECT + json_extract(v.snapshot_content, '$.id') AS version_id, + json_extract(v.snapshot_content, '$.inherits_from_version_id') AS ancestor_version_id + FROM internal_state_cache v + WHERE v.schema_key = 'lix_version' + AND json_extract(v.snapshot_content, '$.inherits_from_version_id') IS NOT NULL + + UNION + + -- Recursive case: follow the inheritance chain + SELECT + vi.version_id, + json_extract(v.snapshot_content, '$.inherits_from_version_id') AS ancestor_version_id + FROM version_inheritance vi + JOIN internal_state_cache v ON v.schema_key = 'lix_version' + AND json_extract(v.snapshot_content, '$.id') = vi.ancestor_version_id + WHERE json_extract(v.snapshot_content, '$.inherits_from_version_id') IS NOT NULL + ) + SELECT DISTINCT version_id, ancestor_version_id + FROM version_inheritance + ) vi + JOIN internal_state_all_untracked unt ON unt.version_id = vi.ancestor_version_id + WHERE unt.inheritance_delete_marker = 0 -- Only inherit entities that exist (not deleted) + AND unt.snapshot_content IS NOT NULL -- Don't inherit tombstones + -- Don't inherit if child has transaction state + AND NOT EXISTS ( + SELECT 1 FROM internal_change_in_transaction txn + WHERE txn.version_id = vi.version_id + AND txn.entity_id = unt.entity_id + AND txn.schema_key = unt.schema_key + AND txn.file_id = unt.file_id + ) + -- Don't inherit if child has tracked state + AND NOT EXISTS ( + SELECT 1 FROM internal_state_cache child_isc + WHERE child_isc.version_id = vi.version_id + AND child_isc.entity_id = unt.entity_id + AND child_isc.schema_key = unt.schema_key + AND child_isc.file_id = unt.file_id + ) + -- Don't inherit if child has untracked state + AND NOT EXISTS ( + SELECT 1 FROM internal_state_all_untracked child_unt + WHERE child_unt.version_id = vi.version_id + AND child_unt.entity_id = unt.entity_id + AND child_unt.schema_key = unt.schema_key + AND child_unt.file_id = unt.file_id + ) + + UNION ALL + + -- 6. 
Inherited transaction state (after inherited untracked) - only if no direct transaction exists + SELECT + 'TI' || '~' || lix_encode_pk_part(txn.file_id) || '~' || lix_encode_pk_part(txn.entity_id) || '~' || lix_encode_pk_part(vi.version_id) as _pk, + txn.entity_id, + txn.schema_key, + txn.file_id, + txn.plugin_key, + json(txn.snapshot_content) as snapshot_content, + txn.schema_version, + vi.version_id, -- Return child version_id + txn.created_at, + txn.created_at as updated_at, + vi.parent_version_id as inherited_from_version_id, + txn.id as change_id, + txn.untracked, + 'pending' as commit_id FROM ( -- Get version inheritance relationships from cache SELECT DISTINCT @@ -159,26 +293,33 @@ export function applyResolvedStateView( FROM internal_state_cache isc_v WHERE isc_v.schema_key = 'lix_version' ) vi - JOIN internal_state_all_untracked unt ON unt.version_id = vi.parent_version_id + JOIN internal_change_in_transaction txn ON txn.version_id = vi.parent_version_id WHERE vi.parent_version_id IS NOT NULL - -- Only inherit entities that exist (not deleted) in parent - AND unt.inheritance_delete_marker = 0 - AND unt.snapshot_content IS NOT NULL -- Don't inherit tombstones + -- Only inherit entities that exist (not deleted) in parent transaction + AND txn.snapshot_content IS NOT NULL + -- Don't inherit if child has direct transaction state + AND NOT EXISTS ( + SELECT 1 FROM internal_change_in_transaction child_txn + WHERE child_txn.version_id = vi.version_id + AND child_txn.entity_id = txn.entity_id + AND child_txn.schema_key = txn.schema_key + AND child_txn.file_id = txn.file_id + ) -- Don't inherit if child has tracked state AND NOT EXISTS ( SELECT 1 FROM internal_state_cache child_isc WHERE child_isc.version_id = vi.version_id - AND child_isc.entity_id = unt.entity_id - AND child_isc.schema_key = unt.schema_key - AND child_isc.file_id = unt.file_id + AND child_isc.entity_id = txn.entity_id + AND child_isc.schema_key = txn.schema_key + AND child_isc.file_id = txn.file_id ) -- Don't inherit if child has untracked state AND NOT EXISTS ( SELECT 1 FROM internal_state_all_untracked child_unt WHERE child_unt.version_id = vi.version_id - AND child_unt.entity_id = unt.entity_id - AND child_unt.schema_key = unt.schema_key - AND child_unt.file_id = unt.file_id + AND child_unt.entity_id = txn.entity_id + AND child_unt.schema_key = txn.schema_key + AND child_unt.file_id = txn.file_id ) ); `); @@ -191,7 +332,7 @@ export type InternalResolvedStateAllView = Omit< > & { /** * Primary key in format: tag~file_id~entity_id~version_id - * where tag is U (untracked), UI (untracked inherited), C (cached), or CI (cached inherited) + * where tag is T (transaction), U (untracked), UI (untracked inherited), C (cached), CI (cached inherited), or TI (transaction inherited) */ _pk: string; // needs to manually stringify snapshot_content diff --git a/packages/lix-sdk/src/state/schema.ts b/packages/lix-sdk/src/state/schema.ts index b929899e6f..6fa2d3bead 100644 --- a/packages/lix-sdk/src/state/schema.ts +++ b/packages/lix-sdk/src/state/schema.ts @@ -1,1002 +1,26 @@ -import type { Generated, Insertable, Selectable, Updateable } from "kysely"; -import type { SqliteWasmDatabase } from "sqlite-wasm-kysely"; -import { validateStateMutation } from "./validate-state-mutation.js"; -import type { LixInternalDatabaseSchema } from "../database/schema.js"; -import type { Kysely } from "kysely"; -import { sql } from "kysely"; -import { insertTransactionState } from "./insert-transaction-state.js"; -import { executeSync } from 
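// The resolved state view above picks each (entity, file, version) from the
// highest-priority source only: transaction state, then untracked state, then
// the tracked cache, then state inherited from ancestor versions. A sketch of
// inspecting which source a row came from, using the internal Kysely types the
// tests use (paths assume a file in src/state):
import { openLix } from "../lix/open-lix.js";
import type { Kysely } from "kysely";
import type { LixInternalDatabaseSchema } from "../database/schema.js";

const lix = await openLix({});
const internalDb = lix.db as unknown as Kysely<LixInternalDatabaseSchema>;

const rows = await internalDb
	.selectFrom("internal_resolved_state_all")
	.select([
		"entity_id",
		"version_id",
		"commit_id",
		"untracked",
		"inherited_from_version_id",
	])
	.where("schema_key", "=", "lix_key_value")
	.execute();

// commit_id === "pending"   -> row comes from the open transaction
// commit_id === "untracked" -> row comes from untracked state
// otherwise                 -> row comes from the tracked cache; a non-null
//                              inherited_from_version_id marks inherited state.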
"../database/execute-sync.js"; +import type { Lix } from "../lix/open-lix.js"; import { applyMaterializeStateSchema } from "./materialize-state.js"; import { applyResolvedStateView } from "./resolved-state-view.js"; -import { applyStateCacheSchema } from "./cache/schema.js"; -import { isStaleStateCache } from "./cache/is-stale-state-cache.js"; -import { markStateCacheAsFresh } from "./cache/mark-state-cache-as-stale.js"; import { applyUntrackedStateSchema } from "./untracked/schema.js"; -import { commit } from "./commit.js"; -import { parseStatePk, serializeStatePk } from "./primary-key.js"; -import { uuidV7 } from "../deterministic/uuid-v7.js"; -import { LixLogSchema } from "../log/schema.js"; -import { shouldLog } from "../log/create-lix-own-log.js"; -import { populateStateCache } from "./cache/populate-state-cache.js"; -import type { Lix } from "../lix/open-lix.js"; -// import { createLixOwnLogSync } from "../log/create-lix-own-log.js"; - -// Virtual table schema definition -const VTAB_CREATE_SQL = `CREATE TABLE x( - _pk HIDDEN TEXT NOT NULL PRIMARY KEY, - entity_id TEXT, - schema_key TEXT, - file_id TEXT, - version_id TEXT, - plugin_key TEXT, - snapshot_content TEXT, - schema_version TEXT, - created_at TEXT, - updated_at TEXT, - inherited_from_version_id TEXT, - change_id TEXT, - untracked INTEGER, - commit_id TEXT -) WITHOUT ROWID;`; +import { applyStateCacheV2Schema } from "./cache/schema.js"; +import { applyStateAllView } from "./views/state-all.js"; +import { applyStateWithTombstonesView } from "./views/state-with-tombstones.js"; +import { applyStateView } from "./views/state.js"; +import { applyStateVTable } from "./vtable/index.js"; export function applyStateDatabaseSchema( lix: Pick ): void { - const { sqlite, hooks } = lix; - const db = lix.db as unknown as Kysely; - applyMaterializeStateSchema(lix); - applyStateCacheSchema(lix); + applyStateCacheV2Schema(lix); applyUntrackedStateSchema(lix); applyResolvedStateView(lix); - sqlite.createFunction({ - name: "validate_snapshot_content", - deterministic: true, - arity: 5, - // @ts-expect-error - type mismatch - xFunc: (_ctxPtr: number, ...args: any[]) => { - return validateStateMutation({ - lix: { sqlite, db: db as any }, - schema: args[0] ? JSON.parse(args[0]) : null, - snapshot_content: JSON.parse(args[1]), - operation: args[2] || undefined, - entity_id: args[3] || undefined, - version_id: args[4], - }); - }, - }); - - // Create virtual table using the proper SQLite WASM API (following vtab-experiment pattern) - const capi = sqlite.sqlite3.capi; - const module = new capi.sqlite3_module(); - - // Store cursor state - const cursorStates = new Map(); - - // Guard flag to prevent recursion when logging - let loggingIsInProgress = false; - - /** - * Flag to prevent recursion when updating cache state. - * - * The guard ensures that while we're marking cache as fresh, any nested state queries - * bypass the cache and use materialized state directly, preventing recursion. - * - * Why is this needed is unclear. Queries are executed in sync. Why concurrent - * reads simultaneously update the cache is not clear. Given that state - * materialization is rare, this workaround has been deemed sufficient. - * - * This is a temporary fix and should be revisited in the future. 
- */ - let isUpdatingCacheState = false; - - module.installMethods( - { - xCreate: ( - dbHandle: any, - _pAux: any, - _argc: number, - _argv: any, - pVTab: any - ) => { - const result = capi.sqlite3_declare_vtab(dbHandle, VTAB_CREATE_SQL); - if (result !== capi.SQLITE_OK) { - return result; - } - - sqlite.sqlite3.vtab.xVtab.create(pVTab); - return capi.SQLITE_OK; - }, - - xConnect: ( - dbHandle: any, - _pAux: any, - _argc: number, - _argv: any, - pVTab: any - ) => { - const result = capi.sqlite3_declare_vtab(dbHandle, VTAB_CREATE_SQL); - if (result !== capi.SQLITE_OK) { - return result; - } - - // wiping all rows on connect simulates a temp table for internal_change_in_transaction. - // we need to clear any existing changes on connect in case a transaction remained open. - // otherwise, the lix can't boot up properly and will throw an error. - // - // an open transaction can happen if the storage layer crashes or is not properly shut down. - // - // PS internal_change_in_transaction is not a temp table because sqlite - // prohibits access to temp tables from virtual tables - executeSync({ - lix: { sqlite }, - query: db.deleteFrom("internal_change_in_transaction"), - }); - - sqlite.sqlite3.vtab.xVtab.create(pVTab); - return capi.SQLITE_OK; - }, - - xBegin: () => { - // TODO comment in after all internal v-table logic uses underlying state view - // // assert that we are not already in a transaction (the internal_change_in_transaction table is empty) - // const existingChangesInTransaction = executeSync({ - // lix: { sqlite }, - // query: db.selectFrom("internal_change_in_transaction").selectAll(), - // }); - // if (existingChangesInTransaction.length > 0) { - // const errorMessage = "Transaction already in progress"; - // if (canLog()) { - // createLixOwnLogSync({ - // lix: { sqlite, db: db as any }, - // key: "lix_state_xbegin_error", - // level: "error", - // message: `xBegin error: ${errorMessage}`, - // }); - // } - // throw new Error(errorMessage); - // } - }, - - xCommit: () => { - return commit({ lix: { sqlite, db: db as any, hooks } }); - }, - - xRollback: () => { - sqlite.exec({ - sql: "DELETE FROM internal_change_in_transaction", - returnValue: "resultRows", - }); - }, - - xBestIndex: (pVTab: any, pIdxInfo: any) => { - try { - const idxInfo = sqlite.sqlite3.vtab.xIndexInfo(pIdxInfo); - - // Track which columns have equality constraints - const usableConstraints: string[] = []; - let argIndex = 0; - - // Column mapping (matching the CREATE TABLE order in xCreate/xConnect) - const columnMap = [ - "_pk", // 0 (HIDDEN column) - "entity_id", // 1 - "schema_key", // 2 - "file_id", // 3 - "version_id", // 4 - "plugin_key", // 5 - "snapshot_content", // 6 - "schema_version", // 7 - "created_at", // 8 - "updated_at", // 9 - "inherited_from_version_id", // 10 - "change_id", // 11 - "untracked", // 12 - "commit_id", // 13 - ]; - - // Process constraints - // @ts-expect-error - idxInfo.$nConstraint is not defined in the type - for (let i = 0; i < idxInfo.$nConstraint; i++) { - // @ts-expect-error - idxInfo.nthConstraint is not defined in the type - const constraint = idxInfo.nthConstraint(i); - - // Only handle equality constraints that are usable - if ( - constraint.$op === capi.SQLITE_INDEX_CONSTRAINT_EQ && - constraint.$usable - ) { - const columnName = columnMap[constraint.$iColumn]; - if (columnName) { - usableConstraints.push(columnName); - - // Mark this constraint as used - // @ts-expect-error - idxInfo.nthConstraintUsage is not defined in the type - 
idxInfo.nthConstraintUsage(i).$argvIndex = ++argIndex; - } - } - } - - const fullTableCost = 1000000; // Default cost for full table scan - const fullTableRows = 10000000; - - // Set the index string to pass column names to xFilter - if (usableConstraints.length > 0) { - const idxStr = usableConstraints.join(","); - // @ts-expect-error - idxInfo.$idxStr is not defined in the type - idxInfo.$idxStr = sqlite.sqlite3.wasm.allocCString(idxStr, false); - // @ts-expect-error - idxInfo.$needToFreeIdxStr is not defined in the type - idxInfo.$needToFreeIdxStr = 1; // We don't need SQLite to free this string - - // Lower cost when we can use filters (more selective) - // @ts-expect-error - idxInfo.$estimatedCost is not defined in the type - idxInfo.$estimatedCost = - fullTableCost / (usableConstraints.length + 1); - // @ts-expect-error - idxInfo.$estimatedRows is not defined in the type - idxInfo.$estimatedRows = Math.ceil( - fullTableRows / (usableConstraints.length + 1) - ); - } else { - // @ts-expect-error - idxInfo.$needToFreeIdxStr is not defined in the type - idxInfo.$needToFreeIdxStr = 0; - - // Higher cost for full table scan - // @ts-expect-error - idxInfo.$estimatedCost is not defined in the type - idxInfo.$estimatedCost = fullTableCost; - // @ts-expect-error - idxInfo.$estimatedRows is not defined in the type - idxInfo.$estimatedRows = fullTableRows; - } - - return capi.SQLITE_OK; - } finally { - // Always log timing even if error occurs - } - }, - - xDisconnect: () => { - return capi.SQLITE_OK; - }, - - xDestroy: () => { - return capi.SQLITE_OK; - }, - - xOpen: (_pVTab: any, pCursor: any) => { - const cursor = sqlite.sqlite3.vtab.xCursor.create(pCursor); - cursorStates.set(cursor.pointer, { - results: [], - rowIndex: 0, - }); - return capi.SQLITE_OK; - }, - - xClose: (pCursor: any) => { - cursorStates.delete(pCursor); - return capi.SQLITE_OK; - }, - - xFilter: ( - pCursor: any, - idxNum: number, - idxStrPtr: number, - argc: number, - argv: any - ) => { - const cursorState = cursorStates.get(pCursor); - const idxStr = sqlite.sqlite3.wasm.cstrToJs(idxStrPtr); - - // Debug: Track recursion depth - const recursionKey = "_vtab_recursion_depth"; - // @ts-expect-error - using global for debugging - const currentDepth = (globalThis[recursionKey] || 0) + 1; - // @ts-expect-error - using global for debugging - globalThis[recursionKey] = currentDepth; - - if (currentDepth > 10) { - // @ts-expect-error - using global for debugging - globalThis[recursionKey] = 0; // Reset - throw new Error( - `Virtual table recursion depth exceeded: ${currentDepth}` - ); - } - - try { - // Extract filter arguments if provided - const filters: Record = {}; - if (argc > 0 && argv) { - const args = sqlite.sqlite3.capi.sqlite3_values_to_js(argc, argv); - // Parse idxStr to understand which columns are being filtered - // idxStr format: "column1,column2,..." - if (idxStr) { - const columns = idxStr.split(",").filter((c) => c.length > 0); - for (let i = 0; i < Math.min(columns.length, args.length); i++) { - if (args[i] !== null) { - filters[columns[i]!] 
= args[i]; // Keep original type - } - } - } - } - - // If we're updating cache state, we must use resolved state view directly to avoid recursion - if (isUpdatingCacheState) { - // Query directly from resolved state view which handles inheritance correctly - let query = db - .selectFrom("internal_resolved_state_all") - .selectAll(); - - // Apply filters - for (const [column, value] of Object.entries(filters)) { - query = query.where(column as any, "=", value); - } - - const stateResults = executeSync({ - lix: { sqlite }, - query, - }); - - cursorState.results = stateResults || []; - cursorState.rowIndex = 0; - return capi.SQLITE_OK; - } - - // Normal path: check cache staleness - const cacheIsStale = isStaleStateCache({ - lix: { sqlite, db: db as any }, - }); - - // Try cache first - but only if it's not stale - let cacheResults: any[] | null = null; - if (!cacheIsStale) { - // Select directly from resolved state view using Kysely - let query = db - .selectFrom("internal_resolved_state_all") - .selectAll(); - - // Apply filters - for (const [column, value] of Object.entries(filters)) { - query = query.where(column as any, "=", value); - } - - cacheResults = executeSync({ - lix: { sqlite }, - query, - }); - } - - cursorState.results = cacheResults || []; - cursorState.rowIndex = 0; - - if (cacheIsStale) { - // Populate cache directly with materialized state - populateStateCache(sqlite); - - // Log the cache miss - insertVTableLog({ - sqlite, - db: db as any, - key: "lix_state_cache_miss", - level: "debug", - message: `Cache miss detected - materialized state`, - }); - - // Mark cache as fresh after population - isUpdatingCacheState = true; - try { - markStateCacheAsFresh({ lix: { sqlite, db: db as any, hooks } }); - } finally { - isUpdatingCacheState = false; - } - - let query = db - .selectFrom("internal_resolved_state_all") - .selectAll(); - - // Apply filters - for (const [column, value] of Object.entries(filters)) { - query = query.where(column as any, "=", value); - } - - const newResults = executeSync({ - lix: { sqlite }, - query, - }); - cursorState.results = newResults || []; - } - - return capi.SQLITE_OK; - } finally { - // Always decrement recursion depth - // @ts-expect-error - using global for debugging - globalThis[recursionKey] = currentDepth - 1; - } - }, - - xNext: (pCursor: any) => { - const cursorState = cursorStates.get(pCursor); - cursorState.rowIndex++; - return capi.SQLITE_OK; - }, - - xEof: (pCursor: any) => { - const cursorState = cursorStates.get(pCursor); - return cursorState.rowIndex >= cursorState.results.length ? 1 : 0; - }, - - xColumn: (pCursor: any, pContext: any, iCol: number) => { - const cursorState = cursorStates.get(pCursor); - const row = cursorState.results[cursorState.rowIndex]; - - if (!row) { - capi.sqlite3_result_null(pContext); - return capi.SQLITE_OK; - } + // Apply the virtual table + applyStateVTable(lix); - // Handle primary key column (_pk) - if (iCol === 0) { - if (Array.isArray(row)) { - // For array results, _pk is at index 0 - capi.sqlite3_result_js(pContext, row[0]); - } else if (row._pk) { - // If row already has _pk, use it - capi.sqlite3_result_js(pContext, row._pk); - } else { - // Generate primary key from row data - const tag = row.untracked ? 
"U" : "C"; - const primaryKey = serializeStatePk( - tag, - row.file_id, - row.entity_id, - row.version_id - ); - capi.sqlite3_result_js(pContext, primaryKey); - } - return capi.SQLITE_OK; - } - - // Handle array-style results from SQLite exec - let value; - if (Array.isArray(row)) { - // For array results, composite_key is at index 0, so we use iCol directly - value = row[iCol]; - } else { - const columnName = getColumnName(iCol); - value = row[columnName]; - } - - // Handle special cases for null values that might be stored as strings - if ( - value === "null" && - getColumnName(iCol) === "inherited_from_version_id" - ) { - capi.sqlite3_result_null(pContext); - return capi.SQLITE_OK; - } - - if (value === null) { - capi.sqlite3_result_null(pContext); - } else { - capi.sqlite3_result_js(pContext, value); - } - - return capi.SQLITE_OK; - }, - - xRowid: () => { - // For WITHOUT ROWID tables, xRowid should not be called - // But if it is, we return an error - return capi.SQLITE_ERROR; - }, - - xUpdate: (_pVTab: number, nArg: number, ppArgv: any) => { - try { - // Extract arguments using the proper SQLite WASM API - const args = sqlite.sqlite3.capi.sqlite3_values_to_js(nArg, ppArgv); - - // DELETE operation: nArg = 1, args[0] = old primary key - if (nArg === 1) { - const oldPk = args[0] as string; - if (!oldPk) { - throw new Error("Missing primary key for DELETE operation"); - } - - // Use handleStateDelete for all cases - it handles both tracked and untracked - handleStateDelete(lix as any, oldPk); - - return capi.SQLITE_OK; - } - - // INSERT operation: nArg = N+2, args[0] = NULL, args[1] = new primary key - // UPDATE operation: nArg = N+2, args[0] = old primary key, args[1] = new primary key - const isInsert = args[0] === null; - const isUpdate = args[0] !== null; - - if (!isInsert && !isUpdate) { - throw new Error("Invalid xUpdate operation"); - } - - // Extract column values (args[2] through args[N+1]) - // Column order: _pk, entity_id, schema_key, file_id, version_id, plugin_key, - // snapshot_content, schema_version, created_at, updated_at, inherited_from_version_id, change_id, untracked - const entity_id = args[3]; - const schema_key = args[4]; - const file_id = args[5]; - const version_id = args[6]; - const plugin_key = args[7]; - // this is an update where we have a snapshot_content - // the snapshot_content is a JSON string as returned by SQlite - const snapshot_content = args[8] as string; - const schema_version = args[9]; - // Skip created_at (args[10]), updated_at (args[11]), inherited_from_version_id (args[12]), change_id (args[13]) - const untracked = args[14] ?? false; - - // assert required fields - if (!entity_id || !schema_key || !file_id || !plugin_key) { - throw new Error("Missing required fields for state mutation"); - } - - if (!version_id) { - throw new Error("version_id is required for state mutation"); - } - - // Call validation function (same logic as triggers) - const storedSchema = getStoredSchema(lix as any, schema_key); - - validateStateMutation({ - lix: lix as any, - schema: storedSchema ? JSON.parse(storedSchema) : null, - snapshot_content: JSON.parse(snapshot_content), - operation: isInsert ? 
"insert" : "update", - entity_id: String(entity_id), - version_id: String(version_id), - untracked: Boolean(untracked), - }); - - // Use insertTransactionState which handles both tracked and untracked entities - insertTransactionState({ - lix: lix as any, - data: [ - { - entity_id: String(entity_id), - schema_key: String(schema_key), - file_id: String(file_id), - plugin_key: String(plugin_key), - snapshot_content, - schema_version: String(schema_version), - version_id: String(version_id), - untracked: Boolean(untracked), - }, - ], - }); - - // TODO: This cache copying logic is a temporary workaround for shared commits. - // The proper solution requires improving cache miss logic to handle commit sharing - // without duplicating entries. See: https://github.com/opral/lix-sdk/issues/309 - // - // Handle cache copying for new versions that share commits - if (isInsert && String(schema_key) === "lix_version") { - const versionData = JSON.parse(snapshot_content); - const newVersionId = versionData.id; - const commitId = versionData.commit_id; - - if (newVersionId && commitId) { - // Find other versions that point to the same commit - const existingVersionsWithSameCommit = sqlite.exec({ - sql: ` - SELECT json_extract(snapshot_content, '$.id') as version_id - FROM internal_state_cache - WHERE schema_key = 'lix_version' - AND json_extract(snapshot_content, '$.commit_id') = ? - AND json_extract(snapshot_content, '$.id') != ? - `, - bind: [commitId, newVersionId], - returnValue: "resultRows", - }); - - // If there are existing versions with the same commit, copy their cache entries - if ( - existingVersionsWithSameCommit && - existingVersionsWithSameCommit.length > 0 - ) { - const sourceVersionId = existingVersionsWithSameCommit[0]![0]; // Take first existing version - - // Copy cache entries from source version to new version - // IMPORTANT: When copying cache entries, we need to mark them as inherited - // if they don't have an inherited_from_version_id already - sqlite.exec({ - sql: ` - INSERT OR IGNORE INTO internal_state_cache - (entity_id, schema_key, file_id, version_id, plugin_key, snapshot_content, schema_version, created_at, updated_at, inherited_from_version_id, inheritance_delete_marker, change_id, commit_id) - SELECT - entity_id, schema_key, file_id, ?, plugin_key, snapshot_content, schema_version, created_at, updated_at, - CASE - WHEN inherited_from_version_id IS NULL THEN ? - ELSE inherited_from_version_id - END as inherited_from_version_id, - inheritance_delete_marker, change_id, commit_id - FROM internal_state_cache - WHERE version_id = ? AND schema_key != 'lix_version' - `, - bind: [newVersionId, sourceVersionId, sourceVersionId], - }); - } - } - } - return capi.SQLITE_OK; - } catch (error) { - const errorMessage = - error instanceof Error ? 
error.message : String(error); - - // Log error for debugging - insertVTableLog({ - sqlite, - db: db as any, - key: "lix_state_xupdate_error", - level: "error", - message: `xUpdate error: ${errorMessage}`, - }); - - throw error; // Re-throw to propagate error - } - }, - }, - false - ); - - capi.sqlite3_create_module(sqlite.pointer!, "state_vtab", module, 0); - - // Create the virtual table as 'state' directly (no more _impl suffix or view layer) - sqlite.exec( - `CREATE VIRTUAL TABLE IF NOT EXISTS state_all USING state_vtab();` - ); - - // Create state view that filters to active version only - sqlite.exec(` - CREATE VIEW IF NOT EXISTS state AS - SELECT - entity_id, - schema_key, - file_id, - plugin_key, - snapshot_content, - schema_version, - created_at, - updated_at, - inherited_from_version_id, - change_id, - untracked, - commit_id - FROM state_all - WHERE version_id IN (SELECT version_id FROM active_version); - - -- Add INSTEAD OF triggers for state that forward to state virtual table - CREATE TRIGGER IF NOT EXISTS state_insert - INSTEAD OF INSERT ON state - BEGIN - INSERT INTO state_all ( - entity_id, - schema_key, - file_id, - version_id, - plugin_key, - snapshot_content, - schema_version, - created_at, - updated_at, - inherited_from_version_id, - change_id, - untracked, - commit_id - ) VALUES ( - NEW.entity_id, - NEW.schema_key, - NEW.file_id, - (SELECT version_id FROM active_version), - NEW.plugin_key, - NEW.snapshot_content, - NEW.schema_version, - NEW.created_at, - NEW.updated_at, - NEW.inherited_from_version_id, - NEW.change_id, - NEW.untracked, - NEW.commit_id - ); - END; - - CREATE TRIGGER IF NOT EXISTS state_update - INSTEAD OF UPDATE ON state - BEGIN - UPDATE state_all - SET - entity_id = NEW.entity_id, - schema_key = NEW.schema_key, - file_id = NEW.file_id, - version_id = (SELECT version_id FROM active_version), - plugin_key = NEW.plugin_key, - snapshot_content = NEW.snapshot_content, - schema_version = NEW.schema_version, - created_at = NEW.created_at, - updated_at = NEW.updated_at, - inherited_from_version_id = NEW.inherited_from_version_id, - change_id = NEW.change_id, - untracked = NEW.untracked, - commit_id = NEW.commit_id - WHERE - entity_id = OLD.entity_id - AND schema_key = OLD.schema_key - AND file_id = OLD.file_id - AND version_id = (SELECT version_id FROM active_version); - END; - - CREATE TRIGGER IF NOT EXISTS state_delete - INSTEAD OF DELETE ON state - BEGIN - -- Delete from state_all (handles both tracked and untracked entities) - DELETE FROM state_all - WHERE - entity_id = OLD.entity_id - AND schema_key = OLD.schema_key - AND file_id = OLD.file_id - AND version_id = (SELECT version_id FROM active_version); - END; - `); - - /** - * Insert a log entry directly using insertTransactionState to avoid recursion - * when logging from within the virtual table methods. 
- */ - function insertVTableLog(args: { - sqlite: SqliteWasmDatabase; - db: Kysely; - key: string; - message: string; - level: string; - }): void { - if (loggingIsInProgress) { - return; - } - // preventing recursivly logging that we inserted a log entry - // with this flag - loggingIsInProgress = true; - // Check log levels directly from internal state tables to avoid recursion - const logLevelsResult = executeSync({ - lix: { sqlite: args.sqlite }, - query: args.db - .selectFrom("internal_resolved_state_all") - .select(sql`json_extract(snapshot_content, '$.value')`.as("value")) - .where("schema_key", "=", "lix_key_value") - .where( - sql`json_extract(snapshot_content, '$.key')`, - "=", - "lix_log_levels" - ) - .limit(1), - }); - - const logLevelsValue = logLevelsResult[0]?.value; - - // Check if the level is allowed - if (!shouldLog(logLevelsValue as string[] | undefined, args.level)) { - return; - } - - // Create log entry data - const lix = { sqlite: args.sqlite, db: args.db } as any; - const logData = { - id: uuidV7({ lix }), - key: args.key, - message: args.message, - level: args.level, - }; - - // Insert log using insertTransactionState - insertTransactionState({ - lix, - data: [ - { - entity_id: logData.id, - schema_key: LixLogSchema["x-lix-key"], - file_id: "lix", - plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify(logData), - schema_version: LixLogSchema["x-lix-version"], - // Using global and untracked for vtable logs. - // if we need to track them, we can change this later - version_id: "global", - untracked: true, - }, - ], - }); - loggingIsInProgress = false; - } + // Public views over the internal vtable + applyStateView(lix); + applyStateAllView(lix); + applyStateWithTombstonesView(lix); } - -export function handleStateDelete( - lix: Pick, - primaryKey: string -): void { - // Query the row to delete using the resolved state view with Kysely - const rowToDelete = executeSync({ - lix, - query: (lix.db as unknown as Kysely) - .selectFrom("internal_resolved_state_all") - .select([ - "entity_id", - "schema_key", - "file_id", - "version_id", - "plugin_key", - "snapshot_content", - "schema_version", - "untracked", - "inherited_from_version_id", - ]) - .where("_pk", "=", primaryKey), - })[0]; - - if (!rowToDelete) { - throw new Error(`Row not found for primary key: ${primaryKey}`); - } - - const entity_id = rowToDelete.entity_id; - const schema_key = rowToDelete.schema_key; - const file_id = rowToDelete.file_id; - const version_id = rowToDelete.version_id; - const plugin_key = rowToDelete.plugin_key; - const snapshot_content = rowToDelete.snapshot_content; - const schema_version = rowToDelete.schema_version; - const untracked = rowToDelete.untracked; - - // If entity is untracked, handle differently based on whether it's inherited - if (untracked) { - // Parse the primary key to check if it's inherited untracked (UI tag) - const parsed = parseStatePk(primaryKey); - - if (parsed.tag === "UI") { - // For inherited untracked, create a tombstone to block inheritance - insertTransactionState({ - lix, - data: [ - { - entity_id: String(entity_id), - schema_key: String(schema_key), - file_id: String(file_id), - plugin_key: String(plugin_key), - snapshot_content: null, // Deletion tombstone - schema_version: String(schema_version), - version_id: String(version_id), - untracked: true, - }, - ], - }); - } else { - // For direct untracked (U tag), just delete from untracked table - executeSync({ - lix, - query: (lix.db as unknown as Kysely) - 
.deleteFrom("internal_state_all_untracked") - .where("entity_id", "=", String(entity_id)) - .where("schema_key", "=", String(schema_key)) - .where("file_id", "=", String(file_id)) - .where("version_id", "=", String(version_id)), - }); - } - return; - } - - const storedSchema = getStoredSchema(lix, schema_key); - - validateStateMutation({ - lix, - schema: storedSchema ? JSON.parse(storedSchema) : null, - snapshot_content: JSON.parse(snapshot_content as string), - operation: "delete", - entity_id: String(entity_id), - version_id: String(version_id), - }); - - insertTransactionState({ - lix, - data: [ - { - entity_id: String(entity_id), - schema_key: String(schema_key), - file_id: String(file_id), - plugin_key: String(plugin_key), - snapshot_content: null, // No snapshot content for DELETE - schema_version: String(schema_version), - version_id: String(version_id), - untracked: false, // tracked entity - }, - ], - }); -} - -// Helper functions for the virtual table - -function getStoredSchema( - lix: Pick, - schemaKey: any -): string | null { - // Query directly from internal_resolved_state_all to avoid vtable recursion - const result = executeSync({ - lix, - query: (lix.db as unknown as Kysely) - .selectFrom("internal_resolved_state_all") - .select(sql`json_extract(snapshot_content, '$.value')`.as("value")) - .where("schema_key", "=", "lix_stored_schema") - .where( - sql`json_extract(snapshot_content, '$.key')`, - "=", - String(schemaKey) - ) - .limit(1), - }); - - return result && result.length > 0 ? result[0]!.value : null; -} - -function getColumnName(columnIndex: number): string { - const columns = [ - "_pk", - "entity_id", - "schema_key", - "file_id", - "version_id", - "plugin_key", - "snapshot_content", - "schema_version", - "created_at", - "updated_at", - "inherited_from_version_id", - "change_id", - "untracked", - "commit_id", - ]; - return columns[columnIndex] || "unknown"; -} - -export type StateView = Omit; - -export type StateAllView = { - entity_id: string; - schema_key: string; - file_id: string; - plugin_key: string; - snapshot_content: Record; - schema_version: string; - version_id: string; - created_at: Generated; - updated_at: Generated; - inherited_from_version_id: string | null; - change_id: Generated; - untracked: Generated; - commit_id: Generated; -}; - -// Kysely operation types -export type StateRow = Selectable; -export type NewStateRow = Insertable; -export type StateRowUpdate = Updateable; - -export type StateAllRow = Selectable; -export type NewStateAllRow = Insertable; -export type StateAllRowUpdate = Updateable; - -// Types for the internal_change TABLE -export type InternalChangeInTransaction = - Selectable; -export type NewInternalChangeInTransaction = - Insertable; -export type InternalChangeInTransactionTable = { - id: Generated; - entity_id: string; - schema_key: string; - schema_version: string; - file_id: string; - plugin_key: string; - version_id: string; - snapshot_content: Record | null; - created_at: Generated; -}; diff --git a/packages/lix-sdk/src/state/insert-transaction-state.test.ts b/packages/lix-sdk/src/state/transaction/insert-transaction-state.test.ts similarity index 78% rename from packages/lix-sdk/src/state/insert-transaction-state.test.ts rename to packages/lix-sdk/src/state/transaction/insert-transaction-state.test.ts index df521ff16d..2b911ebee8 100644 --- a/packages/lix-sdk/src/state/insert-transaction-state.test.ts +++ b/packages/lix-sdk/src/state/transaction/insert-transaction-state.test.ts @@ -1,16 +1,17 @@ import { expect, test } 
from "vitest"; -import { openLix } from "../lix/open-lix.js"; +import { openLix } from "../../lix/open-lix.js"; import { sql, type Kysely } from "kysely"; -import type { LixInternalDatabaseSchema } from "../database/schema.js"; -import { commit } from "./commit.js"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import { commit } from "../vtable/commit.js"; import { insertTransactionState } from "./insert-transaction-state.js"; +import { timestamp } from "../../deterministic/timestamp.js"; test("creates tracked entity with pending change", async () => { const lix = await openLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], @@ -26,6 +27,7 @@ test("creates tracked entity with pending change", async () => { // Use insertPendingState function insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "test-insert", @@ -48,33 +50,33 @@ test("creates tracked entity with pending change", async () => { .execute(); expect(results).toHaveLength(1); - expect(results[0]?.snapshot_content).toEqual({ value: "inserted-value" }); + expect(results[0]?.snapshot_content).toEqual({ value: "inserted-value" }); // Already parsed by the view + expect(results[0]?.untracked).toBe(0); // tracked entity + expect(results[0]?.commit_id).toBe("pending"); // should be pending before commit - // Check that the cache has been updated with a change_id - const cacheBeforeCommit = await lixInternalDb - .selectFrom("internal_state_cache") + // Check that the change is in the transaction table before commit (not in cache) + const changeInTransaction = await lixInternalDb + .selectFrom("internal_change_in_transaction") .where("entity_id", "=", "test-insert") .selectAll() .select(sql`json(snapshot_content)`.as("snapshot_content")) .executeTakeFirstOrThrow(); - expect(cacheBeforeCommit).toBeDefined(); - expect(cacheBeforeCommit.snapshot_content).toEqual({ + expect(changeInTransaction).toBeDefined(); + expect(changeInTransaction.id).toBe(results[0]?.change_id); + expect(changeInTransaction.untracked).toBe(0); // tracked entity + expect(changeInTransaction.snapshot_content).toEqual({ value: "inserted-value", }); - // Change ID should exist and match what's in the view - expect(cacheBeforeCommit.change_id).toBeTruthy(); - expect(cacheBeforeCommit.change_id).toBe(results[0]?.change_id); - // Check that the change is in the transaction table before commit - const changeInTransaction = await lixInternalDb - .selectFrom("internal_change_in_transaction") + // Verify cache is NOT updated before commit (new behavior) + const cacheBeforeCommit = await lixInternalDb + .selectFrom("internal_state_cache") .where("entity_id", "=", "test-insert") .selectAll() - .executeTakeFirstOrThrow(); + .execute(); - expect(changeInTransaction).toBeDefined(); - expect(changeInTransaction.id).toBe(cacheBeforeCommit.change_id); + expect(cacheBeforeCommit).toHaveLength(0); // No cache entry before commit // Trigger a commit commit({ lix }); @@ -88,7 +90,21 @@ test("creates tracked entity with pending change", async () => { .executeTakeFirstOrThrow(); expect(changeAfterCommit).toBeDefined(); - expect(changeAfterCommit.id).toBe(cacheBeforeCommit.change_id); + expect(changeAfterCommit.id).toBe(changeInTransaction.id); + + // After commit, verify cache has been updated + const cacheAfterCommit = await lixInternalDb + .selectFrom("internal_state_cache") + .where("entity_id", "=", "test-insert") + 
.selectAll() + .select(sql`json(snapshot_content)`.as("snapshot_content")) + .executeTakeFirstOrThrow(); + + expect(cacheAfterCommit).toBeDefined(); + expect(cacheAfterCommit.snapshot_content).toEqual({ + value: "inserted-value", + }); + expect(cacheAfterCommit.change_id).toBe(changeInTransaction.id); // Verify the transaction table is cleared const transactionAfterCommit = await lixInternalDb @@ -102,24 +118,28 @@ test("creates tracked entity with pending change", async () => { const resultingState = await lix.db .selectFrom("state_all") .selectAll() - // @ts-expect-error - internal state_all has a hidden _pk column - .select("_pk") .execute(); - const resultingUnderlyingState = await lixInternalDb + const resultingUnderlyingStateRaw = await lixInternalDb .selectFrom("internal_resolved_state_all") .selectAll() .execute(); - expect(resultingState).toEqual(resultingUnderlyingState); + const resolvedStateWithoutPk = resultingUnderlyingStateRaw.map((r: any) => { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + const { _pk, ...rest } = r || {}; + return rest; + }); + + expect(resultingState).toEqual(resolvedStateWithoutPk); }); -test("insertTransactionState creates tombstone for inherited entity deletion", async () => { +test("creates tombstone for inherited entity deletion", async () => { const lix = await openLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], @@ -155,6 +175,7 @@ test("insertTransactionState creates tombstone for inherited entity deletion", a // Use insertTransactionState directly for deletion (tracked) insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "inherited-key", @@ -169,33 +190,22 @@ test("insertTransactionState creates tombstone for inherited entity deletion", a ], }); - // Verify tombstone exists in cache before commit - const tombstoneBeforeCommit = await lixInternalDb - .selectFrom("internal_state_cache") + // Verify the deletion is in transaction table (not cache yet) + const transactionDeletion = await lixInternalDb + .selectFrom("internal_change_in_transaction") .where("entity_id", "=", "inherited-key") .where("schema_key", "=", "lix_key_value") .where("version_id", "=", activeVersion.version_id) .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .execute(); - - expect(tombstoneBeforeCommit).toHaveLength(1); - expect(tombstoneBeforeCommit[0]?.inheritance_delete_marker).toBe(1); - expect(tombstoneBeforeCommit[0]?.snapshot_content).toBe(null); - - // Verify entity no longer appears in active version - const afterDelete = await lix.db - .selectFrom("key_value") - .where("key", "=", "inherited-key") - .selectAll() - .execute(); + .executeTakeFirstOrThrow(); - expect(afterDelete).toHaveLength(0); + expect(transactionDeletion.snapshot_content).toBe(null); // Deletion + expect(transactionDeletion.untracked).toBe(0); // tracked entity - // Trigger a commit + // Commit to create the tombstone commit({ lix }); - // After commit, verify tombstone still exists + // Verify tombstone exists in cache after commit const tombstoneAfterCommit = await lixInternalDb .selectFrom("internal_state_cache") .where("entity_id", "=", "inherited-key") @@ -208,14 +218,23 @@ test("insertTransactionState creates tombstone for inherited entity deletion", a expect(tombstoneAfterCommit).toHaveLength(1); expect(tombstoneAfterCommit[0]?.inheritance_delete_marker).toBe(1); 
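  // A tombstone here is a cache row whose inheritance delete marker is set and whose
  // snapshot is null; it shadows the value that would otherwise be inherited from the parent version.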
expect(tombstoneAfterCommit[0]?.snapshot_content).toBe(null); + + // Verify entity no longer appears in active version + const afterDelete = await lix.db + .selectFrom("key_value") + .where("key", "=", "inherited-key") + .selectAll() + .execute(); + + expect(afterDelete).toHaveLength(0); }); -test("insertTransactionState creates tombstone for inherited untracked entity deletion", async () => { +test("creates tombstone for inherited untracked entity deletion", async () => { const lix = await openLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], @@ -252,6 +271,7 @@ test("insertTransactionState creates tombstone for inherited untracked entity de // Use insertTransactionState directly for deletion (untracked) insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "inherited-untracked-key", @@ -266,6 +286,21 @@ test("insertTransactionState creates tombstone for inherited untracked entity de ], }); + // Verify the deletion is in transaction table first + const transactionDeletion = await lixInternalDb + .selectFrom("internal_change_in_transaction") + .where("entity_id", "=", "inherited-untracked-key") + .where("schema_key", "=", "lix_key_value") + .where("version_id", "=", activeVersion.version_id) + .selectAll() + .executeTakeFirstOrThrow(); + + expect(transactionDeletion.snapshot_content).toBe(null); // Deletion + expect(transactionDeletion.untracked).toBe(1); // untracked entity + + // Commit to create the tombstone in untracked table + commit({ lix }); + // Verify tombstone exists in untracked table (not cache) const tombstone = await lixInternalDb .selectFrom("internal_state_all_untracked") @@ -288,8 +323,6 @@ test("insertTransactionState creates tombstone for inherited untracked entity de .execute(); expect(afterDelete).toHaveLength(0); - - // No commit needed for untracked entities - they don't participate in change control }); test("untracked entities use same timestamp for created_at and updated_at", async () => { @@ -297,7 +330,7 @@ test("untracked entities use same timestamp for created_at and updated_at", asyn keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], @@ -313,6 +346,7 @@ test("untracked entities use same timestamp for created_at and updated_at", asyn // Use insertTransactionState for untracked entity const result = insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "test-untracked-timestamp", @@ -333,7 +367,24 @@ test("untracked entities use same timestamp for created_at and updated_at", asyn // Check returned data has same timestamps expect(result[0]?.created_at).toBe(result[0]?.updated_at); - // Verify in the actual table + // Verify the entity is in the transaction table (not untracked table yet) + const transactionEntity = await lixInternalDb + .selectFrom("internal_change_in_transaction") + .where("entity_id", "=", "test-untracked-timestamp") + .selectAll() + .select(sql`json(snapshot_content)`.as("snapshot_content")) + .executeTakeFirstOrThrow(); + + expect(transactionEntity.untracked).toBe(1); // marked as untracked + expect(transactionEntity.snapshot_content).toEqual({ + key: "test-key", + value: "test-value", + }); + + // Commit to move untracked entities to final state + commit({ lix }); + + // After commit, verify in the untracked table const untrackedEntity = await lixInternalDb 
.selectFrom("internal_state_all_untracked") .where("entity_id", "=", "test-untracked-timestamp") @@ -353,12 +404,12 @@ test("untracked entities use same timestamp for created_at and updated_at", asyn expect(stateView.created_at).toBe(stateView.updated_at); }); -test("insertTransactionState deletes direct untracked entity on null snapshot_content", async () => { +test("deletes direct untracked entity on null snapshot_content", async () => { const lix = await openLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], @@ -374,6 +425,7 @@ test("insertTransactionState deletes direct untracked entity on null snapshot_co // First insert a direct untracked entity insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "direct-untracked-key", @@ -391,7 +443,10 @@ test("insertTransactionState deletes direct untracked entity on null snapshot_co ], }); - // Verify it exists in untracked table + // Commit to move the untracked entity to its final state + commit({ lix }); + + // Verify it exists in untracked table after commit const beforeDelete = await lixInternalDb .selectFrom("internal_state_all_untracked") .where("entity_id", "=", "direct-untracked-key") @@ -423,6 +478,7 @@ test("insertTransactionState deletes direct untracked entity on null snapshot_co // Now delete the direct untracked entity insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "direct-untracked-key", @@ -437,6 +493,9 @@ test("insertTransactionState deletes direct untracked entity on null snapshot_co ], }); + // Commit to finalize the deletion + commit({ lix }); + // Verify it's deleted from untracked table const afterDelete = await lixInternalDb .selectFrom("internal_state_all_untracked") @@ -590,6 +649,9 @@ test("updates working change set elements on entity updates (latest change wins) }) .execute(); + // Commit to create working changeset element for the insert + commit({ lix }); + // Get the working commit to find its change set const workingCommit = await lix.db .selectFrom("commit") @@ -617,6 +679,9 @@ test("updates working change set elements on entity updates (latest change wins) .set({ value: "updated_value" }) .execute(); + // Commit the transaction to process working changeset logic + commit({ lix }); + // Check that working change set still has only one element for this entity (latest change) const workingElementsAfterUpdate = await lix.db .selectFrom("change_set_element_all") @@ -627,12 +692,7 @@ test("updates working change set elements on entity updates (latest change wins) .selectAll() .execute(); - expect(workingElementsAfterUpdate).toHaveLength(1); - - // Verify the change_id was updated to latest change - expect(workingElementsAfterUpdate[0]!.change_id).not.toBe(initialChangeId); - - // Verify the change_id points to the latest change + // DEBUG: Get all changes to see what changes exist const allChanges = await lix.db .selectFrom("change") .where("entity_id", "=", "test_key") @@ -641,6 +701,39 @@ test("updates working change set elements on entity updates (latest change wins) .selectAll() .execute(); + // DEBUG: Throw error with detailed info if we have the wrong count + if (workingElementsAfterUpdate.length !== 1) { + const debugInfo = { + workingElementsCount: workingElementsAfterUpdate.length, + workingElements: workingElementsAfterUpdate.map((element, i) => ({ + index: i, + entity_id: element.entity_id, + change_id: element.change_id, + 
change_set_id: element.change_set_id, + schema_key: element.schema_key, + file_id: element.file_id, + })), + allChangesCount: allChanges.length, + allChanges: allChanges.map((change, i) => ({ + index: i, + id: change.id, + entity_id: change.entity_id, + schema_key: change.schema_key, + file_id: change.file_id, + created_at: change.created_at, + snapshot_content: change.snapshot_content, + })), + }; + throw new Error( + `DEBUG: Working change set elements not properly replaced. Expected 1 but got ${workingElementsAfterUpdate.length}. Details: ${JSON.stringify(debugInfo, null, 2)}` + ); + } + + expect(workingElementsAfterUpdate).toHaveLength(1); + + // Verify the change_id was updated to latest change + expect(workingElementsAfterUpdate[0]!.change_id).not.toBe(initialChangeId); + expect(allChanges).toHaveLength(2); // Insert + Update expect(workingElementsAfterUpdate[0]!.change_id).toBe(allChanges[0]!.id); // Latest change }); @@ -665,6 +758,9 @@ test("mutation handler removes working change set elements on entity deletion", }) .execute(); + // Commit to create working changeset element for the insert + commit({ lix }); + // Get the working commit to find its change set const workingCommit = await lix.db .selectFrom("commit") @@ -687,6 +783,9 @@ test("mutation handler removes working change set elements on entity deletion", // Delete the entity await lix.db.deleteFrom("key_value").where("key", "=", "test_key").execute(); + // Commit the transaction to process working changeset logic + commit({ lix }); + // Check that working change set no longer includes this entity const workingElementsAfterDelete = await lix.db .selectFrom("change_set_element_all") @@ -756,6 +855,9 @@ test("delete reconciliation: entities added after checkpoint then deleted are ex }) .execute(); + // Commit to create working changeset element for the insert + commit({ lix }); + // Get the working commit to find its change set const workingCommit = await lix.db .selectFrom("commit") @@ -781,6 +883,9 @@ test("delete reconciliation: entities added after checkpoint then deleted are ex .where("key", "=", "post_checkpoint_key") .execute(); + // Commit the transaction to process working changeset logic + commit({ lix }); + // Verify entity is excluded from working change set (added after checkpoint then deleted) const workingElementsAfterDelete = await lix.db .selectFrom("change_set_element_all") @@ -945,3 +1050,65 @@ test("working change set elements are separated per version", async () => { expect(mainCrossCheck).toHaveLength(0); expect(newCrossCheck).toHaveLength(0); }); + +test("inheritance works with resolved view before committing", async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + const lixInternalDb = lix.db as unknown as Kysely; + + // Get the active version (should inherit from global) + const activeVersion = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + // Insert a global entity in transaction state + insertTransactionState({ + lix, + timestamp: timestamp({ lix }), + data: [ + { + entity_id: "test-global-key", + schema_key: "lix_key_value", + file_id: "lix", + plugin_key: "lix_key_value", + snapshot_content: JSON.stringify({ + key: "test-global-key", + value: "global-value", + }), + schema_version: "1.0", + version_id: "global", + untracked: false, + }, + ], + }); + + // 
Query resolved view for the active version - should inherit the global entity + const resolvedEntitiesForActiveVersion = await lixInternalDb + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_key_value") + .where("version_id", "=", activeVersion.id) + .where("entity_id", "=", "test-global-key") + .selectAll() + .execute(); + + // Should inherit the global entity even though it was only inserted into global version + expect(resolvedEntitiesForActiveVersion).toHaveLength(1); + expect(resolvedEntitiesForActiveVersion[0]?.entity_id).toBe( + "test-global-key" + ); + + const parsedContent = resolvedEntitiesForActiveVersion[0]! + .snapshot_content as any; + expect(parsedContent.key).toBe("test-global-key"); + expect(parsedContent.value).toBe("global-value"); +}); diff --git a/packages/lix-sdk/src/state/transaction/insert-transaction-state.ts b/packages/lix-sdk/src/state/transaction/insert-transaction-state.ts new file mode 100644 index 0000000000..61e1a8bffc --- /dev/null +++ b/packages/lix-sdk/src/state/transaction/insert-transaction-state.ts @@ -0,0 +1,132 @@ +import { sql, type Kysely } from "kysely"; +import { executeSync } from "../../database/execute-sync.js"; +import { uuidV7 } from "../../deterministic/index.js"; +import type { Lix } from "../../lix/open-lix.js"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import type { NewStateAllRow, StateAllRow } from "../index.js"; + +type NewTransactionStateRow = Omit & { + snapshot_content: string | null; +}; + +export type TransactionStateRow = Omit & { + snapshot_content: string | null; +}; + +/** + * Inserts a state change into the transaction stage. + * + * This function handles the TRANSACTION stage of the state mutation flow, where + * changes are temporarily stored in the transaction table before being committed + * to permanent storage. All changes (both tracked and untracked) are stored + * in the transaction table until commit time. 
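+ * At commit time, tracked changes are flushed to the change table and the state cache, while untracked changes are written to the untracked state table (the tests in this directory exercise both paths).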
+ * + * @param args.lix - The Lix instance with SQLite database and Kysely query builder + * @param args.data - The state data to insert, including entity details and snapshot + * @param args.timestamp - Timestamp to use for the changes + * @param args.createChangeAuthors - Whether to create change_author records (defaults to true) + * + * @returns The inserted state row with generated fields like change_id + * + * @example + * // Insert a new entity state + * insertTransactionState({ + * lix: { sqlite, db }, + * data: { + * entity_id: "user-123", + * schema_key: "user", + * file_id: "file1", + * plugin_key: "my-plugin", + * snapshot_content: JSON.stringify({ name: "John", email: "john@example.com" }), + * schema_version: "1.0", + * version_id: "version-abc", + * untracked: false + * } + * }); + * + * @example + * // Delete an entity (null snapshot_content) + * insertTransactionState({ + * lix: { sqlite, db }, + * data: { + * entity_id: "user-123", + * schema_key: "user", + * file_id: "file1", + * plugin_key: "my-plugin", + * snapshot_content: null, // Deletion + * schema_version: "1.0", + * version_id: "version-abc", + * untracked: false + * } + * }); + */ +export function insertTransactionState(args: { + lix: Pick; + data: NewTransactionStateRow[]; + timestamp: string; + createChangeAuthors?: boolean; +}): TransactionStateRow[] { + const _timestamp = args.timestamp; + + if (args.data.length === 0) { + return []; + } + + // Generate change IDs for all entities upfront + const dataWithChangeIds = args.data.map((data) => ({ + ...data, + change_id: uuidV7({ lix: args.lix as any }), + })); + + // Batch insert into internal_change_in_transaction + const transactionRows = dataWithChangeIds.map((data) => ({ + id: data.change_id, + entity_id: data.entity_id, + schema_key: data.schema_key, + file_id: data.file_id, + plugin_key: data.plugin_key, + snapshot_content: data.snapshot_content + ? sql`jsonb(${data.snapshot_content})` + : null, + schema_version: data.schema_version, + version_id: data.version_id, + created_at: _timestamp, + untracked: data.untracked === true ? 
1 : 0, + })); + + executeSync({ + lix: args.lix, + query: (args.lix.db as unknown as Kysely) + .insertInto("internal_change_in_transaction") + .values(transactionRows) + .onConflict((oc) => + oc + .columns(["entity_id", "file_id", "schema_key", "version_id"]) + .doUpdateSet((eb) => ({ + id: eb.ref("excluded.id"), + plugin_key: eb.ref("excluded.plugin_key"), + snapshot_content: eb.ref("excluded.snapshot_content"), + schema_version: eb.ref("excluded.schema_version"), + created_at: eb.ref("excluded.created_at"), + untracked: eb.ref("excluded.untracked"), + })) + ), + }); + + // Return results for all data + return dataWithChangeIds.map((data) => ({ + entity_id: data.entity_id, + schema_key: data.schema_key, + file_id: data.file_id, + plugin_key: data.plugin_key, + snapshot_content: data.snapshot_content, + schema_version: data.schema_version, + version_id: data.version_id, + created_at: _timestamp, + updated_at: _timestamp, + untracked: data.untracked === true, + inherited_from_version_id: null, + change_id: data.change_id, + commit_id: "pending", + })); +} diff --git a/packages/lix-sdk/src/state/transaction/schema.ts b/packages/lix-sdk/src/state/transaction/schema.ts new file mode 100644 index 0000000000..ecdf97fde9 --- /dev/null +++ b/packages/lix-sdk/src/state/transaction/schema.ts @@ -0,0 +1,38 @@ +import type { Selectable, Insertable, Generated } from "kysely"; +import type { Lix } from "../../lix/open-lix.js"; + +export function applyTransactionStateSchema(lix: Pick): void { + lix.sqlite.exec(` + CREATE TABLE IF NOT EXISTS internal_change_in_transaction ( + id TEXT PRIMARY KEY DEFAULT (lix_uuid_v7()), + entity_id TEXT NOT NULL, + schema_key TEXT NOT NULL, + schema_version TEXT NOT NULL, + file_id TEXT NOT NULL, + plugin_key TEXT NOT NULL, + version_id TEXT NOT NULL, + snapshot_content BLOB, + created_at TEXT NOT NULL, + untracked INTEGER NOT NULL DEFAULT 0, + --- NOTE schema_key must be unique per entity_id and file_id in the transaction + UNIQUE(entity_id, file_id, schema_key, version_id) + ) STRICT; +`); +} + +export type InternalChangeInTransaction = + Selectable; +export type NewInternalChangeInTransaction = + Insertable; +export type InternalChangeInTransactionTable = { + id: Generated; + entity_id: string; + schema_key: string; + schema_version: string; + file_id: string; + plugin_key: string; + version_id: string; + snapshot_content: Record | null; + created_at: Generated; + untracked: number; +}; diff --git a/packages/lix-sdk/src/state/transition.bench.ts b/packages/lix-sdk/src/state/transition.bench.ts new file mode 100644 index 0000000000..a7a9a92380 --- /dev/null +++ b/packages/lix-sdk/src/state/transition.bench.ts @@ -0,0 +1,190 @@ +import { bench } from "vitest"; +import { openLix } from "../lix/open-lix.js"; +import { createCheckpoint } from "./create-checkpoint.js"; +import { createVersionFromCommit } from "../version/create-version-from-commit.js"; +import { switchVersion } from "../version/switch-version.js"; +import { transition } from "./transition.js"; + +const N = 1; +const DEPTH = 2; + +bench("transition no-op (baseline)", async () => { + const lix = await openLix({}); + + // Create a small state and checkpoint + await lix.db + .insertInto("key_value") + .values({ key: "bench_noop", value: "1" }) + .execute(); + + const cp = await createCheckpoint({ lix }); + + // Version already points to cp; transition to same commit should be a no-op + await transition({ lix, to: cp }); +}); + +bench("transition with 100 additions", async () => { + try { + const lix = await 
openLix({}); + + // Baseline empty state + const emptyCp = await createCheckpoint({ lix }); + + // Add N key_value rows and checkpoint + const rows = Array.from({ length: N }, (_, i) => ({ + key: `add_${i}`, + value: String(i), + })); + for (const row of rows) { + await lix.db.insertInto("key_value").values(row).execute(); + } + const addedCp = await createCheckpoint({ lix }); + + // Create and switch to a version at the empty baseline + const version = await createVersionFromCommit({ + lix, + name: "bench_additions", + commit: emptyCp, + }); + await switchVersion({ lix, to: version }); + + // Transition to the checkpoint with 100 additions + await transition({ lix, to: addedCp }); + } catch (error) { + console.error("Error during transition with 100 additions:", error); + } +}); + +bench("transition with 100 deletions", async () => { + const lix = await openLix({}); + + // Start with N rows + const rows = Array.from({ length: N }, (_, i) => ({ + key: `del_${i}`, + value: String(i), + })); + for (const row of rows) { + await lix.db.insertInto("key_value").values(row).execute(); + } + const fullCp = await createCheckpoint({ lix }); + + // Delete all rows and checkpoint + await lix.db.deleteFrom("key_value").execute(); + const emptyCp = await createCheckpoint({ lix }); + + // Create and switch to a version at the full baseline + const version = await createVersionFromCommit({ + lix, + name: "bench_deletions", + commit: fullCp, + }); + await switchVersion({ lix, to: version }); + + // Transition to the empty checkpoint (generates explicit deletions) + await transition({ lix, to: emptyCp }); +}); + +bench("transition with 100 updates", async () => { + const lix = await openLix({}); + + // Start with N rows + for (let i = 0; i < N; i++) { + await lix.db + .insertInto("key_value") + .values({ key: `up_${i}`, value: "v0" }) + .execute(); + } + const beforeCp = await createCheckpoint({ lix }); + + // Update all N rows + for (let i = 0; i < N; i++) { + await lix.db + .updateTable("key_value") + .set({ value: "v1" }) + .where("key", "=", `up_${i}`) + .execute(); + } + const afterCp = await createCheckpoint({ lix }); + + // Version at beforeCp, transition to afterCp + const version = await createVersionFromCommit({ + lix, + name: "bench_updates", + commit: beforeCp, + }); + await switchVersion({ lix, to: version }); + await transition({ lix, to: afterCp }); +}); + +bench("transition mixed (40 add, 40 update, 20 delete)", async () => { + const lix = await openLix({}); + + const updateCount = Math.floor(N * 0.4); // 40 + const deleteCount = Math.floor(N * 0.2); // 20 + const addCount = N - updateCount - deleteCount; // 40 + + // Baseline: updateCount + deleteCount rows + for (let i = 0; i < updateCount + deleteCount; i++) { + await lix.db + .insertInto("key_value") + .values({ key: `mix_${i}`, value: "base" }) + .execute(); + } + const baseCp = await createCheckpoint({ lix }); + + // Update first updateCount + for (let i = 0; i < updateCount; i++) { + await lix.db + .updateTable("key_value") + .set({ value: "upd" }) + .where("key", "=", `mix_${i}`) + .execute(); + } + // Delete last deleteCount + for (let i = updateCount; i < updateCount + deleteCount; i++) { + await lix.db + .deleteFrom("key_value") + .where("key", "=", `mix_${i}`) + .execute(); + } + // Add addCount new keys + for (let i = 0; i < addCount; i++) { + await lix.db + .insertInto("key_value") + .values({ key: `mix_new_${i}`, value: "new" }) + .execute(); + } + const targetCp = await createCheckpoint({ lix }); + + const version = await 
createVersionFromCommit({ + lix, + name: "bench_mixed", + commit: baseCp, + }); + await switchVersion({ lix, to: version }); + await transition({ lix, to: targetCp }); +}); + +bench("transition deep ancestry (depth=50)", async () => { + const lix = await openLix({}); + + const baseCp = await createCheckpoint({ lix }); + + for (let i = 0; i < DEPTH; i++) { + await lix.db + .insertInto("key_value") + .values({ key: `depth_key_${i}`, value: String(i) }) + .execute(); + await createCheckpoint({ lix }); + } + + const headCp = await createCheckpoint({ lix }); // idempotent, returns current head + + const version = await createVersionFromCommit({ + lix, + name: "bench_depth", + commit: baseCp, + }); + await switchVersion({ lix, to: version }); + await transition({ lix, to: headCp }); +}); diff --git a/packages/lix-sdk/src/state/transition.test.ts b/packages/lix-sdk/src/state/transition.test.ts new file mode 100644 index 0000000000..466015761a --- /dev/null +++ b/packages/lix-sdk/src/state/transition.test.ts @@ -0,0 +1,295 @@ +import { expect, test } from "vitest"; +import { transition } from "./transition.js"; +import { createVersionFromCommit } from "../version/create-version-from-commit.js"; +import { switchVersion } from "../version/switch-version.js"; +import { createCheckpoint } from "./create-checkpoint.js"; +import { simulationTest } from "../test-utilities/simulation-test/simulation-test.js"; +import { mockJsonPlugin } from "../plugin/mock-json-plugin.js"; + +test("simulation test discovery", () => {}); + +simulationTest( + "transition creates a new commit, links parents, updates active version, and restores user + file state", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // first checkpoint (checkpoint0) + await lix.db + .insertInto("key_value") + .values({ key: "a", value: "1" }) + .execute(); + + // Create a file with initial content in the same checkpoint + const enc = new TextEncoder(); + + await lix.db + .insertInto("file") + .values({ id: "file1", path: "/a.txt", data: enc.encode("one") }) + .execute(); + + const checkpoint0 = await createCheckpoint({ lix }); + + // second checkpoint (checkpoint1) + await lix.db + .updateTable("key_value") + .set({ value: "2" }) + .where("key", "=", "a") + .execute(); + + await lix.db + .insertInto("key_value") + .values({ key: "b", value: "x" }) + .execute(); + + // Modify the file content in the same checkpoint + await lix.db + .updateTable("file") + .set({ data: enc.encode("two") }) + .where("id", "=", "file1") + .execute(); + + const checkpoint1 = await createCheckpoint({ lix }); + + // Transition back to checkpoint0 + const newCommit = await transition({ lix, to: checkpoint0 }); + + // Active version should now point to newCommit + const activeV = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + expect(activeV.commit_id).toBe(newCommit.id); + + // New commit should have two parents: source (checkpoint1) and target (checkpoint0) + const parents = await lix.db + .selectFrom("commit_edge") + .where("child_id", "=", newCommit.id) + .select(["parent_id"]) + .execute(); + + const parentIds = parents.map((p: any) => p.parent_id).sort(); + + expect(parentIds).toEqual([checkpoint1.id, checkpoint0.id].sort()); + + // Verify user and file state match checkpoint0 + const aRow = 
await lix.db + .selectFrom("key_value") + .selectAll() + .where("key", "=", "a") + .executeTakeFirstOrThrow(); + + expect(aRow.value).toBe("1"); + + const bRow = await lix.db + .selectFrom("key_value") + .selectAll() + .where("key", "=", "b") + .executeTakeFirst(); + expect(bRow).toBeUndefined(); + + const file = await lix.db + .selectFrom("file") + .selectAll() + .where("id", "=", "file1") + .executeTakeFirstOrThrow(); + const dec = new TextDecoder(); + + expect(dec.decode(file.data)).toBe("one"); + } +); + +simulationTest( + "transition no-op when target equals current version commit", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({}); + await lix.db + .insertInto("key_value") + .values({ key: "k", value: "v1" }) + .execute(); + const checkpoint = await createCheckpoint({ lix }); + + const version = await createVersionFromCommit({ + lix, + name: "noop-test", + commit: checkpoint, + }); + + // Record commit count immediately before transition + const commitsBefore = await lix.db + .selectFrom("commit") + .select(({ fn }) => [fn.countAll().as("c")]) + .executeTakeFirstOrThrow(); + + const returned = await transition({ lix, to: checkpoint, version }); + expect(returned.id).toBe(checkpoint.id); + + const v = await lix.db + .selectFrom("version") + .selectAll() + .where("id", "=", version.id) + .executeTakeFirstOrThrow(); + expect(v.commit_id).toBe(checkpoint.id); + + // Verify no new commits were created by transition + const commitsAfter = await lix.db + .selectFrom("commit") + .select(({ fn }) => [fn.countAll().as("c")]) + .executeTakeFirstOrThrow(); + + expect(Number((commitsAfter as any).c)).toBe( + Number((commitsBefore as any).c) + ); + } +); + +simulationTest( + "transition defaults to active version when versionId omitted", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({}); + + await lix.db + .insertInto("key_value") + .values({ key: "n", value: "1" }) + .execute(); + + const checkpoint0 = await createCheckpoint({ lix }); + + await lix.db + .updateTable("key_value") + .set({ value: "2" }) + .where("key", "=", "n") + .execute(); + + const checkpoint1 = await createCheckpoint({ lix }); + + const version = await createVersionFromCommit({ + lix, + name: "active-transition", + commit: checkpoint1, + }); + + await switchVersion({ lix, to: version }); + + const resultCommit = await transition({ lix, to: checkpoint0 }); + + const activeV = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + expect(activeV.id).toBe(version.id); + expect(activeV.commit_id).toBe(resultCommit.id); + } +); + +simulationTest( + "transition respects boundaries by file_id", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + + providePlugins: [mockJsonPlugin], + }); + + // Create two JSON files where both have a shared property key + await lix.db + .insertInto("file") + .values({ + id: "f1", + path: "/a.json", + data: new TextEncoder().encode( + JSON.stringify({ shared: "A", keep: 1 }) + ), + }) + .execute(); + + await lix.db + .insertInto("file") + .values({ + id: "f2", + path: "/b.json", + data: new TextEncoder().encode( + JSON.stringify({ shared: "B", keep: 2 }) + ), + }) + .execute(); + + // Baseline where both files have the shared property + const checkpointBoth = await createCheckpoint({ lix }); + 
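+    // createCheckpoint yields a commit, so checkpointBoth can later be used as a transition target (see transition's `to` parameter).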
+ // Update only f2 to delete the shared property (keep other fields) + await lix.db + .updateTable("file") + .set({ data: new TextEncoder().encode(JSON.stringify({ keep: 2 })) }) + .where("id", "=", "f2") + .execute(); + + const checkpointMinusOne = await createCheckpoint({ lix }); + + // Create a new version pinned at the baseline (both present) and switch to it + const version = await createVersionFromCommit({ + lix, + name: "multi-key-diff", + commit: checkpointBoth, + }); + await switchVersion({ lix, to: version }); + + // Transition this version to the target that removed only f2's shared property + await transition({ lix, to: checkpointMinusOne }); + + // Verify file contents reflect deletion only in f2 + const decoder = new TextDecoder(); + const file1 = await lix.db + .selectFrom("file") + .selectAll() + .where("id", "=", "f1") + .executeTakeFirstOrThrow(); + const file2 = await lix.db + .selectFrom("file") + .selectAll() + .where("id", "=", "f2") + .executeTakeFirstOrThrow(); + + const json1 = JSON.parse(decoder.decode(file1.data)); + const json2 = JSON.parse(decoder.decode(file2.data)); + + expect(json1.shared).toBe("A"); + expect(json1.keep).toBe(1); + expect(json2.shared).toBeUndefined(); + expect(json2.keep).toBe(2); + + // Additionally, check state_all shows only one leaf for entity_id 'shared' at this version + const activeV = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + const sharedLeaves = await lix.db + .selectFrom("state_all") + .where("schema_key", "=", "mock_json_property") + .where("entity_id", "=", "shared") + .where("version_id", "=", activeV.id) + .selectAll() + .execute(); + + expect(sharedLeaves).toHaveLength(1); + expect(sharedLeaves[0]!.file_id).toBe("f1"); + } +); diff --git a/packages/lix-sdk/src/state/transition.ts b/packages/lix-sdk/src/state/transition.ts new file mode 100644 index 0000000000..59032a043f --- /dev/null +++ b/packages/lix-sdk/src/state/transition.ts @@ -0,0 +1,433 @@ +import type { Lix } from "../lix/index.js"; +import { uuidV7 } from "../deterministic/uuid-v7.js"; +// Using explicit commit-scoped leaf CTEs for performance and clarity +import type { LixCommit } from "../commit/schema.js"; +import { + LixChangeSetSchema, + LixChangeSetElementSchema, +} from "../change-set/schema.js"; +import { LixCommitSchema, LixCommitEdgeSchema } from "../commit/schema.js"; +import { LixVersionSchema, type LixVersion } from "../version/schema.js"; +import { sql, type Kysely } from "kysely"; +import type { LixInternalDatabaseSchema } from "../database/schema.js"; +import { timestamp } from "../deterministic/timestamp.js"; +import type { LixChangeRaw } from "../change/schema.js"; +import { updateStateCache } from "./cache/update-state-cache.js"; + +/** + * Transitions a version's state to match the state at the target commit `to`. + * + * - If `version` is omitted, operates on the active version. + * - If the version already points to `to.id`, it's a no-op and returns that commit. + * - Otherwise, creates a transition commit whose changeset transforms source → target, + * links it to both the source and target commits, and updates the version to point to it. + */ +export async function transition(args: { + lix: Lix; + to: Pick; + version?: Pick; +}): Promise { + const executeInTransaction = async (trx: Lix["db"]) => { + // Resolve target version + const version = args.version + ?
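+			// If a caller passed an explicit version, load that row; otherwise fall
+			// back to the version referenced by active_version (the active-version case
+			// described in the JSDoc above).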
await trx + .selectFrom("version") + .where("id", "=", args.version.id) + .selectAll() + .executeTakeFirstOrThrow() + : await trx + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + const sourceCommitId = version.commit_id; + + // No-op if already at target + if (sourceCommitId === args.to.id) { + const commit = await trx + .selectFrom("commit") + .where("id", "=", args.to.id) + .selectAll() + .executeTakeFirstOrThrow(); + return commit; + } + + // 1) Gather leaf changes for target and source via explicit commit-scoped CTEs + const leafChangesToApplyRes = await sql<{ + id: string; + entity_id: string; + schema_key: string; + file_id: string; + plugin_key: string; + schema_version: string; + snapshot_content: any | null; + created_at: string; + }>` +WITH RECURSIVE ancestry(id, depth) AS ( + SELECT id, 0 FROM "commit" WHERE id = ${sql.lit(args.to.id)} + UNION ALL + SELECT ce.parent_id, ancestry.depth + 1 + FROM commit_edge ce + JOIN ancestry ON ce.child_id = ancestry.id +), +change_sets AS ( + SELECT change_set_id FROM "commit" WHERE id IN (SELECT id FROM ancestry) +), +per_entity AS ( + SELECT + ch.id, + ch.entity_id, + ch.schema_key, + ch.file_id, + ch.plugin_key, + ch.schema_version, + ch.snapshot_content, + ch.created_at, + (SELECT depth FROM ancestry a JOIN "commit" c2 ON c2.id = a.id WHERE c2.change_set_id = cse.change_set_id LIMIT 1) AS depth_at + FROM change_set_element cse + JOIN change ch ON ch.id = cse.change_id + WHERE cse.change_set_id IN (SELECT change_set_id FROM change_sets) +) +SELECT id, entity_id, schema_key, file_id, plugin_key, schema_version, json(snapshot_content) as snapshot_content, created_at +FROM ( + SELECT *, ROW_NUMBER() OVER ( + PARTITION BY entity_id, schema_key, file_id + ORDER BY depth_at ASC + ) AS rn + FROM per_entity +) +WHERE rn = 1; + `.execute(trx); + const leafChangesToApply = leafChangesToApplyRes.rows; + + const sourceLeavesRes = await sql<{ + id: string; + entity_id: string; + schema_key: string; + file_id: string; + plugin_key: string; + schema_version: string; + }>` +WITH RECURSIVE ancestry(id, depth) AS ( + SELECT id, 0 FROM "commit" WHERE id = ${sql.lit(sourceCommitId)} + UNION ALL + SELECT ce.parent_id, ancestry.depth + 1 + FROM commit_edge ce + JOIN ancestry ON ce.child_id = ancestry.id +), +change_sets AS ( + SELECT change_set_id FROM "commit" WHERE id IN (SELECT id FROM ancestry) +), +per_entity AS ( + SELECT + ch.id, + ch.entity_id, + ch.schema_key, + ch.file_id, + ch.plugin_key, + ch.schema_version, + (SELECT depth FROM ancestry a JOIN "commit" c2 ON c2.id = a.id WHERE c2.change_set_id = cse.change_set_id LIMIT 1) AS depth_at + FROM change_set_element cse + JOIN change ch ON ch.id = cse.change_id + WHERE cse.change_set_id IN (SELECT change_set_id FROM change_sets) +) +SELECT id, entity_id, schema_key, file_id, plugin_key, schema_version +FROM ( + SELECT *, ROW_NUMBER() OVER ( + PARTITION BY entity_id, schema_key, file_id + ORDER BY depth_at ASC + ) AS rn + FROM per_entity +) +WHERE rn = 1; + `.execute(trx); + + // 2) Set-diff: deletions = source leaf keys minus target leaf keys + const targetKeySet = new Set( + leafChangesToApply.map( + (c) => `${c.entity_id}|${c.schema_key}|${c.file_id}` + ) + ); + const leafEntitiesToDelete = sourceLeavesRes.rows.filter( + (c) => !targetKeySet.has(`${c.entity_id}|${c.schema_key}|${c.file_id}`) + ); + + // Create deletion changes as new rows (snapshot_content = null) + const deletionChanges: Array<{ + 
id: string; + entity_id: string; + schema_key: string; + file_id: string; + plugin_key: string; + schema_version: string; + snapshot_content: null; + created_at: string; + }> = []; + if (leafEntitiesToDelete.length > 0) { + const deletionRows = leafEntitiesToDelete.map((c) => ({ + id: uuidV7({ lix: args.lix }), + entity_id: c.entity_id, + schema_key: c.schema_key, + file_id: c.file_id, + plugin_key: c.plugin_key, + schema_version: c.schema_version, + snapshot_content: null as null, + created_at: timestamp({ lix: args.lix }), + })); + await trx + .insertInto("change") + .values(deletionRows as any) + .execute(); + deletionChanges.push(...deletionRows); + } + + const combinedElements = [...leafChangesToApply, ...deletionChanges]; + + // If nothing to change, treat as no-op by returning target commit + if (combinedElements.length === 0) { + const commit = await trx + .selectFrom("commit") + .where("id", "=", args.to.id) + .selectAll() + .executeTakeFirstOrThrow(); + return commit; + } + + // 3) Create change set + commit + edges + version as tracked change rows + const changeSetId = uuidV7({ lix: args.lix }); + const commitId = uuidV7({ lix: args.lix }); + const now = timestamp({ lix: args.lix }); + + // Collect all raw changes to insert (with explicit ids + created_at) + const metadataChanges: LixChangeRaw[] = []; + + // change_set entity + metadataChanges.push({ + id: uuidV7({ lix: args.lix }), + entity_id: changeSetId, + schema_key: LixChangeSetSchema["x-lix-key"], + schema_version: LixChangeSetSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ id: changeSetId, metadata: null }), + created_at: now, + }); + + // change_set_element entities + for (const el of combinedElements) { + metadataChanges.push({ + id: uuidV7({ lix: args.lix }), + entity_id: `${changeSetId}~${el.id}`, + schema_key: LixChangeSetElementSchema["x-lix-key"], + schema_version: LixChangeSetElementSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: changeSetId, + change_id: el.id, + entity_id: el.entity_id, + schema_key: el.schema_key, + file_id: el.file_id, + }), + created_at: now, + }); + } + + // commit entity (track id for change_set_element) + const commitChangeId = uuidV7({ lix: args.lix }); + metadataChanges.push({ + id: commitChangeId, + entity_id: commitId, + schema_key: LixCommitSchema["x-lix-key"], + schema_version: LixCommitSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: commitId, + change_set_id: changeSetId, + }), + created_at: now, + }); + + // commit_edge entities: source->commit, target->commit (track ids for change_set_element) + const sourceEdgeChangeId = uuidV7({ lix: args.lix }); + metadataChanges.push({ + id: sourceEdgeChangeId, + entity_id: `${sourceCommitId}~${commitId}`, + schema_key: LixCommitEdgeSchema["x-lix-key"], + schema_version: LixCommitEdgeSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + parent_id: sourceCommitId, + child_id: commitId, + }), + created_at: now, + }); + + const targetEdgeChangeId = uuidV7({ lix: args.lix }); + metadataChanges.push({ + id: targetEdgeChangeId, + entity_id: `${args.to.id}~${commitId}`, + schema_key: LixCommitEdgeSchema["x-lix-key"], + schema_version: LixCommitEdgeSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + parent_id: 
args.to.id, + child_id: commitId, + }), + created_at: now, + }); + + // Add change_set_element entries for commit and edges so materializer can reach metadata + for (const meta of [ + { + change_id: commitChangeId, + entity_id: commitId, + schema_key: LixCommitSchema["x-lix-key"], + file_id: "lix" as const, + }, + { + change_id: sourceEdgeChangeId, + entity_id: `${sourceCommitId}~${commitId}`, + schema_key: LixCommitEdgeSchema["x-lix-key"], + file_id: "lix" as const, + }, + { + change_id: targetEdgeChangeId, + entity_id: `${args.to.id}~${commitId}`, + schema_key: LixCommitEdgeSchema["x-lix-key"], + file_id: "lix" as const, + }, + ]) { + metadataChanges.push({ + id: uuidV7({ lix: args.lix }), + entity_id: `${changeSetId}~${meta.change_id}`, + schema_key: LixChangeSetElementSchema["x-lix-key"], + schema_version: LixChangeSetElementSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: changeSetId, + change_id: meta.change_id, + entity_id: meta.entity_id, + schema_key: meta.schema_key, + file_id: meta.file_id, + }), + created_at: now, + }); + } + + // Fetch current version snapshot to preserve fields + const intDb = trx as unknown as Kysely; + const versionRow = await intDb + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", version.id) + .where("snapshot_content", "is not", null) + .select([sql`json(snapshot_content)`.as("snapshot_content")]) + .executeTakeFirstOrThrow(); + const currentVersion = versionRow.snapshot_content as unknown as LixVersion; + const updatedVersion = { + ...currentVersion, + commit_id: commitId, + } satisfies LixVersion; + + // version entity update as tracked change (track id for change_set_element) + const versionChangeId = uuidV7({ lix: args.lix }); + metadataChanges.push({ + id: versionChangeId, + entity_id: version.id, + schema_key: LixVersionSchema["x-lix-key"], + schema_version: LixVersionSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify(updatedVersion), + created_at: now, + }); + + // Also anchor the version change as a change_set_element for materialization + metadataChanges.push({ + id: uuidV7({ lix: args.lix }), + entity_id: `${changeSetId}~${versionChangeId}`, + schema_key: LixChangeSetElementSchema["x-lix-key"], + schema_version: LixChangeSetElementSchema["x-lix-version"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: changeSetId, + change_id: versionChangeId, + entity_id: version.id, + schema_key: LixVersionSchema["x-lix-key"], + file_id: "lix", + }), + created_at: now, + }); + + // Insert all rows via change view to populate internal tables/snapshots deterministically + if (metadataChanges.length > 0) { + await trx + .insertInto("change") + .values(metadataChanges as any) + .execute(); + } + + // Ensure FK validations for version update see commit/edge/change_set in global cache + updateStateCache({ + lix: args.lix, + changes: metadataChanges, + version_id: "global", + commit_id: commitId, + }); + + // Ensure the version view reflects the new commit immediately (bypass materializer latency) + await trx + .updateTable("version") + .set({ commit_id: commitId }) + .where("id", "=", version.id) + .execute(); + + // Prepare user entity cache updates (target content + deletions) + const userChangesForCache: LixChangeRaw[] = [ + ...leafChangesToApply.map((c) => ({ + id: c.id, + entity_id: c.entity_id, + 
schema_key: c.schema_key,
+				schema_version: c.schema_version,
+				file_id: c.file_id,
+				plugin_key: c.plugin_key,
+				snapshot_content: c.snapshot_content
+					? JSON.stringify(c.snapshot_content)
+					: null,
+				created_at: c.created_at,
+			})),
+			...deletionChanges.map((c) => ({
+				id: c.id,
+				entity_id: c.entity_id,
+				schema_key: c.schema_key,
+				schema_version: c.schema_version,
+				file_id: c.file_id,
+				plugin_key: c.plugin_key,
+				snapshot_content: null,
+				created_at: c.created_at ?? now,
+			})),
+		];
+
+		// Update cache once at the very end for the scoped version (user entities only)
+		updateStateCache({
+			lix: args.lix,
+			changes: userChangesForCache,
+			version_id: version.id,
+			commit_id: commitId,
+		});
+
+		// Return the created commit directly
+		return { id: commitId, change_set_id: changeSetId } satisfies LixCommit;
+	};
+
+	return args.lix.db.isTransaction
+		? executeInTransaction(args.lix.db)
+		: args.lix.db.transaction().execute(executeInTransaction);
+}
diff --git a/packages/lix-sdk/src/state/views/state-all.ts b/packages/lix-sdk/src/state/views/state-all.ts
new file mode 100644
index 0000000000..de658b90d4
--- /dev/null
+++ b/packages/lix-sdk/src/state/views/state-all.ts
@@ -0,0 +1,91 @@
+import type { Generated, Insertable, Selectable, Updateable } from "kysely";
+import type { Lix } from "../../lix/open-lix.js";
+
+export type StateAllView = {
+	entity_id: string;
+	schema_key: string;
+	file_id: string;
+	plugin_key: string;
+	snapshot_content: Record<string, any>;
+	schema_version: string;
+	version_id: string;
+	created_at: Generated<string>;
+	updated_at: Generated<string>;
+	inherited_from_version_id: string | null;
+	change_id: Generated<string>;
+	untracked: Generated<boolean>;
+	commit_id: Generated<string>;
+};
+
+// Kysely operation types
+export type StateAllRow = Selectable<StateAllView>;
+export type NewStateAllRow = Insertable<StateAllView>;
+export type StateAllRowUpdate = Updateable<StateAllView>;
+
+/**
+ * Creates the public state_all view (no tombstones) over the internal vtable,
+ * plus INSTEAD OF triggers to forward writes to the internal vtable.
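+ *
+ * @example
+ * // Hedged usage sketch (assumes `lix` is an already opened Lix instance):
+ * // once the view is applied, state_all reads like a regular table, and
+ * // tombstones (rows with NULL snapshot_content) are filtered out.
+ * applyStateAllView(lix);
+ * const rows = await lix.db
+ *   .selectFrom("state_all")
+ *   .where("schema_key", "=", "lix_key_value")
+ *   .selectAll()
+ *   .execute();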
+ */
+export function applyStateAllView(lix: Pick<Lix, "sqlite">): void {
+	lix.sqlite.exec(`
+    CREATE VIEW IF NOT EXISTS state_all AS
+    SELECT * FROM internal_state_vtable
+    WHERE snapshot_content IS NOT NULL;
+
+    -- Forward writes on state_all to the internal vtable
+    CREATE TRIGGER IF NOT EXISTS state_all_insert
+    INSTEAD OF INSERT ON state_all
+    BEGIN
+      INSERT INTO internal_state_vtable (
+        entity_id,
+        schema_key,
+        file_id,
+        version_id,
+        plugin_key,
+        snapshot_content,
+        schema_version,
+        untracked
+      ) VALUES (
+        NEW.entity_id,
+        NEW.schema_key,
+        NEW.file_id,
+        NEW.version_id,
+        NEW.plugin_key,
+        NEW.snapshot_content,
+        NEW.schema_version,
+        COALESCE(NEW.untracked, 0)
+      );
+    END;
+
+    CREATE TRIGGER IF NOT EXISTS state_all_update
+    INSTEAD OF UPDATE ON state_all
+    BEGIN
+      UPDATE internal_state_vtable
+      SET
+        entity_id = NEW.entity_id,
+        schema_key = NEW.schema_key,
+        file_id = NEW.file_id,
+        version_id = NEW.version_id,
+        plugin_key = NEW.plugin_key,
+        snapshot_content = NEW.snapshot_content,
+        schema_version = NEW.schema_version,
+        untracked = COALESCE(NEW.untracked, 0)
+      WHERE
+        entity_id = OLD.entity_id AND
+        schema_key = OLD.schema_key AND
+        file_id = OLD.file_id AND
+        version_id = OLD.version_id;
+    END;
+
+    CREATE TRIGGER IF NOT EXISTS state_all_delete
+    INSTEAD OF DELETE ON state_all
+    BEGIN
+      DELETE FROM internal_state_vtable
+      WHERE
+        entity_id = OLD.entity_id AND
+        schema_key = OLD.schema_key AND
+        file_id = OLD.file_id AND
+        version_id = OLD.version_id;
+    END;
+  `);
+}
diff --git a/packages/lix-sdk/src/state/views/state-with-tombstones.test.ts b/packages/lix-sdk/src/state/views/state-with-tombstones.test.ts
new file mode 100644
index 0000000000..d2f0ae6425
--- /dev/null
+++ b/packages/lix-sdk/src/state/views/state-with-tombstones.test.ts
@@ -0,0 +1,71 @@
+import { expect, test } from "vitest";
+import { openLix } from "../../lix/open-lix.js";
+import { applyStateWithTombstonesView } from "./state-with-tombstones.js";
+
+test("state_with_tombstones exposes tracked deletions as tombstones", async () => {
+	const lix = await openLix({
+		keyValues: [
+			{
+				key: "lix_deterministic_mode",
+				value: { enabled: true },
+				lixcol_version_id: "global",
+			},
+		],
+	});
+
+	// Create the view (temporary until wired into schema bootstrap)
+	applyStateWithTombstonesView(lix);
+
+	const active = await lix.db
+		.selectFrom("active_version")
+		.selectAll()
+		.executeTakeFirstOrThrow();
+
+	// Insert a tracked row into active version
+	await lix.db
+		.insertInto("state_all")
+		.values({
+			entity_id: "e_del",
+			schema_key: "mock_schema_for_deleted",
+			file_id: "file_tombstone",
+			version_id: (active as any).version_id ?? (active as any).id,
+			plugin_key: "test_plugin",
+			schema_version: "1.0",
+			snapshot_content: { v: "live" },
+		})
+		.execute();
+
+	// Delete to create a tracked tombstone
+	await lix.db
+		.deleteFrom("state_all")
+		.where("entity_id", "=", "e_del")
+		.where("schema_key", "=", "mock_schema_for_deleted")
+		.where("file_id", "=", "file_tombstone")
+		.where("version_id", "=", (active as any).version_id ?? (active as any).id)
+		.execute();
+
+	// Default state_all should hide the deletion
+	const hidden = await lix.db
+		.selectFrom("state_all")
+		.where("entity_id", "=", "e_del")
+		.where("schema_key", "=", "mock_schema_for_deleted")
+		.where("file_id", "=", "file_tombstone")
+		.selectAll()
+		.execute();
+	expect(hidden).toHaveLength(0);
+
+	// state_with_tombstones should expose the tombstone (snapshot_content = null)
+	const withDeleted = await lix.db
+		.selectFrom("state_with_tombstones" as any)
+		.where("entity_id", "=", "e_del")
+		.where("schema_key", "=", "mock_schema_for_deleted")
+		.where("file_id", "=", "file_tombstone")
+		.selectAll()
+		.execute();
+
+	expect(withDeleted).toHaveLength(1);
+	const row: any = withDeleted[0];
+	expect(row.snapshot_content).toBeNull();
+	expect(row.change_id).toBeTruthy();
+	expect(row.commit_id).toBeTruthy();
+});
diff --git a/packages/lix-sdk/src/state/views/state-with-tombstones.ts b/packages/lix-sdk/src/state/views/state-with-tombstones.ts
new file mode 100644
index 0000000000..838ec901da
--- /dev/null
+++ b/packages/lix-sdk/src/state/views/state-with-tombstones.ts
@@ -0,0 +1,37 @@
+import type { Generated, Selectable } from "kysely";
+import type { Lix } from "../../lix/open-lix.js";
+
+export type StateWithTombstonesView = {
+	entity_id: string;
+	schema_key: string;
+	file_id: string;
+	plugin_key: string;
+	snapshot_content: Record<string, any> | null; // null for tombstones
+	schema_version: string;
+	version_id: string;
+	created_at: Generated<string>;
+	updated_at: Generated<string>;
+	inherited_from_version_id: string | null;
+	change_id: Generated<string>;
+	untracked: Generated<boolean>;
+	commit_id: Generated<string>;
+};
+
+export type StateWithTombstonesRow = Selectable<StateWithTombstonesView>;
+
+/**
+ * Creates a read-only view that exposes tracked deletions as tombstones.
+ *
+ * This view reads from the materialized state which includes both live rows
+ * and deletion tombstones (NULL snapshot_content). It intentionally does NOT
+ * filter out tombstones, unlike the resolved-state or public state_all views.
+ *
+ * We restrict to non-inherited rows (inherited_from_version_id IS NULL) so that
+ * each version only reports its own direct state or tombstones.
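+ *
+ * @example
+ * // Hedged usage sketch (assumes `lix` is an already opened Lix instance):
+ * // tracked deletions surface as rows whose snapshot_content is NULL.
+ * applyStateWithTombstonesView(lix);
+ * const tombstones = await lix.db
+ *   .selectFrom("state_with_tombstones" as any)
+ *   .where("snapshot_content", "is", null)
+ *   .selectAll()
+ *   .execute();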
+ */
+export function applyStateWithTombstonesView(lix: Pick<Lix, "sqlite">): void {
+	lix.sqlite.exec(`
+    CREATE VIEW IF NOT EXISTS state_with_tombstones AS
+    SELECT * FROM internal_state_vtable;
+  `);
+}
diff --git a/packages/lix-sdk/src/state/views/state.test.ts b/packages/lix-sdk/src/state/views/state.test.ts
new file mode 100644
index 0000000000..10a155ff90
--- /dev/null
+++ b/packages/lix-sdk/src/state/views/state.test.ts
@@ -0,0 +1,201 @@
+import type { LixSchemaDefinition } from "../../schema-definition/definition.js";
+import { simulationTest } from "../../test-utilities/simulation-test/simulation-test.js";
+
+simulationTest(
+	"state version_id defaults active version",
+	async ({ openSimulatedLix, expectDeterministic }) => {
+		const mockSchema: LixSchemaDefinition = {
+			"x-lix-key": "mock_schema",
+			"x-lix-version": "1.0",
+			type: "object",
+			additionalProperties: false,
+			properties: {
+				value: {
+					type: "string",
+				},
+			},
+		};
+
+		const lix = await openSimulatedLix({
+			keyValues: [
+				{
+					key: "lix_deterministic_mode",
+					value: { enabled: true, bootstrap: true },
+					lixcol_version_id: "global",
+				},
+			],
+		});
+
+		await lix.db
+			.insertInto("stored_schema")
+			.values({ value: mockSchema })
+			.execute();
+
+		// Get the active version ID to verify it gets auto-filled
+		const activeVersion = await lix.db
+			.selectFrom("active_version")
+			.select("version_id")
+			.executeTakeFirstOrThrow();
+
+		// Insert into state view without specifying version_id
+		// This should auto-fill with the active version
+		await lix.db
+			.insertInto("state")
+			.values({
+				entity_id: "entity0",
+				file_id: "f0",
+				schema_key: "mock_schema",
+				plugin_key: "lix_own_entity",
+				schema_version: "1.0",
+				snapshot_content: { value: "initial content" },
+			})
+			.execute();
+
+		// Verify the entity was inserted with the correct version_id
+		const insertedEntity = await lix.db
+			.selectFrom("state")
+			.where("entity_id", "=", "entity0")
+			.selectAll()
+			.execute();
+
+		expectDeterministic(insertedEntity).toHaveLength(1);
+		expectDeterministic(insertedEntity[0]).toMatchObject({
+			entity_id: "entity0",
+			file_id: "f0",
+			schema_key: "mock_schema",
+			plugin_key: "lix_own_entity",
+			schema_version: "1.0",
+			snapshot_content: { value: "initial content" },
+		});
+
+		// Verify the version_id was auto-filled with the active version
+		const entityInStateAll = await lix.db
+			.selectFrom("state_all")
+			.where("entity_id", "=", "entity0")
+			.select("version_id")
+			.executeTakeFirstOrThrow();
+
+		expectDeterministic(entityInStateAll.version_id).toBe(
+			activeVersion.version_id
+		);
+
+		// Test update operation
+		await lix.db
+			.updateTable("state")
+			.where("entity_id", "=", "entity0")
+			.set({
+				snapshot_content: { value: "updated content" },
+			})
+			.execute();
+
+		// Verify update worked
+		const updatedEntity = await lix.db
+			.selectFrom("state")
+			.where("entity_id", "=", "entity0")
+			.selectAll()
+			.execute();
+
+		expectDeterministic(updatedEntity[0]?.snapshot_content).toEqual({
+			value: "updated content",
+		});
+
+		// Test delete operation
+		await lix.db
+			.deleteFrom("state")
+			.where("entity_id", "=", "entity0")
+			.execute();
+
+		// Verify delete worked
+		const deletedEntity = await lix.db
+			.selectFrom("state")
+			.where("entity_id", "=", "entity0")
+			.selectAll()
+			.execute();
+
+		expectDeterministic(deletedEntity).toHaveLength(0);
+	}
+);
+
+// https://github.com/opral/lix-sdk/issues/344
+simulationTest(
+	"deleting key_value entities from state should not cause infinite loop",
+	async ({ openSimulatedLix, expectDeterministic }) => {
+		const lix = await openSimulatedLix({
+			keyValues: [
+				{
+					key: "lix_deterministic_mode",
+					value: { enabled: true, bootstrap: true },
+					lixcol_version_id: "global",
+				},
+			],
+		});
+
+		// 1. Insert key_value in global version (tracked)
+		await lix.db
+			.insertInto("key_value_all")
+			.values({
+				key: "test-key-global",
+				value: "global-tracked-value",
+				lixcol_version_id: "global",
+			})
+			.execute();
+
+		// 2. Insert key_value in global version (untracked)
+		await lix.db
+			.insertInto("key_value_all")
+			.values({
+				key: "test-key-global-untracked",
+				value: "global-untracked-value",
+				lixcol_version_id: "global",
+				lixcol_untracked: true,
+			})
+			.execute();
+
+		// 3. Insert key_value in active version (tracked)
+		await lix.db
+			.insertInto("key_value")
+			.values({
+				key: "test-key-active",
+				value: "active-tracked-value",
+			})
+			.execute();
+
+		// 4. Insert key_value in active version (untracked)
+		await lix.db
+			.insertInto("key_value")
+			.values({
+				key: "test-key-active-untracked",
+				value: "active-untracked-value",
+				lixcol_untracked: true,
+			})
+			.execute();
+
+		// Verify all entities exist before deletion (including inherited)
+		const entitiesBeforeDelete = await lix.db
+			.selectFrom("state")
+			.where("schema_key", "=", "lix_key_value")
+			.where("entity_id", "like", "test-key-%")
+			.selectAll()
+			.execute();
+
+		// state view shows active version entities + inherited from global
+		expectDeterministic(entitiesBeforeDelete).toHaveLength(4);
+
+		// Delete all key_value entities
+		// this is the reproduction of the infinite loop issue
+		await lix.db
+			.deleteFrom("state")
+			.where("schema_key", "=", "lix_key_value")
+			.execute();
+
+		// Verify all entities are deleted
+		const keyValueAfterDelete = await lix.db
+			.selectFrom("state")
+			.where("schema_key", "=", "lix_key_value")
+			.where("entity_id", "like", "test-key-%")
+			.selectAll()
+			.execute();
+
+		expectDeterministic(keyValueAfterDelete).toHaveLength(0);
+	}
+);
diff --git a/packages/lix-sdk/src/state/views/state.ts b/packages/lix-sdk/src/state/views/state.ts
new file mode 100644
index 0000000000..66c9b564d4
--- /dev/null
+++ b/packages/lix-sdk/src/state/views/state.ts
@@ -0,0 +1,91 @@
+import type { Insertable, Selectable, Updateable } from "kysely";
+import type { Lix } from "../../lix/open-lix.js";
+import type { StateAllView } from "./state-all.js";
+
+export type StateView = Omit<StateAllView, "version_id">;
+
+// Kysely operation types
+export type StateRow = Selectable<StateView>;
+export type NewStateRow = Insertable<StateView>;
+export type StateRowUpdate = Updateable<StateView>;
+
+/**
+ * Creates the public 'state' view filtered to the active version, and
+ * INSTEAD OF triggers that forward writes to state_all (which proxies to the vtable).
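+ *
+ * @example
+ * // Hedged usage sketch (assumes `lix` is an already opened Lix instance):
+ * // writes omit version_id and are routed to the active version by the
+ * // INSTEAD OF triggers below.
+ * applyStateView(lix);
+ * await lix.db
+ *   .insertInto("state")
+ *   .values({
+ *     entity_id: "entity0",
+ *     file_id: "f0",
+ *     schema_key: "mock_schema",
+ *     plugin_key: "lix_own_entity",
+ *     schema_version: "1.0",
+ *     snapshot_content: { value: "initial content" },
+ *   })
+ *   .execute();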
+ */
+export function applyStateView(lix: Pick<Lix, "sqlite">): void {
+	lix.sqlite.exec(`
+    CREATE VIEW IF NOT EXISTS state AS
+    SELECT
+      entity_id,
+      schema_key,
+      file_id,
+      plugin_key,
+      snapshot_content,
+      schema_version,
+      created_at,
+      updated_at,
+      inherited_from_version_id,
+      change_id,
+      untracked,
+      commit_id
+    FROM state_all
+    WHERE version_id IN (SELECT version_id FROM active_version);
+
+    -- Forward writes to the active version via state_all
+    CREATE TRIGGER IF NOT EXISTS state_insert
+    INSTEAD OF INSERT ON state
+    BEGIN
+      INSERT INTO state_all (
+        entity_id,
+        schema_key,
+        file_id,
+        version_id,
+        plugin_key,
+        snapshot_content,
+        schema_version,
+        untracked
+      ) VALUES (
+        NEW.entity_id,
+        NEW.schema_key,
+        NEW.file_id,
+        (SELECT version_id FROM active_version),
+        NEW.plugin_key,
+        NEW.snapshot_content,
+        NEW.schema_version,
+        COALESCE(NEW.untracked, 0)
+      );
+    END;
+
+    CREATE TRIGGER IF NOT EXISTS state_update
+    INSTEAD OF UPDATE ON state
+    BEGIN
+      UPDATE state_all
+      SET
+        entity_id = NEW.entity_id,
+        schema_key = NEW.schema_key,
+        file_id = NEW.file_id,
+        version_id = (SELECT version_id FROM active_version),
+        plugin_key = NEW.plugin_key,
+        snapshot_content = NEW.snapshot_content,
+        schema_version = NEW.schema_version,
+        untracked = COALESCE(NEW.untracked, 0)
+      WHERE
+        entity_id = OLD.entity_id
+        AND schema_key = OLD.schema_key
+        AND file_id = OLD.file_id
+        AND version_id = (SELECT version_id FROM active_version);
+    END;
+
+    CREATE TRIGGER IF NOT EXISTS state_delete
+    INSTEAD OF DELETE ON state
+    BEGIN
+      DELETE FROM state_all
+      WHERE
+        entity_id = OLD.entity_id
+        AND schema_key = OLD.schema_key
+        AND file_id = OLD.file_id
+        AND version_id = (SELECT version_id FROM active_version);
+    END;
+  `);
+}
diff --git a/packages/lix-sdk/src/state/vtable/commit.bench.ts b/packages/lix-sdk/src/state/vtable/commit.bench.ts
new file mode 100644
index 0000000000..2e418a26ea
--- /dev/null
+++ b/packages/lix-sdk/src/state/vtable/commit.bench.ts
@@ -0,0 +1,124 @@
+import { bench } from "vitest";
+import { openLix } from "../../lix/open-lix.js";
+import { commit } from "./commit.js";
+import { insertTransactionState } from "../transaction/insert-transaction-state.js";
+import { timestamp } from "../../deterministic/timestamp.js";
+
+// NOTE: openLix includes database initialization overhead
+// This affects all benchmarks equally and represents real-world usage patterns
+// this test exists to act as baseline for commit performance
+bench("commit empty transaction (baseline)", async () => {
+	const lix = await openLix({});
+
+	commit({
+		lix: lix as any,
+	});
+});
+
+bench("commit transaction with 1 row", async () => {
+	const lix = await openLix({});
+
+	// Insert multiple transaction states in a single batch
+	const multipleData = [];
+	for (let i = 0; i < 1; i++) {
+		multipleData.push({
+			entity_id: `commit_test_entity_${i}`,
+			version_id: "global",
+			schema_key: "commit_benchmark_entity",
+			file_id: `commit_file`,
+			plugin_key: "benchmark_plugin",
+			snapshot_content: JSON.stringify({
+				id: `commit_test_entity_${i}`,
+				value: `test_data_${i}`,
+				metadata: { type: "commit_benchmark", index: i },
+			}),
+			schema_version: "1.0",
+			untracked: false,
+		});
+	}
+
+	insertTransactionState({
+		lix: lix as any,
+		data: multipleData,
+		timestamp: timestamp({ lix }),
+	});
+
+	// Benchmark: Commit all transaction states
+	commit({
+		lix: { sqlite: lix.sqlite, db: lix.db as any, hooks: lix.hooks },
+	});
+});
+
+bench("commit transaction with 100 rows", async () => {
+	const lix = await openLix({});
+
+	// Insert
multiple transaction states in a single batch + const multipleData = []; + for (let i = 0; i < 100; i++) { + multipleData.push({ + entity_id: `commit_test_entity_${i}`, + version_id: "global", + schema_key: "commit_benchmark_entity", + file_id: `commit_file`, + plugin_key: "benchmark_plugin", + snapshot_content: JSON.stringify({ + id: `commit_test_entity_${i}`, + value: `test_data_${i}`, + metadata: { type: "commit_benchmark", index: i }, + }), + schema_version: "1.0", + untracked: false, + }); + } + insertTransactionState({ + lix: lix as any, + data: multipleData, + timestamp: timestamp({ lix }), + }); + + // Benchmark: Commit all transaction states + commit({ + lix: { sqlite: lix.sqlite, db: lix.db as any, hooks: lix.hooks }, + }); +}); + +bench("commit 10 transactions x 10 changes (sequential)", async () => { + const lix = await openLix({}); + + const TXN_COUNT = 10; + const ROWS_PER_TXN = 10; + + for (let t = 0; t < TXN_COUNT; t++) { + const batch = []; + for (let i = 0; i < ROWS_PER_TXN; i++) { + const globalIndex = t * ROWS_PER_TXN + i; + batch.push({ + entity_id: `seq_commit_entity_${globalIndex}`, + version_id: "global", + schema_key: "commit_benchmark_entity", + file_id: `commit_file`, + plugin_key: "benchmark_plugin", + snapshot_content: JSON.stringify({ + id: `seq_commit_entity_${globalIndex}`, + value: `seq_data_${globalIndex}`, + metadata: { type: "commit_benchmark_seq", txn: t, index: i }, + }), + schema_version: "1.0", + untracked: false, + }); + } + + insertTransactionState({ + lix: lix as any, + data: batch, + timestamp: timestamp({ lix }), + }); + + // Commit the current transaction batch + commit({ + lix: { sqlite: lix.sqlite, db: lix.db as any, hooks: lix.hooks }, + }); + } +}); + +bench.todo("commit with mixed operations (insert/update/delete)"); diff --git a/packages/lix-sdk/src/state/commit.test.ts b/packages/lix-sdk/src/state/vtable/commit.test.ts similarity index 81% rename from packages/lix-sdk/src/state/commit.test.ts rename to packages/lix-sdk/src/state/vtable/commit.test.ts index f301ae1faa..ab871a0a07 100644 --- a/packages/lix-sdk/src/state/commit.test.ts +++ b/packages/lix-sdk/src/state/vtable/commit.test.ts @@ -1,176 +1,201 @@ import { test, expect, describe } from "vitest"; import type { Kysely } from "kysely"; -import type { LixInternalDatabaseSchema } from "../database/schema.js"; -import type { LixCommitEdge } from "../commit/schema.js"; -import { insertTransactionState } from "./insert-transaction-state.js"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import type { LixCommitEdge } from "../../commit/schema.js"; +import { insertTransactionState } from "../transaction/insert-transaction-state.js"; import { commit } from "./commit.js"; -import { openLix } from "../lix/open-lix.js"; -import { nanoId, uuidV7 } from "../deterministic/index.js"; -import { switchAccount } from "../account/switch-account.js"; -import { commitIsAncestorOf } from "../query-filter/commit-is-ancestor-of.js"; -import { selectActiveVersion } from "../version/select-active-version.js"; +import { openLix } from "../../lix/open-lix.js"; +import { nanoId, timestamp, uuidV7 } from "../../deterministic/index.js"; +import { switchAccount } from "../../account/switch-account.js"; +import { commitIsAncestorOf } from "../../query-filter/commit-is-ancestor-of.js"; +import { selectActiveVersion } from "../../version/select-active-version.js"; -test("commit should include meta changes (changeset, edges, version updates) in the change table", async () => { +/** + * 
TL;DR + * ──► *Business* rows (actual user-domain data) are stored in the *active* + * version that the user is editing. + * ──► *Graph* rows (everything that describes the history DAG: change-sets, + * commits, edges, version objects) are *always* stored in the + * special version called **global**. + * + * This split gives us two key properties: + * + * 1. **Single source of truth for history topology** + * The entire DAG is materialised exactly once (under `global`), so + * graph traversals and lineage CTEs never need to bounce across version + * tables. Think "`.git/refs`-style catalogue", but in-DB. + * + * 2. **Version-local changes** + * + * + * BUSINESS DATA lives on the *active version*, + * GRAPH META-DATA lives on *global*. + * + * ┌─────────────────┐ ┌─────────────────────────┐ + * │ version_active │ user-data │ COMMIT (active) │ + * └─────────────────┘ ───────────────▶│ entity │ + * └─────────────────────────┘ + * ▲ + * │ graph rows that *describe* ↑ + * ┌────────────┐ ┌─────────┴───────────────────────────┐ + * │ global │ graph rows │ COMMIT (global) – graph-only │ + * └────────────┘ ─────────────────▶│ change_set, commit, edge, version │ + * └─────────────────────────────────────┘ + */ +test("split-commit: business rows on active version, graph rows on global", async () => { + /*──────────────────────── 1. initialise workspace ─────────────────────*/ const lix = await openLix({ - account: { id: "test-account", name: "Test User" }, - keyValues: [{ key: "lix_deterministic_mode", value: { enabled: true } }], + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + }, + ], }); const db = lix.db as unknown as Kysely; - // 1. Get the active version (should be 'global') - const activeVersions = await db + /* Resolve IDs for the two versions involved */ + const activeRow = await db .selectFrom("active_version") .selectAll() - .execute(); - - expect(activeVersions.length).toBe(1); - const activeVersion = activeVersions[0]; - expect(activeVersion).toBeDefined(); - const versionId = activeVersion!.version_id; + .executeTakeFirstOrThrow(); + const activeVersionId = activeRow.version_id; // e.g. "main" + expect(activeVersionId).not.toBe("global"); - // Get the previous change set for this version - const versionBefore = await db + const activeVersionBefore = await db .selectFrom("version") - .where("id", "=", versionId) + .where("id", "=", activeVersionId) .selectAll() .executeTakeFirstOrThrow(); - const previousCommitId = versionBefore.commit_id; + const globalVersionBefore = await db + .selectFrom("version") + .where("id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + + const prevCommitActive = activeVersionBefore.commit_id; + const prevCommitGlobal = globalVersionBefore.commit_id; - // 2. Insert transaction state + /*──────────────────────── 2. 
stage two user changes ───────────────────*/ insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { - entity_id: "test-entity-1", + entity_id: "para-1", schema_key: "lix_key_value", file_id: "lix", plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - key: "test-key-1", - value: "test-value-1", - }), + snapshot_content: JSON.stringify({ key: "k1", value: "v1" }), schema_version: "1.0", - version_id: versionId, + version_id: activeVersionId, untracked: false, }, - ], - }); - - insertTransactionState({ - lix, - data: [ { - entity_id: "test-entity-2", + entity_id: "para-2", schema_key: "lix_key_value", file_id: "lix", plugin_key: "lix_own_entity", - snapshot_content: JSON.stringify({ - key: "test-key-2", - value: "test-value-2", - }), + snapshot_content: JSON.stringify({ key: "k2", value: "v2" }), schema_version: "1.0", - version_id: versionId, + version_id: activeVersionId, untracked: false, }, ], }); - // 3. Commit + /*──────────────────────── 3. COMMIT ───────────────────────────────────*/ commit({ lix }); - // 4. Expect the commit of the active version to have one edge to the previous one - const versionAfter = await db + const activeVersionAfter = await db .selectFrom("version") - .where("id", "=", versionId) + .where("id", "=", activeVersionId) .selectAll() .executeTakeFirstOrThrow(); - - const newCommitId = versionAfter.commit_id; - expect(newCommitId).not.toBe(previousCommitId); - - // Check edges - should have exactly one edge from previous to new - const edges = await db - .selectFrom("commit_edge") - .where("parent_id", "=", previousCommitId) - .where("child_id", "=", newCommitId) + const globalVersionAfter = await db + .selectFrom("version") + .where("id", "=", "global") .selectAll() - .execute(); + .executeTakeFirstOrThrow(); - expect(edges.length).toBe(1); + const commitActiveId = activeVersionAfter.commit_id; // data commit + const commitGlobalId = globalVersionAfter.commit_id; // graph commit + + expect(commitActiveId).not.toBe(prevCommitActive); + expect(commitGlobalId).not.toBe(prevCommitGlobal); + + /* helper: build histogram of schema_key counts for a change_set --------*/ + const countSchemas = async (changeSetId: string) => { + const rows = await db + .selectFrom("change_set_element") + .innerJoin("change", "change_set_element.change_id", "change.id") + .where("change_set_id", "=", changeSetId) + .select([ + "change.schema_key", + "change.entity_id", + "change_set_element.change_id", + "change.snapshot_content", + ]) + .execute(); - // Get the change set ID from the commit - const newCommit = await db + return rows.reduce>((map, r) => { + map[r.schema_key] = (map[r.schema_key] ?? 0) + 1; + return map; + }, {}); + }; + + const commitActive = await db .selectFrom("commit") - .where("id", "=", newCommitId) + .where("id", "=", commitActiveId) + .selectAll() + .executeTakeFirstOrThrow(); + const commitGlobal = await db + .selectFrom("commit") + .where("id", "=", commitGlobalId) .selectAll() .executeTakeFirstOrThrow(); - const newChangeSetId = newCommit.change_set_id; - - // 5. Directly expect on the elements in this new set to contain the expected changes - const changeSetElements = await db - .selectFrom("change_set_element") - .where("change_set_id", "=", newChangeSetId) + const activeSchemas = await countSchemas(commitActive.change_set_id); + const globalSchemas = await countSchemas(commitGlobal.change_set_id); + + /*──────────────────────── 4. 
assertions ───────────────────────────────*/ + /* COMMIT ON ACTIVE VERSION ────────────────────────────────────────────*/ + expect(activeSchemas["lix_key_value"]).toBe(2); // user rows + + // Must *not* contain any graph-rows which belong to global commit + expect(activeSchemas["lix_change_author"]).toBeUndefined(); + expect(activeSchemas["lix_commit"]).toBeUndefined(); + expect(activeSchemas["lix_change_set"]).toBeUndefined(); + expect(activeSchemas["lix_commit_edge"]).toBeUndefined(); + expect(activeSchemas["lix_version"]).toBeUndefined(); + + // COMMIT ON GLOBAL (graph-only) + expect(globalSchemas["lix_key_value"]).toBeUndefined(); + + expect(globalSchemas["lix_change_author"]).toBe(2); // two entities (para-1, para-2) + expect(globalSchemas["lix_commit"]).toBe(2); // copy of active + self + expect(globalSchemas["lix_commit_edge"]).toBe(2); // edge(active) + edge(global) + expect(globalSchemas["lix_version"]).toBe(2); // version_active & global + expect(globalSchemas["lix_change_set"]).toBe(2); // active + self + // Actual count: 2 user + 2 authors + 6 global metadata + 2 meta-elements (no duplicates) = 12 + expect(globalSchemas["lix_change_set_element"]).toBe(12); + + /*──────────────────── 5. graph edges exist exactly once ───────────────*/ + const edgeActive = await db + .selectFrom("commit_edge") + .where("parent_id", "=", prevCommitActive) + .where("child_id", "=", commitActiveId) .selectAll() .execute(); + expect(edgeActive.length).toBe(1); // prevActive ─▶ active - // We expect exactly these elements: - // - 2 user data changes (test-entity-1, test-entity-2) - // - 2 change authors (one for each user data change) - // - 1 changeset creation - // - 1 commit creation - // - 1 commit edge creation - // - 1 version update - // Total: 8 elements - expect(changeSetElements.length).toBe(8); - - // Verify the specific changes are in the change set - const elementChangeIds = changeSetElements.map((e) => e.change_id); - - // Get the actual changes to verify content - const changes = await db - .selectFrom("change") - .where("id", "in", elementChangeIds) + const edgeGlobal = await db + .selectFrom("commit_edge") + .where("parent_id", "=", prevCommitGlobal) + .where("child_id", "=", commitGlobalId) .selectAll() .execute(); - - // Group by schema_key for easier verification - const changesBySchema = changes.reduce( - (acc, change) => { - if (!acc[change.schema_key]) { - acc[change.schema_key] = []; - } - acc[change.schema_key]!.push(change); - return acc; - }, - {} as Record - ); - - // Verify we have the expected types of changes - expect(changesBySchema["lix_key_value"]?.length).toBe(2); // Our test data - expect(changesBySchema["lix_change_author"]?.length).toBe(2); // Change authors for test data - expect(changesBySchema["lix_change_set"]?.length).toBe(1); // The new changeset - expect(changesBySchema["lix_commit"]?.length).toBe(1); // The new commit - expect(changesBySchema["lix_commit_edge"]?.length).toBe(1); // The commit edge - expect(changesBySchema["lix_version"]?.length).toBe(1); // Version update - - // Verify the test entities are included - const keyValueChanges = changesBySchema["lix_key_value"] || []; - const keyValueEntities = keyValueChanges.map((c) => c.entity_id); - expect(keyValueEntities).toContain("test-entity-1"); - expect(keyValueEntities).toContain("test-entity-2"); - - // Verify change authors were created for user data changes - const changeAuthors = changesBySchema["lix_change_author"] || []; - expect(changeAuthors.length).toBe(2); - - // Change author entity IDs 
should reference the user data change IDs - const userDataChangeIds = keyValueChanges.map((c) => c.id); - for (const author of changeAuthors) { - // Entity ID format for change authors is "changeId~accountId" - const changeId = author.entity_id.split("~")[0]; - expect(userDataChangeIds).toContain(changeId); - } + expect(edgeGlobal.length).toBe(1); // prevGlobal ─▶ global }); test("commit with no changes should not create a change set", async () => { @@ -189,7 +214,6 @@ test("commit with no changes should not create a change set", async () => { .selectFrom("change_set") .selectAll() .execute(); - // Commit with no changes commit({ lix }); @@ -231,6 +255,7 @@ test("commit should handle multiple versions correctly", async () => { // Create change sets for versions insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionAChangeSetId, @@ -249,6 +274,7 @@ test("commit should handle multiple versions correctly", async () => { insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionAWorkingChangeSetId, @@ -267,6 +293,7 @@ test("commit should handle multiple versions correctly", async () => { insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionBChangeSetId, @@ -285,6 +312,7 @@ test("commit should handle multiple versions correctly", async () => { insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionBWorkingChangeSetId, @@ -304,6 +332,7 @@ test("commit should handle multiple versions correctly", async () => { // Create commits for version A insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionACommitId, @@ -323,6 +352,7 @@ test("commit should handle multiple versions correctly", async () => { insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionAWorkingCommitId, @@ -343,6 +373,7 @@ test("commit should handle multiple versions correctly", async () => { // Create version A insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionAId, @@ -365,6 +396,7 @@ test("commit should handle multiple versions correctly", async () => { // Create commits for version B insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionBCommitId, @@ -384,6 +416,7 @@ test("commit should handle multiple versions correctly", async () => { insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionBWorkingCommitId, @@ -404,6 +437,7 @@ test("commit should handle multiple versions correctly", async () => { // Create version B insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: versionBId, @@ -426,6 +460,7 @@ test("commit should handle multiple versions correctly", async () => { // Insert entity for version A insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "version-a-entity", @@ -446,6 +481,7 @@ test("commit should handle multiple versions correctly", async () => { // Insert entity for version B insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "version-b-entity", @@ -829,6 +865,7 @@ test("global version should move forward when mutations occur", async () => { // Insert data with version_id = "global" insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "test-global-entity", @@ -919,6 +956,7 @@ test("commit should create edge changes that are discoverable by lineage CTE", a // Insert 
data with version_id = "global" insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "test-edge-entity", @@ -1064,6 +1102,7 @@ test("active version should move forward when mutations occur", async () => { // Insert data with version_id = activeVersionId insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ { entity_id: "test-active-entity", @@ -1436,6 +1475,68 @@ test("creates change_author records for insert, update, and delete operations", }); }); +test("global cache entry should be inherited by child versions in resolved view", async () => { + const lix = await openLix({}); + const db = lix.db as unknown as Kysely; + + // Get the active version (should be main, not global) + const activeVersion = await lix.db + .selectFrom("active_version") + .select("version_id") + .executeTakeFirstOrThrow(); + + expect(activeVersion.version_id).not.toBe("global"); + + // Insert a mock entity into global version via transaction + insertTransactionState({ + lix, + timestamp: timestamp({ lix }), + data: [ + { + entity_id: "mock-global-entity", + schema_key: "mock_schema", + file_id: "mock-file", + plugin_key: "mock_plugin", + snapshot_content: JSON.stringify({ + id: "mock-global-entity", + data: "test-data", + }), + schema_version: "1.0", + version_id: "global", + untracked: false, + }, + ], + }); + + // Commit the changes + commit({ lix }); + + // Verify cache has exactly one entry for this entity (in global version) + const cacheEntries = await db + .selectFrom("internal_state_cache") + .selectAll() + .where("entity_id", "=", "mock-global-entity") + .execute(); + + expect(cacheEntries).toHaveLength(1); + expect(cacheEntries[0]?.version_id).toBe("global"); + + // Verify resolved view returns the entity for both global and active version + const resolvedEntries = await db + .selectFrom("internal_resolved_state_all") + .select(["version_id", "entity_id", "schema_key"]) + .where("entity_id", "=", "mock-global-entity") + .orderBy("version_id", "asc") + .execute(); + + // Should have two entries: one for active version (inherited) and one for global + expect(resolvedEntries).toHaveLength(2); + + const versionIds = resolvedEntries.map((e) => e.version_id).sort(); + expect(versionIds).toContain("global"); + expect(versionIds).toContain(activeVersion.version_id); +}); + describe("file lixcol cache updates", () => { test("should update cache on file insert", async () => { const lix = await openLix({}); @@ -1601,8 +1702,6 @@ describe("file lixcol cache updates", () => { .selectAll() .executeTakeFirstOrThrow(); - console.log("Initial cache:", initialCache); - // Update the file multiple times using file_all (in deterministic mode, timestamps auto-increment) await lix.db .updateTable("file_all") @@ -1625,8 +1724,6 @@ describe("file lixcol cache updates", () => { .selectAll() .executeTakeFirstOrThrow(); - console.log("Final cache:", finalCache); - // created_at should be preserved from the initial insert expect(finalCache.created_at).toBe(initialCache.created_at); // updated_at should be different @@ -1682,6 +1779,7 @@ describe("file lixcol cache updates", () => { // Perform mixed operations insertTransactionState({ lix, + timestamp: timestamp({ lix }), data: [ // New file { diff --git a/packages/lix-sdk/src/state/vtable/commit.ts b/packages/lix-sdk/src/state/vtable/commit.ts new file mode 100644 index 0000000000..34a1364956 --- /dev/null +++ b/packages/lix-sdk/src/state/vtable/commit.ts @@ -0,0 +1,836 @@ +import { type Kysely, sql } from "kysely"; +import { + type 
LixChangeSet, + type LixChangeSetElement, + LixChangeSetElementSchema, + LixChangeSetSchema, +} from "../../change-set/schema.js"; +import type { LixChangeRaw } from "../../change/schema.js"; +import { executeSync } from "../../database/execute-sync.js"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import { LixVersionSchema, type LixVersion } from "../../version/schema.js"; +import { nanoId } from "../../deterministic/index.js"; +import { uuidV7 } from "../../deterministic/uuid-v7.js"; +import { commitDeterministicSequenceNumber } from "../../deterministic/sequence.js"; +import { timestamp } from "../../deterministic/timestamp.js"; +import type { Lix } from "../../lix/open-lix.js"; +import { commitIsAncestorOf } from "../../query-filter/commit-is-ancestor-of.js"; +import type { LixCommitEdge } from "../../commit/schema.js"; +import { updateStateCache } from "../cache/update-state-cache.js"; +import { updateUntrackedState } from "../untracked/update-untracked-state.js"; + +/** + * Commits all transaction changes to permanent storage. + * + * This function handles the COMMIT stage of the state mutation flow. It takes + * all changes accumulated in the transaction table (internal_change_in_transaction), + * creates commits for each version with data changes, and then creates a global + * commit containing all the graph metadata (commits, changesets, edges, version updates). + * + * @example + * // After accumulating changes via insertTransactionState + * commit({ lix }); + * // All pending changes are now persisted + */ +export function commit(args: { + lix: Pick; +}): number { + const transactionTimestamp = timestamp({ lix: args.lix }); + const db = args.lix.db as unknown as Kysely; + + // Query all transaction changes + const allTransactionChanges = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_change_in_transaction") + .select([ + "id", + "entity_id", + "schema_key", + "schema_version", + "file_id", + "plugin_key", + "version_id", + sql`json(snapshot_content)`.as("snapshot_content"), + "created_at", + "untracked", + ]), + }); + + // Separate tracked and untracked changes + const trackedChangesByVersion = new Map(); + const untrackedChanges: any[] = []; + + for (const change of allTransactionChanges) { + if (change.untracked === 1) { + untrackedChanges.push(change); + } else { + if (!trackedChangesByVersion.has(change.version_id)) { + trackedChangesByVersion.set(change.version_id, []); + } + trackedChangesByVersion.get(change.version_id)!.push(change); + } + } + + // Process all untracked changes immediately + for (const change of untrackedChanges) { + updateUntrackedState({ + lix: args.lix, + change: { + id: change.id, + entity_id: change.entity_id, + schema_key: change.schema_key, + file_id: change.file_id, + plugin_key: change.plugin_key, + snapshot_content: change.snapshot_content, + schema_version: change.schema_version, + created_at: change.created_at, + }, + version_id: change.version_id, + }); + } + + // Prepare to collect all changes + const allChangesToFlush: LixChangeRaw[] = []; + // Collect change_set_elements separately to avoid filtering later + const changeSetElements: LixChangeRaw[] = []; + // Collect all change_authors separately (all are global since changes are global) + const allChangeAuthors: LixChangeRaw[] = []; + + // Track metadata for each version that gets a commit + const versionMetadata = new Map< + string, + { + commitId: string; + changeSetId: string; + previousCommitId: string; + } + >(); + + // Step 1: 
Create commits and changesets for each version with changes + for (const [version_id, changes] of trackedChangesByVersion) { + if (changes.length === 0) continue; + + // Get version info + const versionRows = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", version_id) + .where("snapshot_content", "is not", null) + .select("snapshot_content") + .limit(1), + }); + + if (versionRows.length === 0 || !versionRows[0]?.snapshot_content) { + throw new Error(`Version with id '${version_id}' not found.`); + } + + const versionData = JSON.parse( + versionRows[0].snapshot_content + ) as LixVersion; + const changeSetId = uuidV7({ lix: args.lix }); + const commitId = uuidV7({ lix: args.lix }); + + // Store metadata for later use + versionMetadata.set(version_id, { + commitId, + changeSetId, + previousCommitId: versionData.commit_id, + }); + } + + // Step 2: If we have any commits but global doesn't have one yet, create global commit + if (versionMetadata.size > 0 && !versionMetadata.has("global")) { + // Get global version info + const globalVersionRows = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", "global") + .where("snapshot_content", "is not", null) + .select("snapshot_content") + .limit(1), + }); + + if ( + globalVersionRows.length === 0 || + !globalVersionRows[0]?.snapshot_content + ) { + throw new Error(`Global version not found.`); + } + + const globalVersion = JSON.parse( + globalVersionRows[0].snapshot_content + ) as LixVersion; + const globalChangeSetId = nanoId({ lix: args.lix }); + const globalCommitId = uuidV7({ lix: args.lix }); + + // Store global metadata + versionMetadata.set("global", { + commitId: globalCommitId, + changeSetId: globalChangeSetId, + previousCommitId: globalVersion.commit_id, + }); + } + + // Get active accounts for change_author records + const activeAccounts = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_active_account") + .where("version_id", "=", "global") + .where("snapshot_content", "is not", null) + .select( + sql`json_extract(snapshot_content, '$.account_id')`.as("account_id") + ), + }); + + // Step 3: Process each version's changes completely + for (const [version_id, changes] of trackedChangesByVersion) { + if (changes.length === 0) continue; + + const meta = versionMetadata.get(version_id)!; + const changeSetId = meta.changeSetId; + + // Add user data changes + for (const change of changes) { + allChangesToFlush.push({ + id: change.id, + entity_id: change.entity_id, + schema_key: change.schema_key, + schema_version: change.schema_version, + file_id: change.file_id, + plugin_key: change.plugin_key, + created_at: change.created_at, + snapshot_content: change.snapshot_content, + }); + } + + // Create change_author records for each change and each active account + // These are global metadata since changes are global + for (const change of changes) { + for (const account of activeAccounts) { + const changeAuthorId = uuidV7({ lix: args.lix }); + const authorChange = { + id: changeAuthorId, + entity_id: `${change.id}~${account.account_id}`, + schema_key: "lix_change_author", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + created_at: transactionTimestamp, + snapshot_content: JSON.stringify({ + change_id: change.id, + account_id: 
account.account_id, + }), + }; + allChangeAuthors.push(authorChange); + } + } + + // Create changeset elements for all changes of this version + // These are global metadata - collect them separately + for (const change of changes) { + const elementId = uuidV7({ lix: args.lix }); + const changeSetElement = { + id: elementId, + entity_id: `${changeSetId}~${change.id}`, + schema_key: "lix_change_set_element", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: changeSetId, + change_id: change.id, + schema_key: change.schema_key, + file_id: change.file_id, + entity_id: change.entity_id, + } satisfies LixChangeSetElement), + schema_version: LixChangeSetElementSchema["x-lix-version"], + created_at: transactionTimestamp, + }; + // Add to separate collection to avoid filtering later + changeSetElements.push(changeSetElement); + } + } + + // Step 4: Handle working changeset updates for each version + for (const [version_id, changes] of trackedChangesByVersion) { + if (changes.length === 0) continue; + + // Get version data to access working_commit_id + const versionRows = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", version_id) + .select("snapshot_content") + .limit(1), + }); + + const versionData = JSON.parse( + versionRows[0]!.snapshot_content + ) as LixVersion; + + const [workingCommitRow] = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_commit") + .where("entity_id", "=", versionData.working_commit_id) + .where("snapshot_content", "is not", null) + .select("snapshot_content") + .limit(1), + }); + + if (workingCommitRow?.snapshot_content) { + const workingCommit = JSON.parse( + workingCommitRow.snapshot_content + ) as any; + const workingChangeSetId = workingCommit.change_set_id; + + // Filter out lix internal entities for working changeset + const userChanges = changes.filter( + (change) => + change.schema_key !== "lix_change_set" && + change.schema_key !== "lix_change_set_edge" && + change.schema_key !== "lix_change_set_element" && + change.schema_key !== "lix_version" + ); + + if (userChanges.length > 0) { + // Handle deletions and updates for working changeset elements + const deletionChanges = userChanges.filter((change) => { + const parsedSnapshot = change.snapshot_content + ? JSON.parse(change.snapshot_content) + : null; + return !parsedSnapshot || parsedSnapshot.snapshot_id === "no-content"; + }); + + const nonDeletionChanges = userChanges.filter((change) => { + const parsedSnapshot = change.snapshot_content + ? 
JSON.parse(change.snapshot_content) + : null; + return parsedSnapshot && parsedSnapshot.snapshot_id !== "no-content"; + }); + + // Check for entities at checkpoint (for deletions) + const entitiesAtCheckpoint = new Set(); + if (deletionChanges.length > 0) { + const checkpointCommitResult = executeSync({ + lix: args.lix, + query: db + .selectFrom("commit") + .innerJoin("entity_label", (join) => + join + .onRef("entity_label.entity_id", "=", "commit.id") + .on("entity_label.schema_key", "=", "lix_commit") + ) + .innerJoin("label", "label.id", "entity_label.label_id") + .where("label.name", "=", "checkpoint") + .where( + commitIsAncestorOf( + { id: versionData.commit_id }, + { includeSelf: true, depth: 1 } + ) + ) + .select("commit.id") + .limit(1), + }); + + const checkpointCommitId = checkpointCommitResult[0]?.id; + if (checkpointCommitId) { + const checkpointEntities = executeSync({ + lix: args.lix, + query: db + .selectFrom("state_history") + .where("depth", "=", 0) + .where("commit_id", "=", checkpointCommitId) + .where((eb) => + eb.or( + deletionChanges.map((change) => + eb.and([ + eb("entity_id", "=", change.entity_id), + eb("schema_key", "=", change.schema_key), + eb("file_id", "=", change.file_id), + ]) + ) + ) + ) + .select(["entity_id", "schema_key", "file_id"]), + }); + + for (const entity of checkpointEntities) { + entitiesAtCheckpoint.add( + `${entity.entity_id}|${entity.schema_key}|${entity.file_id}` + ); + } + } + } + + // Find existing working change set elements to delete + const existingEntities = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .select([ + "_pk", + "entity_id", + sql`json_extract(snapshot_content, '$.entity_id')`.as( + "element_entity_id" + ), + sql`json_extract(snapshot_content, '$.schema_key')`.as( + "element_schema_key" + ), + sql`json_extract(snapshot_content, '$.file_id')`.as( + "element_file_id" + ), + ]) + .where("entity_id", "like", `${workingChangeSetId}~%`) + .where("schema_key", "=", "lix_change_set_element") + .where("file_id", "=", "lix") + .where("version_id", "=", "global") + .where((eb) => + eb.or( + userChanges.map((change) => + eb.and([ + eb( + sql`json_extract(snapshot_content, '$.entity_id')`, + "=", + change.entity_id + ), + eb( + sql`json_extract(snapshot_content, '$.schema_key')`, + "=", + change.schema_key + ), + eb( + sql`json_extract(snapshot_content, '$.file_id')`, + "=", + change.file_id + ), + ]) + ) + ) + ), + }); + + // Delete existing working change set elements as untracked changes + for (const existing of existingEntities) { + // The entity_id for a change_set_element is "${change_set_id}~${change_id}" + // We already queried for entity_id LIKE '${workingChangeSetId}~%' + // So existing.entity_id already contains the correct format + const entityIdForDeletion = existing.entity_id; + // Handle working changeset elements as untracked + updateUntrackedState({ + lix: args.lix, + change: { + id: uuidV7({ lix: args.lix }), + entity_id: entityIdForDeletion, + schema_key: "lix_change_set_element", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: null, // null indicates deletion + schema_version: LixChangeSetElementSchema["x-lix-version"], + created_at: transactionTimestamp, + }, + version_id: "global", + }); + } + + // Add deletion changes that existed at checkpoint as untracked + for (const deletion of deletionChanges) { + const key = `${deletion.entity_id}|${deletion.schema_key}|${deletion.file_id}`; + if (entitiesAtCheckpoint.has(key)) { + // Handle working changeset 
elements as untracked + updateUntrackedState({ + lix: args.lix, + change: { + id: uuidV7({ lix: args.lix }), + entity_id: `${workingChangeSetId}~${deletion.id}`, + schema_key: "lix_change_set_element", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: workingChangeSetId, + change_id: deletion.id, + entity_id: deletion.entity_id, + schema_key: deletion.schema_key, + file_id: deletion.file_id, + } satisfies LixChangeSetElement), + schema_version: LixChangeSetElementSchema["x-lix-version"], + created_at: transactionTimestamp, + }, + version_id: "global", + }); + } + } + + // Add all non-deletions as untracked + for (const change of nonDeletionChanges) { + // Handle working changeset elements as untracked + updateUntrackedState({ + lix: args.lix, + change: { + id: uuidV7({ lix: args.lix }), + entity_id: `${workingChangeSetId}~${change.id}`, + schema_key: "lix_change_set_element", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: workingChangeSetId, + change_id: change.id, + entity_id: change.entity_id, + schema_key: change.schema_key, + file_id: change.file_id, + } satisfies LixChangeSetElement), + schema_version: LixChangeSetElementSchema["x-lix-version"], + created_at: transactionTimestamp, + }, + version_id: "global", + }); + } + } + } + } + + const globalChanges: LixChangeRaw[] = []; + + // Step 5: Generate global metadata commit (if any version had changes) + if (versionMetadata.size > 0) { + // Check if global needs its own commit or can reuse existing + const needsNewGlobalCommit = !versionMetadata.has("global"); + + if (needsNewGlobalCommit) { + // Get global version info + const globalVersionRows = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", "global") + .where("snapshot_content", "is not", null) + .select("snapshot_content") + .limit(1), + }); + + if ( + globalVersionRows.length === 0 || + !globalVersionRows[0]?.snapshot_content + ) { + throw new Error(`Global version not found.`); + } + + const globalVersion = JSON.parse( + globalVersionRows[0].snapshot_content + ) as LixVersion; + const globalChangeSetId = nanoId({ lix: args.lix }); + const globalCommitId = uuidV7({ lix: args.lix }); + + // Store global metadata + versionMetadata.set("global", { + commitId: globalCommitId, + changeSetId: globalChangeSetId, + previousCommitId: globalVersion.commit_id, + }); + } + + const globalMeta = versionMetadata.get("global")!; + + // Add all change_authors to globalChanges (they're global metadata) + globalChanges.push(...allChangeAuthors); + + // Add metadata for all versions (including global) + for (const [version_id, meta] of versionMetadata) { + // Add the changeset entity + const changeSetChangeId = uuidV7({ lix: args.lix }); + globalChanges.push({ + id: changeSetChangeId, + entity_id: meta.changeSetId, + schema_key: "lix_change_set", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: meta.changeSetId, + metadata: null, + } satisfies LixChangeSet), + schema_version: LixChangeSetSchema["x-lix-version"], + created_at: transactionTimestamp, + }); + + // Add the commit entity + const commitChangeId = uuidV7({ lix: args.lix }); + globalChanges.push({ + id: commitChangeId, + entity_id: meta.commitId, + schema_key: "lix_commit", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: meta.commitId, + 
change_set_id: meta.changeSetId, + }), + schema_version: "1.0", + created_at: transactionTimestamp, + }); + + // Add commit edge + const edgeChangeId = uuidV7({ lix: args.lix }); + globalChanges.push({ + id: edgeChangeId, + entity_id: `${meta.previousCommitId}~${meta.commitId}`, + schema_key: "lix_commit_edge", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + parent_id: meta.previousCommitId, + child_id: meta.commitId, + } satisfies LixCommitEdge), + schema_version: "1.0", + created_at: transactionTimestamp, + }); + + // Add version update + const versionChangeId = uuidV7({ lix: args.lix }); + + // Get the current version snapshot to update + const versionRows = executeSync({ + lix: args.lix, + query: db + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", version_id) + .where("snapshot_content", "is not", null) + .select("snapshot_content") + .limit(1), + }); + + const currentVersion = JSON.parse( + versionRows[0]!.snapshot_content + ) as LixVersion; + globalChanges.push({ + id: versionChangeId, + entity_id: version_id, + schema_key: "lix_version", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + ...currentVersion, + commit_id: meta.commitId, + } satisfies LixVersion), + schema_version: LixVersionSchema["x-lix-version"], + created_at: transactionTimestamp, + }); + } + + // Create changeset elements for all global metadata (these belong to global's changeset) + for (const change of globalChanges) { + const elementId = uuidV7({ lix: args.lix }); + changeSetElements.push({ + id: elementId, + entity_id: `${globalMeta.changeSetId}~${change.id}`, + schema_key: "lix_change_set_element", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: globalMeta.changeSetId, + change_id: change.id, + schema_key: change.schema_key, + file_id: change.file_id, + entity_id: change.entity_id, + } satisfies LixChangeSetElement), + schema_version: LixChangeSetElementSchema["x-lix-version"], + created_at: transactionTimestamp, + }); + } + + // Add ALL change_set_element records to globalChanges + // since they are all global metadata that should be cached at global level + globalChanges.push(...changeSetElements); + + // Create change_set_elements for the change_set_element changes themselves + // This ensures the materializer can find them + const metaChangeSetElements: LixChangeRaw[] = []; + for (const elementChange of changeSetElements) { + const metaElementId = uuidV7({ lix: args.lix }); + metaChangeSetElements.push({ + id: metaElementId, + entity_id: `${globalMeta.changeSetId}~${elementChange.id}`, + schema_key: "lix_change_set_element", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: globalMeta.changeSetId, + change_id: elementChange.id, + schema_key: elementChange.schema_key, + file_id: elementChange.file_id, + entity_id: elementChange.entity_id, + } satisfies LixChangeSetElement), + schema_version: LixChangeSetElementSchema["x-lix-version"], + created_at: transactionTimestamp, + }); + } + + // Add the meta change_set_elements to globalChanges as well + globalChanges.push(...metaChangeSetElements); + + // Add all global changes to flush + allChangesToFlush.push(...globalChanges); + + // Note: ALL changeset elements are now in globalChanges since they're all global metadata + } + + // Single batch insert of all tracked changes into the change table + if 
(allChangesToFlush.length > 0) { + executeSync({ + lix: args.lix, + // @ts-expect-error - snapshot_content is a JSON string, not parsed object + query: args.lix.db.insertInto("change").values(allChangesToFlush), + }); + } + + // Clear the transaction table after committing + executeSync({ + lix: args.lix, + query: db.deleteFrom("internal_change_in_transaction"), + }); + + // Update cache entries for each version + for (const [version_id, meta] of versionMetadata) { + // Collect all changes for this version + const versionChanges = trackedChangesByVersion.get(version_id) || []; + const allChangesForVersion = [ + ...versionChanges.map((change: any) => ({ + id: change.id, + entity_id: change.entity_id, + schema_key: change.schema_key, + schema_version: change.schema_version, + file_id: change.file_id, + plugin_key: change.plugin_key, + snapshot_content: change.snapshot_content, + created_at: change.created_at, + })), + // Include all global changes only for global version + // (change_authors are now in globalChanges) + ...(version_id === "global" ? globalChanges : []), + ]; + + updateStateCache({ + lix: args.lix, + changes: allChangesForVersion, + commit_id: meta.commitId, + version_id: version_id, + }); + + // Delete untracked state for any tracked changes that were committed + if (versionChanges.length > 0) { + const untrackedToDelete = new Set(); + for (const change of versionChanges) { + const key = `${change.entity_id}|${change.schema_key}|${change.file_id}|${version_id}`; + untrackedToDelete.add(key); + } + + for (const key of untrackedToDelete) { + const [entity_id, schema_key, file_id, vid] = key.split("|"); + executeSync({ + lix: args.lix, + query: db + .deleteFrom("internal_state_all_untracked") + .where("entity_id", "=", entity_id!) + .where("schema_key", "=", schema_key!) + .where("file_id", "=", file_id!) 
+ .where("version_id", "=", vid!), + }); + } + } + + // Update file lixcol cache (existing logic) + const fileChanges = new Map< + string, + { change_id: string; created_at: string } + >(); + for (const change of versionChanges) { + if (change.file_id && change.file_id !== "lix") { + const existing = fileChanges.get(change.file_id); + if (!existing || change.created_at > existing.created_at) { + fileChanges.set(change.file_id, { + change_id: change.id, + created_at: change.created_at, + }); + } + } + } + + if (fileChanges.size > 0) { + const filesToDelete: string[] = []; + const filesToUpdate: Array<{ + file_id: string; + version_id: string; + latest_change_id: string; + latest_commit_id: string; + created_at: string; + updated_at: string; + }> = []; + + for (const [fileId, { change_id, created_at }] of fileChanges) { + const changeData = versionChanges.find((c: any) => c.id === change_id); + const isDeleted = + changeData?.schema_key === "lix_file_descriptor" && + !changeData.snapshot_content; + + if (isDeleted) { + filesToDelete.push(fileId); + } else { + filesToUpdate.push({ + file_id: fileId, + version_id: version_id, + latest_change_id: change_id, + latest_commit_id: meta.commitId, + created_at: created_at, + updated_at: created_at, + }); + } + } + + if (filesToDelete.length > 0) { + executeSync({ + lix: args.lix, + query: db + .deleteFrom("internal_file_lixcol_cache") + .where("version_id", "=", version_id) + .where("file_id", "in", filesToDelete), + }); + } + + if (filesToUpdate.length > 0) { + executeSync({ + lix: args.lix, + query: db + .insertInto("internal_file_lixcol_cache") + .values(filesToUpdate) + .onConflict((oc) => + oc.columns(["file_id", "version_id"]).doUpdateSet({ + latest_change_id: sql`excluded.latest_change_id`, + latest_commit_id: sql`excluded.latest_commit_id`, + updated_at: sql`excluded.updated_at`, + }) + ), + }); + } + } + } + + commitDeterministicSequenceNumber({ + lix: args.lix, + timestamp: transactionTimestamp, + }); + + // Emit state commit hook after transaction is successfully committed + const allChangesForHook: any[] = [...allChangesToFlush, ...untrackedChanges]; + args.lix.hooks._emit("state_commit", { changes: allChangesForHook }); + return args.lix.sqlite.sqlite3.capi.SQLITE_OK; +} diff --git a/packages/lix-sdk/src/state/vtable/index.ts b/packages/lix-sdk/src/state/vtable/index.ts new file mode 100644 index 0000000000..236264ce6c --- /dev/null +++ b/packages/lix-sdk/src/state/vtable/index.ts @@ -0,0 +1,4 @@ +export { applyStateVTable } from "./vtable.js"; +export { commit } from "./commit.js"; +export { validateStateMutation } from "./validate-state-mutation.js"; +export { serializeStatePk, parseStatePk } from "./primary-key.js"; diff --git a/packages/lix-sdk/src/state/vtable/insert-vtable-log.ts b/packages/lix-sdk/src/state/vtable/insert-vtable-log.ts new file mode 100644 index 0000000000..6329fed67f --- /dev/null +++ b/packages/lix-sdk/src/state/vtable/insert-vtable-log.ts @@ -0,0 +1,63 @@ +import { LixLogSchema, type LixLog } from "../../log/schema.js"; +import { uuidV7 } from "../../deterministic/uuid-v7.js"; +import { timestamp, type Lix } from "../../index.js"; +import { insertTransactionState } from "../transaction/insert-transaction-state.js"; + +// Track if logging is in progress per Lix instance to prevent recursion +const loggingInProgressMap = new WeakMap< + Pick, + boolean +>(); + +/** + * Insert a log entry directly using insertTransactionState to avoid recursion + * when logging from within the virtual table methods. 
+ * + * This is a minimal wrapper that can be mocked in tests to control timestamps. + */ +export function insertVTableLog(args: { + lix: Pick; + id?: string; + key: string; + message: string; + level: string; + timestamp?: string; +}): void { + if (loggingInProgressMap.get(args.lix)) { + return; + } + + loggingInProgressMap.set(args.lix, true); + try { + const id = args.id ?? uuidV7({ lix: args.lix }); + // Insert into transaction state (untracked) to preserve previous behavior. + // Note: If called outside a vtable write, this may require a later commit to flush. + insertTransactionState({ + lix: { + sqlite: args.lix.sqlite, + db: args.lix.db, + hooks: undefined as any, + }, + timestamp: args.timestamp ?? timestamp({ lix: args.lix }), + data: [ + { + entity_id: id, + schema_key: LixLogSchema["x-lix-key"], + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id, + key: args.key, + message: args.message, + level: args.level, + } satisfies LixLog), + schema_version: LixLogSchema["x-lix-version"], + version_id: "global", + untracked: true, + }, + ], + }); + } finally { + loggingInProgressMap.set(args.lix, false); + } +} diff --git a/packages/lix-sdk/src/state/vtable/primary-key.test.ts b/packages/lix-sdk/src/state/vtable/primary-key.test.ts new file mode 100644 index 0000000000..4cefe0f82c --- /dev/null +++ b/packages/lix-sdk/src/state/vtable/primary-key.test.ts @@ -0,0 +1,44 @@ +import { describe, expect, test } from "vitest"; +import { + encodeStatePkPart, + parseStatePk, + serializeStatePk, +} from "./primary-key.js"; + +describe("primary-key serialize/parse", () => { + const tags = ["T", "TI", "U", "UI", "C", "CI"] as const; + + test.each(tags as unknown as string[])("roundtrips for tag %s", (tag) => { + const fileId = "file~id/with%chars"; + const entityId = "entity~id:123%$"; + const versionId = "version~id%alpha"; + + const pk = serializeStatePk(tag as any, fileId, entityId, versionId); + const parsed = parseStatePk(pk); + + expect(parsed.tag).toBe(tag); + expect(parsed.fileId).toBe(fileId); + expect(parsed.entityId).toBe(entityId); + expect(parsed.versionId).toBe(versionId); + }); + + test("encodeStatePkPart escapes '~' and '%' safely", () => { + const original = "val~with%percent and /slashes?&"; + const encoded = encodeStatePkPart(original); + + // No raw '~' should remain in encoded part + expect(encoded).not.toContain("~"); + // Encoded should be reversible + const pk = ["U", encoded, encoded, encoded].join("~"); + const parsed = parseStatePk(pk); + expect(parsed.fileId).toBe(original); + expect(parsed.entityId).toBe(original); + expect(parsed.versionId).toBe(original); + }); + + test("parseStatePk throws on invalid format", () => { + expect(() => parseStatePk("only-one-part" as any)).toThrow( + /Invalid composite key/ + ); + }); +}); diff --git a/packages/lix-sdk/src/state/primary-key.ts b/packages/lix-sdk/src/state/vtable/primary-key.ts similarity index 90% rename from packages/lix-sdk/src/state/primary-key.ts rename to packages/lix-sdk/src/state/vtable/primary-key.ts index 5c213fa496..d06b7a1d59 100644 --- a/packages/lix-sdk/src/state/primary-key.ts +++ b/packages/lix-sdk/src/state/vtable/primary-key.ts @@ -6,13 +6,15 @@ * * Tag meanings: * + * - T: Transaction direct (from internal_change_in_transaction in child version) + * - TI: Transaction inherited (from internal_change_in_transaction in parent version) * - U: Untracked direct (from internal_state_all_untracked in child version) * - UI: Untracked inherited (from 
internal_state_all_untracked in parent version) * - C: Cache-tracked direct (from internal_state_cache in child version) * - CI: Cache-tracked inherited (from internal_state_cache in parent version) */ -export type StatePkTag = "U" | "UI" | "C" | "CI"; +export type StatePkTag = "T" | "TI" | "U" | "UI" | "C" | "CI"; const DELIM = "~"; // still our global separator const DELIM_ENC = "%7E"; // RFC 3986 percent-encoding for '~' diff --git a/packages/lix-sdk/src/state/validate-state-mutation.test.ts b/packages/lix-sdk/src/state/vtable/validate-state-mutation.test.ts similarity index 97% rename from packages/lix-sdk/src/state/validate-state-mutation.test.ts rename to packages/lix-sdk/src/state/vtable/validate-state-mutation.test.ts index e2049759a5..7f59ce0816 100644 --- a/packages/lix-sdk/src/state/validate-state-mutation.test.ts +++ b/packages/lix-sdk/src/state/vtable/validate-state-mutation.test.ts @@ -1,11 +1,11 @@ import { test, expect } from "vitest"; -import { openLix } from "../lix/open-lix.js"; +import { openLix } from "../../lix/open-lix.js"; import { validateStateMutation } from "./validate-state-mutation.js"; -import type { LixSchemaDefinition } from "../schema-definition/definition.js"; +import type { LixSchemaDefinition } from "../../schema-definition/definition.js"; import { Kysely, sql } from "kysely"; -import { createVersion } from "../version/create-version.js"; -import type { LixChangeSetElement } from "../change-set/schema.js"; -import type { LixInternalDatabaseSchema } from "../database/schema.js"; +import { createVersion } from "../../version/create-version.js"; +import type { LixChangeSetElement } from "../../change-set/schema.js"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; test("throws if the schema is not a valid lix schema", async () => { const lix = await openLix({}); @@ -218,6 +218,68 @@ test("throws when primary key violates uniqueness constraint", async () => { ).toThrowError("Primary key constraint violation"); }); +test("state_all: inserting same PK twice in one transaction overwrites without PK error", async () => { + const lix = await openLix({}); + + // Define a mock schema with PK on 'key' + const schema = { + type: "object", + "x-lix-version": "1.0", + "x-lix-key": "kv_mock_vtab", + "x-lix-primary-key": ["key"], + properties: { + key: { type: "string" }, + value: { type: "string" }, + }, + required: ["key", "value"], + additionalProperties: false, + } as const satisfies LixSchemaDefinition; + + // Register schema + await lix.db.insertInto("stored_schema").values({ value: schema }).execute(); + + // Execute both inserts in a single DB transaction so that xUpdate/xCommit run once + await lix.db.transaction().execute(async (trx) => { + // First insert + await trx + .insertInto("state_all") + .values({ + entity_id: "kv_vtab1", + file_id: "lix", + schema_key: "kv_mock_vtab", + plugin_key: "lix_own_entity", + version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: { key: "test", value: "A" }, + schema_version: "1.0", + }) + .execute(); + + // Second insert with same PK in the same transaction → should overwrite, not error + await trx + .insertInto("state_all") + .values({ + entity_id: "kv_vtab1", + file_id: "lix", + schema_key: "kv_mock_vtab", + plugin_key: "lix_own_entity", + version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: { key: "test", value: "B" }, + schema_version: "1.0", + }) + .execute(); + }); + + // Verify the final materialized state reflects the second insert + const row 
= await lix.db + .selectFrom("state_all") + .where("schema_key", "=", "kv_mock_vtab") + .where("entity_id", "=", "kv_vtab1") + .selectAll() + .executeTakeFirst(); + + expect(row?.snapshot_content).toEqual({ key: "test", value: "B" }); +}); + test("handles composite primary keys", async () => { const lix = await openLix({}); @@ -3150,7 +3212,7 @@ test("state foreign key references should handle inherited entities", async () = const childVersion = await createVersion({ lix, name: "child_version", - inherits_from_version_id: mainVersion.id, + inheritsFrom: mainVersion, }); // The inherited entity should NOT be visible for foreign key validation diff --git a/packages/lix-sdk/src/state/validate-state-mutation.ts b/packages/lix-sdk/src/state/vtable/validate-state-mutation.ts similarity index 94% rename from packages/lix-sdk/src/state/validate-state-mutation.ts rename to packages/lix-sdk/src/state/vtable/validate-state-mutation.ts index 99c9549860..4f6deb1f07 100644 --- a/packages/lix-sdk/src/state/validate-state-mutation.ts +++ b/packages/lix-sdk/src/state/vtable/validate-state-mutation.ts @@ -1,9 +1,10 @@ import { Ajv } from "ajv"; -import type { Lix } from "../lix/open-lix.js"; -import { LixSchemaDefinition } from "../schema-definition/definition.js"; -import { executeSync } from "../database/execute-sync.js"; -import { sql } from "kysely"; -import type { LixChange } from "../change/schema.js"; +import type { Lix } from "../../lix/open-lix.js"; +import { LixSchemaDefinition } from "../../schema-definition/definition.js"; +import { executeSync } from "../../database/execute-sync.js"; +import { sql, type Kysely } from "kysely"; +import type { LixChange } from "../../change/schema.js"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; /** * List of special entity types that are not stored as JSON in the state table, @@ -190,12 +191,22 @@ function validatePrimaryKeyConstraints(args: { primaryKeyValues.push(value); } - // Query existing state to check for duplicates - let query = args.lix.db - .selectFrom("state_all") - .select("snapshot_content") - .where("schema_key", "=", args.schema["x-lix-key"]) - .where("version_id", "=", args.version_id); + // Query existing resolved state (including cache/untracked/inherited) to check for duplicates, + // but ignore transaction rows (tag 'T' in _pk) so that multiple inserts within the same + // transaction can overwrite without tripping PK validation. 
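For orientation, the duplicate check assembled in the hunk below reduces to the query shape sketched here. This is an editorial sketch, not part of the patch: it assumes a `Kysely<LixInternalDatabaseSchema>` handle named `db` and a hypothetical helper name `findCommittedDuplicates`, and it reuses the `_pk` tag convention from `primary-key.ts`, where rows originating from the open transaction carry the `T` tag.

```ts
import { sql, type Kysely } from "kysely";
import type { LixInternalDatabaseSchema } from "../../database/schema.js";

// Sketch: fetch already-resolved rows that could collide with a candidate
// primary key. Tombstones (NULL snapshot_content) cannot collide, and rows
// whose _pk starts with "T~" only exist in the open transaction, so they are
// skipped — this is what lets a repeated insert within one transaction
// overwrite instead of tripping PK validation.
async function findCommittedDuplicates(args: {
	db: Kysely<LixInternalDatabaseSchema>;
	schemaKey: string;
	versionId: string;
}) {
	return args.db
		.selectFrom("internal_resolved_state_all")
		.select(["snapshot_content"])
		.where("schema_key", "=", args.schemaKey)
		.where("version_id", "=", args.versionId)
		// exclude tombstones
		.where("snapshot_content", "is not", null)
		// exclude transaction-state rows (tag 'T')
		.where(sql`_pk NOT LIKE 'T~%'` as any)
		.execute();
}
```

The callers in `validatePrimaryKeyConstraints` and `validateUniqueConstraints` then compare the candidate's key values against the parsed `snapshot_content` of the returned rows, as the surrounding hunk shows.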
+ const db = args.lix.db as unknown as Kysely; + let query = db + .selectFrom("internal_resolved_state_all") + .select(["snapshot_content"]) + .where("schema_key", "=", args.schema["x-lix-key"]); + + // Constrain by version – internal_resolved_state_all exposes child version_id directly + query = query.where("version_id", "=", args.version_id); + // Exclude tombstones + query = query.where("snapshot_content", "is not", null); + + // Exclude transaction-state rows: _pk starting with 'T~' + query = query.where(sql`_pk NOT LIKE 'T~%'` as any); // For updates, exclude the current entity from the check if (args.operation === "update" && args.entity_id) { @@ -262,12 +273,17 @@ function validateUniqueConstraints(args: { continue; } - // Query existing state to check for duplicates - let query = args.lix.db - .selectFrom("state_all") - .select("snapshot_content") - .where("schema_key", "=", args.schema["x-lix-key"]) - .where("version_id", "=", args.version_id); + // Query existing resolved state for duplicates, excluding transaction-state rows (tag 'T') + const db = args.lix.db as unknown as Kysely; + let query = db + .selectFrom("internal_resolved_state_all") + .select(["snapshot_content"]) + .where("schema_key", "=", args.schema["x-lix-key"]); + + query = query.where("version_id", "=", args.version_id); + // Exclude tombstones + query = query.where("snapshot_content", "is not", null); + query = query.where(sql`_pk NOT LIKE 'T~%'` as any); // For updates, exclude the current entity from the check if (args.operation === "update" && args.entity_id) { diff --git a/packages/lix-sdk/src/state/schema.test.ts b/packages/lix-sdk/src/state/vtable/vtable.test.ts similarity index 66% rename from packages/lix-sdk/src/state/schema.test.ts rename to packages/lix-sdk/src/state/vtable/vtable.test.ts index 779167b8e6..20b01eec5b 100644 --- a/packages/lix-sdk/src/state/schema.test.ts +++ b/packages/lix-sdk/src/state/vtable/vtable.test.ts @@ -1,18 +1,17 @@ -import { test, expect } from "vitest"; -import { openLix } from "../lix/open-lix.js"; -import type { LixSchemaDefinition } from "../schema-definition/definition.js"; +import { expect } from "vitest"; +import type { LixSchemaDefinition } from "../../schema-definition/definition.js"; import { Kysely, sql } from "kysely"; -import { createVersion } from "../version/create-version.js"; -import type { LixInternalDatabaseSchema } from "../database/schema.js"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import { createVersion } from "../../version/create-version.js"; import { simulationTest, normalSimulation, -} from "../test-utilities/simulation-test/simulation-test.js"; - -test("dstest discovery", () => {}); +} from "../../test-utilities/simulation-test/simulation-test.js"; +import { createVersionFromCommit } from "../../version/create-version-from-commit.js"; +import { openLix } from "../../lix/open-lix.js"; simulationTest( - "select, insert, update, delete entity", + "select, insert, update, delete entity via internal_state_vtable", async ({ openSimulatedLix }) => { const mockSchema: LixSchemaDefinition = { "x-lix-key": "mock_schema", @@ -41,8 +40,11 @@ simulationTest( .values({ value: mockSchema }) .execute(); - await lix.db - .insertInto("state_all") + const db = lix.db as unknown as Kysely; + + // Test INSERT via internal_state_vtable + await db + .insertInto("internal_state_vtable") .values({ entity_id: "e0", file_id: "f0", @@ -50,16 +52,24 @@ simulationTest( plugin_key: "lix_own_entity", schema_version: "1.0", version_id: 
sql`(SELECT version_id FROM active_version)`, - snapshot_content: { + snapshot_content: JSON.stringify({ value: "hello world", - }, + }), + untracked: 0, }) .execute(); - const viewAfterInsert = await lix.db - .selectFrom("state_all") + // Test SELECT from internal_state_vtable + const viewAfterInsert = await db + .selectFrom("internal_state_vtable") .where("schema_key", "=", "mock_schema") - .selectAll() + .select([ + "entity_id", + "file_id", + "schema_key", + "plugin_key", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) .execute(); expect(viewAfterInsert).toMatchObject([ @@ -74,22 +84,29 @@ simulationTest( }, ]); - await lix.db - .updateTable("state_all") + // Test UPDATE via internal_state_vtable + await db + .updateTable("internal_state_vtable") .set({ - snapshot_content: { + snapshot_content: JSON.stringify({ value: "hello world - updated", - }, + }), }) .where("entity_id", "=", "e0") .where("schema_key", "=", "mock_schema") .where("file_id", "=", "f0") .execute(); - const viewAfterUpdate = await lix.db - .selectFrom("state_all") + const viewAfterUpdate = await db + .selectFrom("internal_state_vtable") .where("schema_key", "=", "mock_schema") - .selectAll() + .select([ + "entity_id", + "file_id", + "schema_key", + "plugin_key", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) .execute(); expect(viewAfterUpdate).toMatchObject([ @@ -104,20 +121,22 @@ simulationTest( }, ]); - await lix.db - .deleteFrom("state_all") + // Test DELETE via internal_state_vtable + await db + .deleteFrom("internal_state_vtable") .where("entity_id", "=", "e0") .where( "version_id", "=", - lix.db.selectFrom("active_version").select("version_id") + db.selectFrom("active_version").select("version_id") ) .where("schema_key", "=", "mock_schema") .execute(); - const viewAfterDelete = await lix.db - .selectFrom("state_all") + const viewAfterDelete = await db + .selectFrom("internal_state_vtable") .where("schema_key", "=", "mock_schema") + .where("snapshot_content", "is not", null) .selectAll() .execute(); @@ -126,8 +145,8 @@ simulationTest( ); simulationTest( - "validates the schema on insert", - async ({ openSimulatedLix }) => { + "exposes tracked deletions as tombstones (NULL snapshot_content)", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -139,14 +158,12 @@ simulationTest( }); const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", + "x-lix-key": "mock_schema_tombstone", "x-lix-version": "1.0", type: "object", additionalProperties: false, properties: { - value: { - type: "number", - }, + value: { type: "string" }, }, }; @@ -154,99 +171,64 @@ simulationTest( .insertInto("stored_schema") .values({ value: mockSchema }) .execute(); - await expect( - lix.db - .insertInto("state_all") - .values({ - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", - schema_version: "1.0", - snapshot_content: { - value: "hello world", - }, - version_id: sql`(SELECT version_id FROM active_version)`, - }) - .execute() - ).rejects.toThrow(/value must be number/); - } -); - -simulationTest( - "validates the schema on update", - async ({ openSimulatedLix }) => { - const lix = await openSimulatedLix({}); - - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - type: "object", - additionalProperties: false, - properties: { - value: { - type: "number", - }, - }, - }; - await lix.db - .insertInto("stored_schema") - .values({ value: mockSchema }) - 
.execute(); + const db = lix.db as unknown as Kysely; - await lix.db - .insertInto("state_all") + // Insert a tracked row via the vtable into the active version + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", + entity_id: "e_tomb", + file_id: "f_tomb", + schema_key: "mock_schema_tombstone", + plugin_key: "test_plugin", schema_version: "1.0", - snapshot_content: { - value: 5, - }, version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ value: "live" }), + untracked: 0, }) .execute(); - await expect( - lix.db - .updateTable("state_all") - .set({ - snapshot_content: { - value: "hello world - updated", - }, - }) - .where("entity_id", "=", "e0") - .where("schema_key", "=", "mock_schema") - .where("file_id", "=", "f0") - .execute() - ).rejects.toThrow(/value must be number/); + // Delete it via the vtable in the active version (creates a tracked tombstone) + await db + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "e_tomb") + .where("schema_key", "=", "mock_schema_tombstone") + .where("file_id", "=", "f_tomb") + .where( + "version_id", + "=", + db.selectFrom("active_version").select("version_id") + ) + .execute(); - const viewAfterFailedUpdate = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_schema") - .selectAll() + // Default filter (snapshot_content IS NOT NULL) would hide the deletion; ensure tombstone is exposed + const rows = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "e_tomb") + .where("schema_key", "=", "mock_schema_tombstone") + .where("file_id", "=", "f_tomb") + .select([ + "entity_id", + "schema_key", + "file_id", + "version_id", + "change_id", + "commit_id", + "snapshot_content", + ]) .execute(); - expect(viewAfterFailedUpdate).toMatchObject([ - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", - snapshot_content: { - value: 5, - }, - }, - ]); + expectDeterministic(rows).toHaveLength(1); + expectDeterministic(rows[0]?.snapshot_content).toBeNull(); + expectDeterministic(rows[0]?.change_id).toBeTruthy(); + expectDeterministic(rows[0]?.commit_id).toBeTruthy(); } ); simulationTest( - "state is separated by version", - async ({ openSimulatedLix }) => { + "delete ALL via vtable should delete untracked entities in active version", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -257,144 +239,71 @@ simulationTest( ], }); - await createVersion({ lix, id: "version_a" }); - await createVersion({ lix, id: "version_b" }); - - await lix.db - .insertInto("state_all") - .values([ - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - schema_version: "1.0", - snapshot_content: { - value: "hello world from version a", - }, - version_id: "version_a", - }, - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - schema_version: "1.0", - snapshot_content: { - value: "hello world from version b", - }, - version_id: "version_b", - }, - ]) - .execute(); - - const stateAfterInserts = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_schema") - .where("entity_id", "=", "e0") - .selectAll() + const db = lix.db as unknown as Kysely; + // Create a tracked entity in active version + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "tracked-entity", 
+ schema_key: "mock_test_schema", + file_id: "test-file", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ value: "tracked" }), + schema_version: "1.0", + version_id: sql`(SELECT version_id FROM active_version)`, + untracked: 0, + }) .execute(); - expect(stateAfterInserts).toMatchObject([ - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { - value: "hello world from version a", - }, - version_id: "version_a", - }, - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { - value: "hello world from version b", - }, - version_id: "version_b", - }, - ]); - - // Verify timestamps are present - expect(stateAfterInserts[0]?.created_at).toBeDefined(); - expect(stateAfterInserts[0]?.updated_at).toBeDefined(); - expect(stateAfterInserts[1]?.created_at).toBeDefined(); - expect(stateAfterInserts[1]?.updated_at).toBeDefined(); - - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { value: "hello world from version b UPDATED" }, + // Create an untracked entity in active version + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "untracked-entity", + schema_key: "mock_test_schema", + file_id: "test-file", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ value: "untracked" }), + schema_version: "1.0", + version_id: sql`(SELECT version_id FROM active_version)`, + untracked: 1, }) - .where("entity_id", "=", "e0") - .where("schema_key", "=", "mock_schema") - .where("version_id", "=", "version_b") .execute(); - const stateAfterUpdate = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_schema") - .where("entity_id", "=", "e0") - .selectAll() + // Verify both exist in vtable + const beforeDelete = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_test_schema") + .select(["entity_id"]) .execute(); + expectDeterministic(beforeDelete).toHaveLength(2); - expect(stateAfterUpdate).toMatchObject([ - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { - value: "hello world from version a", - }, - version_id: "version_a", - }, - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { - value: "hello world from version b UPDATED", - }, - version_id: "version_b", - }, - ]); - - await lix.db - .deleteFrom("state_all") - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_b") + // Delete all entities of schema in active version + await db + .deleteFrom("internal_state_vtable") + .where("schema_key", "=", "mock_test_schema") + .where( + "version_id", + "=", + db.selectFrom("active_version").select("version_id") + ) .execute(); - const stateAfterDelete = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_schema") - .where("entity_id", "=", "e0") + // Check both were deleted in vtable + const afterDelete = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_test_schema") + .where("snapshot_content", "is not", null) .selectAll() .execute(); - expect(stateAfterDelete).toMatchObject([ - { - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "mock_plugin", - snapshot_content: { - value: "hello world from version a", - }, - version_id: "version_a", - }, - ]); + expectDeterministic(afterDelete).toHaveLength(0); } ); simulationTest( - "created_at and 
updated_at timestamps are computed correctly", - async ({ openSimulatedLix }) => { + "delete via vtable with WHERE should delete only untracked entities in active version", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -405,217 +314,190 @@ simulationTest( ], }); - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - type: "object", - additionalProperties: false, - properties: { - value: { - type: "string", - }, - }, - }; - - await lix.db - .insertInto("stored_schema") - .values({ value: mockSchema }) - .execute(); + const db = lix.db as unknown as Kysely; - // Insert initial entity - await lix.db - .insertInto("state_all") + // Create a tracked entity in active version + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", + entity_id: "tracked-entity", + schema_key: "mock_test_schema", + file_id: "test-file", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ value: "tracked" }), schema_version: "1.0", version_id: sql`(SELECT version_id FROM active_version)`, - snapshot_content: { - value: "initial value", - }, + untracked: 0, }) .execute(); - const stateAfterInsert = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "e0") - .selectAll() - .execute(); - - expect(stateAfterInsert).toHaveLength(1); - expect(stateAfterInsert[0]?.created_at).toBeDefined(); - expect(stateAfterInsert[0]?.updated_at).toBeDefined(); - expect(stateAfterInsert[0]?.created_at).toBe( - stateAfterInsert[0]?.updated_at - ); - - // Update the entity - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { - value: "updated value", - }, + // Create an untracked entity in active version + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "untracked-entity", + schema_key: "mock_test_schema", + file_id: "test-file", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ value: "untracked" }), + schema_version: "1.0", + version_id: sql`(SELECT version_id FROM active_version)`, + untracked: 1, }) - .where("entity_id", "=", "e0") - .where("schema_key", "=", "mock_schema") .execute(); - const stateAfterUpdate = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "e0") - .selectAll() + // Verify both exist in the vtable + const beforeDelete = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_test_schema") + .select(["entity_id"]) .execute(); + expectDeterministic(beforeDelete).toHaveLength(2); - expect(stateAfterUpdate).toHaveLength(1); - expect(stateAfterUpdate[0]?.created_at).toBeDefined(); - expect(stateAfterUpdate[0]?.updated_at).toBeDefined(); + // Delete the untracked entity by id in active version + await db + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "untracked-entity") + .where( + "version_id", + "=", + db.selectFrom("active_version").select("version_id") + ) + .execute(); - // created_at should remain the same - expect(stateAfterUpdate[0]?.created_at).toBe( - stateAfterInsert[0]?.created_at - ); + // Should only have the tracked entity remaining + const afterDelete = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_test_schema") + .select(["entity_id"]) + .execute(); + expectDeterministic(afterDelete).toHaveLength(1); + expectDeterministic(afterDelete[0]?.entity_id).toBe("tracked-entity"); - // updated_at should be different 
(newer) - expect(stateAfterUpdate[0]?.updated_at).not.toBe( - stateAfterInsert[0]?.updated_at - ); + // Confirm the untracked entry is gone in vtable + const stateAfterDelete = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "untracked-entity") + .where("schema_key", "=", "mock_test_schema") + .selectAll() + .execute(); + expectDeterministic(stateAfterDelete).toHaveLength(0); } ); - +// see https://github.com/opral/lix-sdk/issues/359 simulationTest( - "created_at and updated_at are version specific", - async ({ openSimulatedLix }) => { + "commit_id in state should be from the real auto-commit, not the working commit", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], }); - await createVersion({ lix, id: "version_a" }); - await createVersion({ lix, id: "version_b" }); + // Get the active version with its commit_id and working_commit_id + const activeVersion = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - additionalProperties: false, - type: "object", - properties: { - value: { - type: "string", - }, - }, - }; + // Verify we have both commit_id and working_commit_id + expectDeterministic(activeVersion.commit_id).toBeTruthy(); + expectDeterministic(activeVersion.working_commit_id).toBeTruthy(); + expectDeterministic(activeVersion.commit_id).not.toBe( + activeVersion.working_commit_id + ); - await lix.db - .insertInto("stored_schema") - .values({ value: mockSchema }) + const commitsBeforeInsert = await lix.db + .selectFrom("commit") + .select("id") .execute(); - // Insert entity in version A - await lix.db - .insertInto("state_all") + // Insert some state data via internal_state_vtable + const db = lix.db as unknown as Kysely; + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", + entity_id: "test-entity-1", + schema_key: "test_schema", + file_id: "test-file", + plugin_key: "test-plugin", schema_version: "1.0", - version_id: "version_a", - snapshot_content: { - value: "value in version a", - }, + snapshot_content: JSON.stringify({ value: "initial value" }), + version_id: sql`(SELECT version_id FROM active_version)`, + untracked: 0, }) .execute(); - // Insert same entity in version B - await lix.db - .insertInto("state_all") - .values({ - entity_id: "e0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", - schema_version: "1.0", - version_id: "version_b", - snapshot_content: { - value: "value in version b", - }, - }) + const commitsAfterInsert = await lix.db + .selectFrom("commit") + .select("id") .execute(); - const stateVersionA = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_a") - .selectAll() - .execute(); + // two commits for the global and active version + expectDeterministic(commitsAfterInsert.length).toBe( + commitsBeforeInsert.length + 2 + ); - const stateVersionB = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_b") - .selectAll() - .execute(); + const activeVersionAfterInsert = await lix.db + 
.selectFrom("active_version") + .innerJoin("version", "active_version.version_id", "version.id") + .selectAll("version") + .executeTakeFirstOrThrow(); - expect(stateVersionA).toHaveLength(1); - expect(stateVersionB).toHaveLength(1); + // Query to check the commit_id via vtable + const stateAfterInsert = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "test-entity-1") + .select(["entity_id", "commit_id"]) + .executeTakeFirstOrThrow(); - // Both should have timestamps - expect(stateVersionA[0]?.created_at).toBeDefined(); - expect(stateVersionA[0]?.updated_at).toBeDefined(); - expect(stateVersionB[0]?.created_at).toBeDefined(); - expect(stateVersionB[0]?.updated_at).toBeDefined(); + // The commit_id should NOT be the working_commit_id + expectDeterministic(stateAfterInsert.commit_id).not.toBe( + activeVersionAfterInsert.working_commit_id + ); - // the same entity has been inserted but with different changes - expect(stateVersionA[0]?.created_at).not.toBe(stateVersionB[0]?.created_at); + // The commit_id should be the auto-commit ID (not the working commit) + expectDeterministic(stateAfterInsert.commit_id).toBe( + activeVersionAfterInsert.commit_id + ); - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { - value: "updated value in version b", - }, - }) - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_b") + // Update the state via vtable to trigger another auto-commit + await db + .updateTable("internal_state_vtable") + .where("entity_id", "=", "test-entity-1") + .set({ snapshot_content: JSON.stringify({ value: "updated value" }) }) .execute(); - const updatedStateVersionA = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_a") - .selectAll() - .execute(); + // Check again + const stateAfterUpdate = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "test-entity-1") + .select(["entity_id", "commit_id"]) + .executeTakeFirstOrThrow(); - const updatedStateVersionB = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_b") - .selectAll() - .execute(); + const activeVersionAfterUpdate = await lix.db + .selectFrom("active_version") + .innerJoin("version", "active_version.version_id", "version.id") + .selectAll("version") + .executeTakeFirstOrThrow(); - // Version A should remain unchanged - expect(updatedStateVersionA[0]?.updated_at).toBe( - stateVersionA[0]?.updated_at + // The commit_id should now be the new auto-commit ID + expectDeterministic(stateAfterUpdate.commit_id).toBe( + activeVersionAfterUpdate.commit_id ); - - // Version B should have updated timestamp - expect(updatedStateVersionB[0]?.updated_at).not.toBe( - stateVersionB[0]?.updated_at + expectDeterministic(stateAfterUpdate.commit_id).not.toBe( + activeVersion.working_commit_id ); } ); - simulationTest( - "state appears in both versions when they share the same commit", - async ({ openSimulatedLix }) => { + "untracked state in child overrides inherited untracked state", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -626,187 +508,86 @@ simulationTest( ], }); - const versionA = await createVersion({ lix, id: "version_a" }); - // Insert state into version A + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "mock_schema", + "x-lix-version": "1.0", + type: "object", + additionalProperties: false, + properties: { + value: { type: "string" }, + }, + }; + await 
lix.db - .insertInto("state_all") + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); + + const db = lix.db as unknown as Kysely; + const childVersion = await createVersion({ lix, name: "child" }); + + // 1. Insert untracked state in global version + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "e0", - file_id: "f0", + entity_id: "untracked_override_test", + file_id: "f1", schema_key: "mock_schema", - plugin_key: "mock_plugin", + plugin_key: "p1", schema_version: "1.0", - snapshot_content: { - value: "shared state", - }, - version_id: "version_a", + snapshot_content: JSON.stringify({ value: "global untracked" }), + version_id: "global", + untracked: 1, }) .execute(); - const versionAAfterInsert = await lix.db - .selectFrom("version") - .where("id", "=", versionA.id) - .selectAll() - .executeTakeFirstOrThrow(); - - const sharedCommitId = versionAAfterInsert.commit_id; - - // Create version B from version A - const versionB = await createVersion({ - lix, - id: "version_b", - commit_id: sharedCommitId, - }); - - expect(versionB.commit_id).toBe(sharedCommitId); - - const stateInBothVersions = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_schema") - .where("entity_id", "=", "e0") - .selectAll() + // 2. Verify child inherits untracked state + const inheritedState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "untracked_override_test") + .where("version_id", "=", childVersion.id) + .select([sql`json(snapshot_content)`.as("snapshot_content"), "untracked"]) .execute(); - // Both versions should see the same state - expect(stateInBothVersions).toMatchObject([ - { - entity_id: "e0", - schema_key: "mock_schema", - snapshot_content: { value: "shared state" }, - version_id: "version_a", - commit_id: sharedCommitId, - }, - { - entity_id: "e0", - schema_key: "mock_schema", - snapshot_content: { value: "shared state" }, - version_id: "version_b", - commit_id: sharedCommitId, - }, - ]); - } -); - -simulationTest( - "state diverges when versions have common ancestor but different changes", - async ({ openSimulatedLix }) => { - const lix = await openSimulatedLix({ - keyValues: [ - { - key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, - lixcol_version_id: "global", - }, - ], + expectDeterministic(inheritedState).toHaveLength(1); + expectDeterministic(inheritedState[0]?.snapshot_content).toEqual({ + value: "global untracked", }); + expectDeterministic(inheritedState[0]?.untracked).toBe(1); - // Create base version and add initial state - const baseVersion = await createVersion({ lix, id: "base_version" }); - - await lix.db - .insertInto("state_all") + // 3. 
Insert untracked state in child version for same entity + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "e0", - file_id: "f0", + entity_id: "untracked_override_test", + file_id: "f1", schema_key: "mock_schema", - plugin_key: "mock_plugin", + plugin_key: "p1", schema_version: "1.0", - snapshot_content: { - value: "base state", - }, - version_id: "base_version", - }) - .execute(); - - const baseVersionAfterInsert = await lix.db - .selectFrom("version") - .where("id", "=", baseVersion.id) - .selectAll() - .executeTakeFirstOrThrow(); - - // Create two versions from the same base version - await createVersion({ - lix, - id: "version_a", - commit_id: baseVersionAfterInsert.commit_id, - }); - - await createVersion({ - lix, - id: "version_b", - commit_id: baseVersionAfterInsert.commit_id, - }); - - const versions = await lix.db - .selectFrom("version") - .where("id", "in", ["base_version", "version_a", "version_b"]) - .select(["id", "commit_id"]) - .execute(); - - expect(versions).toHaveLength(3); - - // Both versions should initially see the base state - const initialState = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_schema") - .where("entity_id", "=", "e0") - .selectAll() - .execute(); - - expect(initialState).toHaveLength(3); // base, version_a, version_b - - // Update state in version A - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { value: "updated in version A" }, - }) - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_a") - .execute(); - - // Update state in version B differently - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { value: "updated in version B" }, + snapshot_content: JSON.stringify({ value: "child untracked" }), + version_id: childVersion.id, + untracked: 1, }) - .where("entity_id", "=", "e0") - .where("version_id", "=", "version_b") .execute(); - const divergedState = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_schema") - .where("entity_id", "=", "e0") - .selectAll() - .orderBy("version_id") + // 4. 
Verify child now sees its own untracked state + const finalState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "untracked_override_test") + .where("version_id", "=", childVersion.id) + .select([sql`json(snapshot_content)`.as("snapshot_content"), "untracked"]) .execute(); - // All three versions should have different states - expect(divergedState).toMatchObject([ - { - entity_id: "e0", - snapshot_content: { value: "base state" }, - version_id: "base_version", - }, - { - entity_id: "e0", - snapshot_content: { value: "updated in version A" }, - version_id: "version_a", - }, - { - entity_id: "e0", - snapshot_content: { value: "updated in version B" }, - version_id: "version_b", - }, - ]); + expectDeterministic(finalState).toHaveLength(1); + expectDeterministic(finalState[0]?.snapshot_content).toEqual({ + value: "child untracked", + }); + expectDeterministic(finalState[0]?.untracked).toBe(1); } ); - simulationTest( - "delete operations remove entries from underlying data", - async ({ openSimulatedLix }) => { + "tracked state in child overrides inherited untracked state", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -817,58 +598,93 @@ simulationTest( ], }); - const activeVersion = await lix.db - .selectFrom("active_version") - .innerJoin("version", "active_version.version_id", "version.id") - .selectAll("version") - .executeTakeFirstOrThrow(); + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "mock_schema", + "x-lix-version": "1.0", + type: "object", + additionalProperties: false, + properties: { + value: { type: "string" }, + }, + }; - // Insert initial state await lix.db - .insertInto("state_all") + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); + + const db = lix.db as unknown as Kysely; + const childVersion = await createVersion({ lix, name: "child" }); + + // 1. Insert untracked state in global version + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "delete-cache-entity", - schema_key: "delete-cache-schema", - file_id: "delete-cache-file", - plugin_key: "delete-plugin", - snapshot_content: { to: "delete" }, + entity_id: "override_test", + file_id: "f1", + schema_key: "mock_schema", + plugin_key: "p1", schema_version: "1.0", - version_id: activeVersion.id, + snapshot_content: JSON.stringify({ value: "global untracked" }), + version_id: "global", + untracked: 1, }) .execute(); - // Verify data exists - const beforeDelete = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "delete-cache-entity") - .selectAll() + // 2. 
Verify child inherits untracked state + const inheritedState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "override_test") + .where("version_id", "=", childVersion.id) + .select([ + sql`json(snapshot_content)`.as("snapshot_content"), + "untracked", + "inherited_from_version_id", + ]) .execute(); - expect(beforeDelete).toHaveLength(1); + expectDeterministic(inheritedState).toHaveLength(1); + expectDeterministic(inheritedState[0]?.snapshot_content).toEqual({ + value: "global untracked", + }); + expectDeterministic(inheritedState[0]?.untracked).toBe(1); + expectDeterministic(inheritedState[0]?.inherited_from_version_id).toBe( + "global" + ); - // Delete the state - this creates a deletion change (doesn't physically remove cache entry) - await lix.db - .deleteFrom("state_all") - .where("entity_id", "=", "delete-cache-entity") - .where("schema_key", "=", "delete-cache-schema") - .where("file_id", "=", "delete-cache-file") - .where("version_id", "=", activeVersion.id) + // 3. Insert tracked state in child version for same entity + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "override_test", + file_id: "f1", + schema_key: "mock_schema", + plugin_key: "p1", + schema_version: "1.0", + snapshot_content: JSON.stringify({ value: "child tracked" }), + version_id: childVersion.id, + untracked: 0, + }) .execute(); - // Data should no longer be accessible through state view - const afterDelete = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "delete-cache-entity") - .selectAll() + // 4. Verify child now sees tracked state, not inherited untracked + const finalState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "override_test") + .where("version_id", "=", childVersion.id) + .select([sql`json(snapshot_content)`.as("snapshot_content"), "untracked"]) .execute(); - expect(afterDelete).toHaveLength(0); + expectDeterministic(finalState).toHaveLength(1); + expectDeterministic(finalState[0]?.snapshot_content).toEqual({ + value: "child tracked", + }); + expectDeterministic(finalState[0]?.untracked).toBe(0); // Should be tracked } ); - simulationTest( - "change.created_at and state timestamps are consistent", - async ({ openSimulatedLix }) => { + "untracked state has untracked change_id for both inherited and non-inherited entities", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -885,9 +701,7 @@ simulationTest( type: "object", additionalProperties: false, properties: { - value: { - type: "string", - }, + value: { type: "string" }, }, }; @@ -896,50 +710,74 @@ simulationTest( .values({ value: mockSchema }) .execute(); - // Insert state data - await lix.db - .insertInto("state_all") + const db = lix.db as unknown as Kysely; + const childVersion = await createVersion({ lix, name: "child" }); + + // 1. 
Insert untracked state in global version (will be inherited by child) + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "timestamp-test-entity", + entity_id: "inherited-entity", + file_id: "test-file", schema_key: "mock_schema", - file_id: "timestamp-test-file", - plugin_key: "timestamp-test-plugin", - snapshot_content: { value: "timestamp test" }, + plugin_key: "test_plugin", schema_version: "1.0", - version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ value: "global untracked" }), + version_id: "global", + untracked: 1, }) .execute(); - // Get the change record - const changeRecord = await ( - lix.db as unknown as Kysely - ) - .selectFrom("internal_change") - .where("entity_id", "=", "timestamp-test-entity") - .where("schema_key", "=", "mock_schema") - .select(["created_at"]) - .executeTakeFirstOrThrow(); - - // Get the state cache record - const cacheRecord = await ( - lix.db as unknown as Kysely - ) - .selectFrom("internal_state_cache") - .where("entity_id", "=", "timestamp-test-entity") - .where("schema_key", "=", "mock_schema") - .select(["created_at", "updated_at"]) - .executeTakeFirstOrThrow(); + // 2. Insert untracked state directly in child version (non-inherited) + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "non-inherited-entity", + file_id: "test-file", + schema_key: "mock_schema", + plugin_key: "test_plugin", + schema_version: "1.0", + snapshot_content: JSON.stringify({ value: "child untracked" }), + version_id: childVersion.id, + untracked: 1, + }) + .execute(); - // Verify all timestamps are identical - expect(changeRecord.created_at).toBe(cacheRecord.created_at); - expect(changeRecord.created_at).toBe(cacheRecord.updated_at); - }, - { simulations: [normalSimulation] } -); + // 3. Query all untracked entities in child version + const untrackedEntities = await db + .selectFrom("internal_state_vtable") + .where("version_id", "=", childVersion.id) + .where("entity_id", "in", ["inherited-entity", "non-inherited-entity"]) + .where("untracked", "=", 1) + .select(["entity_id", "change_id"]) + .execute(); + + expectDeterministic(untrackedEntities).toHaveLength(2); + + // 4. Check that both entities have untracked change_id + for (const entity of untrackedEntities) { + expectDeterministic(entity.change_id).toBe("untracked"); + } + + // 5. 
Verify specific entities + const inheritedEntity = untrackedEntities.find( + (e) => e.entity_id === "inherited-entity" + ); + const nonInheritedEntity = untrackedEntities.find( + (e) => e.entity_id === "non-inherited-entity" + ); + + expectDeterministic(inheritedEntity).toBeDefined(); + expectDeterministic(nonInheritedEntity).toBeDefined(); + // Both inherited and non-inherited untracked entities should have change_id = "untracked" + expectDeterministic(inheritedEntity?.change_id).toBe("untracked"); + expectDeterministic(nonInheritedEntity?.change_id).toBe("untracked"); + } +); simulationTest( - "state and state_all views expose change_id for blame and diff functionality", - async ({ expectDeterministic, openSimulatedLix }) => { + "internal_state_vtable inherits untracked state from parent version into child", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -956,9 +794,7 @@ simulationTest( type: "object", additionalProperties: false, properties: { - value: { - type: "string", - }, + value: { type: "string" }, }, }; @@ -967,118 +803,198 @@ simulationTest( .values({ value: mockSchema }) .execute(); - // Insert initial state using Kysely to ensure virtual table is triggered - await lix.db - .insertInto("state_all") + const db = lix.db as unknown as Kysely; + + // Get active child version id + const activeVersion = await db + .selectFrom("active_version") + .innerJoin("version", "active_version.version_id", "version.id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + // Insert untracked entity into global version (parent) + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "change-id-test-entity", + file_id: "test-file", schema_key: "mock_schema", - file_id: "change-id-test-file", - plugin_key: "change-id-test-plugin", - snapshot_content: { value: "initial value" }, + plugin_key: "test_plugin", schema_version: "1.0", - version_id: sql`(SELECT version_id FROM active_version)`, + entity_id: "test_key", + snapshot_content: JSON.stringify({ value: "test_value" }), + version_id: "global", + untracked: 1, }) .execute(); - // Query state_all view to verify change_id is exposed - const stateAllResult = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "change-id-test-entity") - .where("schema_key", "=", "mock_schema") - .selectAll() - .execute(); - - expectDeterministic(stateAllResult).toHaveLength(1); - expect(stateAllResult[0]?.change_id).toBeDefined(); - expect(typeof stateAllResult[0]?.change_id).toBe("string"); - - // Query state view (filtered by active version) to verify change_id is exposed - const stateResult = await lix.db - .selectFrom("state") - .where("entity_id", "=", "change-id-test-entity") - .where("schema_key", "=", "mock_schema") - .selectAll() - .execute(); - - expectDeterministic(stateResult).toHaveLength(1); - expect(stateResult[0]?.change_id).toBeDefined(); - expect(typeof stateResult[0]?.change_id).toBe("string"); + // Read from global (parent) + const globalState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "test_key") + .where("version_id", "=", "global") + .select([sql`json(snapshot_content)`.as("snapshot_content")]) + .executeTakeFirstOrThrow(); - // Verify that change_id matches between state and state_all views - expect(stateResult[0]?.change_id).toBe(stateAllResult[0]?.change_id); + expectDeterministic(globalState).toBeDefined(); - // Get the actual change record to verify the change_id is correct - const changeRecord = await 
lix.db - .selectFrom("change") - .where("entity_id", "=", "change-id-test-entity") - .where("schema_key", "=", "mock_schema") - .select(["change.id", "snapshot_content"]) + // Read from child active version (should inherit from global) + const versionState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "test_key") + .where("version_id", "=", activeVersion.id) + .select([ + sql`json(snapshot_content)`.as("snapshot_content"), + "inherited_from_version_id", + ]) .executeTakeFirstOrThrow(); - // Verify that the change_id in the views matches the actual change.id - expect(stateResult[0]?.change_id).toBe(changeRecord.id); - expect(stateAllResult[0]?.change_id).toBe(changeRecord.id); - - // Verify that the snapshot content in the change matches the state view - expect(changeRecord.snapshot_content).toEqual({ value: "initial value" }); - expect(stateResult[0]?.snapshot_content).toEqual({ - value: "initial value", + expectDeterministic(versionState).toBeDefined(); + expectDeterministic(versionState.snapshot_content).toEqual( + globalState.snapshot_content + ); + expectDeterministic(versionState.inherited_from_version_id).toBe("global"); + } +); +simulationTest( + "untracked state overrides inherited state (untracked > inherited)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + lixcol_version_id: "global", + }, + ], }); - // Update the entity to create a new change + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "mock_schema", + "x-lix-version": "1.0", + type: "object", + additionalProperties: false, + properties: { + value: { type: "string" }, + }, + }; + await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { value: "updated value" }, + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); + + const db = lix.db as unknown as Kysely; + + // Step 1: Insert entity in global version (tracked) + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "inherited-entity", + file_id: "test-file", + schema_key: "mock_schema", + plugin_key: "test_plugin", + schema_version: "1.0", + version_id: "global", + snapshot_content: JSON.stringify({ value: "inherited value" }), + untracked: 0, }) - .where("entity_id", "=", "change-id-test-entity") - .where("schema_key", "=", "mock_schema") .execute(); - // Query again to verify change_id updated after modification - const updatedStateResult = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "change-id-test-entity") - .where("schema_key", "=", "mock_schema") - .selectAll() + // Step 2: Create a child version that inherits from global + const childVersion = await createVersion({ lix, name: "child-version" }); + + // Verify inheritance is set up correctly + expectDeterministic(childVersion.inherits_from_version_id).toBe("global"); + + // Step 3: Verify child initially sees inherited entity + const inheritedState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "inherited-entity") + .where("version_id", "=", childVersion.id) + .select([ + sql`json(snapshot_content)`.as("snapshot_content"), + "inherited_from_version_id", + ]) .execute(); - expect(updatedStateResult).toHaveLength(1); - expect(updatedStateResult[0]?.change_id).toBeDefined(); - // The change_id should be different after the update (new change created) - expect(updatedStateResult[0]?.change_id).not.toBe( - 
stateResult[0]?.change_id + expectDeterministic(inheritedState).toHaveLength(1); + expectDeterministic(inheritedState[0]?.snapshot_content).toEqual({ + value: "inherited value", + }); + expectDeterministic(inheritedState[0]?.inherited_from_version_id).toBe( + "global" ); - // Get the new change record by matching the change_id from the updated state - const newChangeRecord = await lix.db - .selectFrom("change") - .where("change.id", "=", updatedStateResult[0]!.change_id) - .select(["change.id", "snapshot_content"]) - .executeTakeFirstOrThrow(); + // Step 4: Add untracked state for same entity in child version + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "inherited-entity", + file_id: "test-file", + schema_key: "mock_schema", + plugin_key: "test_plugin", + schema_version: "1.0", + version_id: childVersion.id, + snapshot_content: JSON.stringify({ value: "untracked override" }), + untracked: 1, + }) + .execute(); - // Verify the new change_id matches the latest change - expect(updatedStateResult[0]?.change_id).toBe(newChangeRecord.id); + // Step 5: Query should return untracked state (higher priority than inherited) + const finalState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "inherited-entity") + .where("version_id", "=", childVersion.id) + .select([ + sql`json(snapshot_content)`.as("snapshot_content"), + "inherited_from_version_id", + "version_id", + ]) + .execute(); - // Verify that the updated snapshot content in the change matches the state view - expect(newChangeRecord.snapshot_content).toEqual({ - value: "updated value", + expectDeterministic(finalState).toHaveLength(1); + expectDeterministic(finalState[0]?.snapshot_content).toEqual({ + value: "untracked override", }); - expect(updatedStateResult[0]?.snapshot_content).toEqual({ - value: "updated value", + expectDeterministic(finalState[0]?.inherited_from_version_id).toBe(null); + expectDeterministic(finalState[0]?.version_id).toBe(childVersion.id); + + // Step 6: Verify the inherited entity still exists in global version (unchanged) + const globalState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "inherited-entity") + .where("version_id", "=", "global") + .select([ + sql`json(snapshot_content)`.as("snapshot_content"), + "inherited_from_version_id", + ]) + .execute(); + + expectDeterministic(globalState).toHaveLength(1); + expectDeterministic(globalState[0]?.snapshot_content).toEqual({ + value: "inherited value", }); + expectDeterministic(globalState[0]?.inherited_from_version_id).toBe(null); + + // Step 7: No changes should be created for untracked mutations + const changes = await db + .selectFrom("change") + .where("entity_id", "=", "inherited-entity") + .where("schema_key", "=", "mock_schema") + .selectAll() + .execute(); + + expectDeterministic(changes).toHaveLength(1); } ); - simulationTest( - "state and state_all views expose commit_id for history queries", - async ({ expectDeterministic, openSimulatedLix }) => { + "untracked state has highest priority in UNION (untracked > tracked > inherited)", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], @@ -1101,124 +1017,97 @@ simulationTest( .values({ value: mockSchema }) .execute(); - // Insert initial state using Kysely to ensure virtual table is triggered - await lix.db - 
.insertInto("state_all") + const db = lix.db as unknown as Kysely; + + // Step 1: Insert tracked state with "init" + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "change-set-id-test-entity", + entity_id: "entity0", + file_id: "test-file", schema_key: "mock_schema", - file_id: "change-set-id-test-file", - plugin_key: "change-set-id-test-plugin", - snapshot_content: { value: "initial value" }, + plugin_key: "test_plugin", schema_version: "1.0", version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ value: "init" }), + untracked: 0, }) .execute(); - const activeVersionAfterInsert = await lix.db - .selectFrom("active_version") - .innerJoin("version", "active_version.version_id", "version.id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // Query state_all view to verify change_set_id is exposed - const stateAllResult = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "change-set-id-test-entity") - .where("schema_key", "=", "mock_schema") - .selectAll() + // Verify tracked state exists + const afterInit = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "entity0") + .select([sql`json(snapshot_content)`.as("snapshot_content")]) .execute(); - expectDeterministic(stateAllResult).toHaveLength(1); - expectDeterministic(stateAllResult[0]).toHaveProperty("commit_id"); - expectDeterministic(stateAllResult[0]?.commit_id).toBe( - activeVersionAfterInsert.commit_id - ); + expectDeterministic(afterInit).toHaveLength(1); + expectDeterministic(afterInit[0]?.snapshot_content).toEqual({ + value: "init", + }); - // Query state view (filtered by active version) to verify commit_id is exposed - const stateResult = await lix.db - .selectFrom("state") - .where("entity_id", "=", "change-set-id-test-entity") + // Step 2: Update to untracked state with "update" (should NOT delete tracked state) + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ value: "update" }), + untracked: 1, + }) + .where("entity_id", "=", "entity0") .where("schema_key", "=", "mock_schema") - .selectAll() + .where("file_id", "=", "test-file") .execute(); - expectDeterministic(stateResult).toHaveLength(1); - expectDeterministic(stateResult[0]?.commit_id).toBeDefined(); - expectDeterministic(stateResult[0]?.commit_id).toBe( - activeVersionAfterInsert.commit_id - ); + // Step 3: Query should return untracked state "update" (highest priority) + const afterUntrackedUpdate = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "entity0") + .select([sql`json(snapshot_content)`.as("snapshot_content")]) + .execute(); - // Verify that commit_id matches between state and state_all views - expectDeterministic(stateResult[0]?.commit_id).toBe( - stateAllResult[0]?.commit_id - ); + expectDeterministic(afterUntrackedUpdate).toHaveLength(1); + expectDeterministic(afterUntrackedUpdate[0]?.snapshot_content).toEqual({ + value: "update", + }); - // Get the change_set_element records - there should be two: - // 1. One in the working change set - // 2. 
One in the version's current change set (after commit) - const changeSetElements = await lix.db - .selectFrom("change_set_element") - .where("entity_id", "=", "change-set-id-test-entity") + // Step 4: Update back to tracked state with "update2" (should delete untracked state) + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ value: "update2" }), + untracked: 0, + }) + .where("entity_id", "=", "entity0") .where("schema_key", "=", "mock_schema") - .where("file_id", "=", "change-set-id-test-file") - .select(["change_set_id", "change_id"]) - .orderBy("change_set_id") + .where("file_id", "=", "test-file") .execute(); - expectDeterministic(changeSetElements).toHaveLength(2); - - // Get the version to understand which change sets we're dealing with - const version = await lix.db - .selectFrom("version") - .where("id", "=", activeVersionAfterInsert.id) - .select(["id", "commit_id", "working_commit_id"]) - .executeTakeFirstOrThrow(); + // Step 5: Query should return tracked state "update2" + const afterTrackedUpdate = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "entity0") + .select([sql`json(snapshot_content)`.as("snapshot_content")]) + .execute(); - // Get the change set ID from the version's commit - const versionCommit = await lix.db - .selectFrom("commit") - .where("id", "=", version.commit_id) - .selectAll() - .executeTakeFirstOrThrow(); + expectDeterministic(afterTrackedUpdate).toHaveLength(1); + expectDeterministic(afterTrackedUpdate[0]?.snapshot_content).toEqual({ + value: "update2", + }); - // Get the change set ID from the working commit - const workingCommit = await lix.db - .selectFrom("commit") - .where("id", "=", version.working_commit_id) + // Verify that a change was created for the final tracked mutation + const changes = await db + .selectFrom("change") + .where("entity_id", "=", "entity0") + .where("schema_key", "=", "mock_schema") .selectAll() - .executeTakeFirstOrThrow(); - - // Find which change_set_element is in the version's change set (not working) - const versionChangeSetElement = changeSetElements.find( - (el) => el.change_set_id === versionCommit.change_set_id - ); - const workingChangeSetElement = changeSetElements.find( - (el) => el.change_set_id === workingCommit.change_set_id - ); - - expectDeterministic(versionChangeSetElement).toBeDefined(); - expectDeterministic(workingChangeSetElement).toBeDefined(); - - // The state view should show the commit_id from the version, - // not related to the working change set (which is temporary and not part of the graph) - expectDeterministic(stateResult[0]?.commit_id).toBe(version.commit_id); - expectDeterministic(stateAllResult[0]?.commit_id).toBe(version.commit_id); + .execute(); - // Verify that the change_id also matches for consistency - expectDeterministic(stateResult[0]?.change_id).toBe( - versionChangeSetElement!.change_id - ); - expectDeterministic(stateAllResult[0]?.change_id).toBe( - versionChangeSetElement!.change_id - ); + expectDeterministic(changes.length).toBeGreaterThan(0); } ); - -// Write-through cache behavior tests simulationTest( - "write-through cache: insert operations populate cache immediately", - async ({ openSimulatedLix }) => { + "deleting without filtering for the version_id deletes the entity from all versions", + async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -1229,275 +1118,150 @@ simulationTest( ], }); - const activeVersion = await lix.db - 
.selectFrom("active_version") - .innerJoin("version", "active_version.version_id", "version.id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // Insert state data - should populate cache via write-through - await lix.db - .insertInto("state_all") + // Insert an entity into global version via vtable + const db = lix.db as unknown as Kysely; + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "write-through-entity", - schema_key: "write-through-schema", - file_id: "write-through-file", - plugin_key: "write-through-plugin", - snapshot_content: { test: "write-through-data" }, + entity_id: "shared-entity", + file_id: "test-file", + schema_key: "test_schema", + plugin_key: "test_plugin", + snapshot_content: JSON.stringify({ + id: "shared-entity", + name: "Global Entity", + }), schema_version: "1.0", - version_id: activeVersion.id, + version_id: "global", + untracked: 0, }) .execute(); - // Cache should be populated immediately via write-through - const cacheEntry = await ( - lix.db as unknown as Kysely - ) - .selectFrom("internal_state_cache") - .where("entity_id", "=", "write-through-entity") - .where("schema_key", "=", "write-through-schema") - .where("file_id", "=", "write-through-file") - .where("version_id", "=", activeVersion.id) - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .executeTakeFirst(); - - expect(cacheEntry).toBeDefined(); - expect(cacheEntry?.entity_id).toBe("write-through-entity"); - expect(cacheEntry?.plugin_key).toBe("write-through-plugin"); - expect(cacheEntry?.snapshot_content).toEqual({ - test: "write-through-data", + // Create a child version that inherits from global + const childVersion = await createVersion({ + lix, + name: "child-version", + inheritsFrom: { id: "global" }, }); - // State view should return the same data (from cache) - const stateResults = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "write-through-entity") + // Verify inheritance - both global and child should see the entity via vtable + const beforeDelete = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "shared-entity") + .where("version_id", "in", ["global", childVersion.id]) + .select([ + "entity_id", + "version_id", + "inherited_from_version_id", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) + .execute(); + + expectDeterministic(beforeDelete).toHaveLength(2); + expectDeterministic(beforeDelete).toMatchObject([ + { + entity_id: "shared-entity", + version_id: "global", + inherited_from_version_id: null, + snapshot_content: { id: "shared-entity", name: "Global Entity" }, + }, + { + entity_id: "shared-entity", + version_id: childVersion.id, + inherited_from_version_id: "global", + snapshot_content: { id: "shared-entity", name: "Global Entity" }, + }, + ]); + + // Delete across versions by not filtering by version_id + await db + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "shared-entity") + .where("schema_key", "=", "test_schema") + .execute(); + + const afterDelete = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "shared-entity") + .where("snapshot_content", "is not", null) .selectAll() .execute(); - expect(stateResults).toHaveLength(1); - expect(stateResults[0]?.entity_id).toBe("write-through-entity"); - expect(stateResults[0]?.snapshot_content).toEqual({ - test: "write-through-data", - }); - }, - { simulations: [normalSimulation] } + // Should be deleted from every version + expectDeterministic(afterDelete).toHaveLength(0); 
+ } ); simulationTest( - "write-through cache: update operations update cache immediately", - async ({ openSimulatedLix }) => { - const lix = await openSimulatedLix({ + "untracked state is persisted across lix openings", + async ({ openSimulatedLix, expectDeterministic }) => { + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "mock_schema", + "x-lix-version": "1.0", + type: "object", + additionalProperties: false, + properties: { + value: { + type: "string", + }, + }, + }; + + // First session - create and insert untracked state via vtable + const lix1 = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], }); - const activeVersion = await lix.db - .selectFrom("active_version") - .innerJoin("version", "active_version.version_id", "version.id") - .selectAll("version") - .executeTakeFirstOrThrow(); + await lix1.db + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); - // Insert initial state - await lix.db - .insertInto("state_all") + const db1 = lix1.db as unknown as Kysely; + + await db1 + .insertInto("internal_state_vtable") .values({ - entity_id: "update-cache-entity", - schema_key: "update-cache-schema", - file_id: "update-cache-file", - plugin_key: "initial-plugin", - snapshot_content: { initial: "value" }, + entity_id: "persistent-entity", + file_id: "test-file", + schema_key: "mock_schema", + plugin_key: "test_plugin", schema_version: "1.0", - version_id: activeVersion.id, + version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ + value: "persistent untracked value", + }), + untracked: 1, }) .execute(); - // Update the state - should update cache via write-through - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { updated: "value" }, - plugin_key: "updated-plugin", - }) - .where("entity_id", "=", "update-cache-entity") - .where("schema_key", "=", "update-cache-schema") - .where("file_id", "=", "update-cache-file") - .where("version_id", "=", activeVersion.id) - .execute(); + // Second session - verify untracked state persists + const lix2 = await openLix({ blob: await lix1.toBlob() }); - // Cache should be immediately updated - const cacheEntry = await ( - lix.db as unknown as Kysely - ) - .selectFrom("internal_state_cache") - .where("entity_id", "=", "update-cache-entity") - .where("schema_key", "=", "update-cache-schema") - .where("file_id", "=", "update-cache-file") - .where("version_id", "=", activeVersion.id) - .selectAll() - .select(sql`json(snapshot_content)`.as("snapshot_content")) - .executeTakeFirst(); + const db2 = lix2.db as unknown as Kysely; + const persistedState = await db2 + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "persistent-entity") + .select([sql`json(snapshot_content)`.as("snapshot_content")]) + .execute(); - expect(cacheEntry).toBeDefined(); - expect(cacheEntry?.snapshot_content).toEqual({ - updated: "value", + expectDeterministic(persistedState).toHaveLength(1); + expectDeterministic(persistedState[0]?.snapshot_content).toEqual({ + value: "persistent untracked value", }); - expect(cacheEntry?.plugin_key).toBe("updated-plugin"); - - // State view should return updated data - const stateResults = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "update-cache-entity") - .selectAll() - .execute(); - expect(stateResults).toHaveLength(1); - expect(stateResults[0]?.snapshot_content).toEqual({ updated: 
"value" }); - expect(stateResults[0]?.plugin_key).toBe("updated-plugin"); - }, - { simulations: [normalSimulation] } + await lix2.close(); + } ); simulationTest( - "delete operations are validated for foreign key constraints", - async ({ openSimulatedLix, expectDeterministic }) => { - const lix = await openSimulatedLix({ - keyValues: [ - { - key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, - lixcol_version_id: "global", - }, - ], - }); - - // Define parent schema (referenced entity) - const parentSchema: LixSchemaDefinition = { - "x-lix-key": "parent_entity", - "x-lix-version": "1.0", - "x-lix-primary-key": ["id"], - type: "object", - properties: { - id: { type: "string" }, - name: { type: "string" }, - }, - required: ["id", "name"], - additionalProperties: false, - }; - - // Define child schema with foreign key to parent - const childSchema: LixSchemaDefinition = { - "x-lix-key": "child_entity", - "x-lix-version": "1.0", - "x-lix-primary-key": ["id"], - "x-lix-foreign-keys": [ - { - properties: ["parent_id"], - references: { - schemaKey: "parent_entity", - properties: ["id"], - }, - }, - ], - type: "object", - properties: { - id: { type: "string" }, - parent_id: { type: "string" }, - value: { type: "string" }, - }, - required: ["id", "parent_id", "value"], - additionalProperties: false, - }; - - // Register both schemas - await lix.db - .insertInto("stored_schema") - .values([{ value: parentSchema }, { value: childSchema }]) - .execute(); - - // Insert parent entity - await lix.db - .insertInto("state_all") - .values({ - entity_id: "parent-1", - schema_key: "parent_entity", - file_id: "test-file", - plugin_key: "test-plugin", - snapshot_content: { - id: "parent-1", - name: "Parent Entity", - }, - schema_version: "1.0", - version_id: sql`(SELECT version_id FROM active_version)`, - }) - .execute(); - - // Insert child entity that references the parent - await lix.db - .insertInto("state_all") - .values({ - entity_id: "child-1", - schema_key: "child_entity", - file_id: "test-file", - plugin_key: "test-plugin", - snapshot_content: { - id: "child-1", - parent_id: "parent-1", - value: "Child Value", - }, - schema_version: "1.0", - version_id: sql`(SELECT version_id FROM active_version)`, - }) - .execute(); - - // Verify both entities exist - const parentBefore = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "parent-1") - .where("schema_key", "=", "parent_entity") - .selectAll() - .execute(); - - const childBefore = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "child-1") - .where("schema_key", "=", "child_entity") - .selectAll() - .execute(); - - expectDeterministic(parentBefore).toHaveLength(1); - expectDeterministic(childBefore).toHaveLength(1); - - // Attempting to delete the parent entity should fail due to foreign key constraint - // because there's a child entity that references it - await expect( - lix.db - .deleteFrom("state_all") - .where("entity_id", "=", "parent-1") - .where("schema_key", "=", "parent_entity") - .execute() - ).rejects.toThrow(/foreign key/i); - - // Verify the parent still exists after failed deletion attempt - const parentAfter = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "parent-1") - .where("schema_key", "=", "parent_entity") - .selectAll() - .execute(); - - expectDeterministic(parentAfter).toHaveLength(1); - } -); - -simulationTest( - "child version inherits entities from parent version", + "child version inherits entities from parent version", async ({ openSimulatedLix, 
expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ @@ -1592,7 +1356,7 @@ simulationTest( const childVersion = await createVersion({ lix, id: "child-version", - inherits_from_version_id: "global", + inheritsFrom: { id: "global" }, }); // Verify the child initially sees the inherited entity @@ -1786,32 +1550,362 @@ simulationTest( .selectAll() .execute(); - expectDeterministic(inheritedEntityAfterDelete).toHaveLength(1); - expectDeterministic( - inheritedEntityAfterDelete[0]?.snapshot_content - ).toEqual({ - id: "shared-entity", - name: "shared Entity", - }); + expectDeterministic(inheritedEntityAfterDelete).toHaveLength(1); + expectDeterministic( + inheritedEntityAfterDelete[0]?.snapshot_content + ).toEqual({ + id: "shared-entity", + name: "shared Entity", + }); + + // Verify we now only see the global entity through the state view (deletion marker is hidden) + const allEntities = await lix.db + .selectFrom("state_all") + .where("entity_id", "=", "shared-entity") + .selectAll() + .execute(); + + // Both cache hit and cache miss scenarios should behave identically: + // copy-on-write deletion hides the entity from child but preserves it in parent + expectDeterministic(allEntities).toHaveLength(1); + expectDeterministic(allEntities[0]?.version_id).toBe("global"); + expectDeterministic(allEntities[0]?.inherited_from_version_id).toBe(null); // It's the original global entity + } +); +simulationTest( + "delete operations are validated for foreign key constraints", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Define parent schema (referenced entity) + const parentSchema: LixSchemaDefinition = { + "x-lix-key": "parent_entity", + "x-lix-version": "1.0", + "x-lix-primary-key": ["id"], + type: "object", + properties: { + id: { type: "string" }, + name: { type: "string" }, + }, + required: ["id", "name"], + additionalProperties: false, + }; + + // Define child schema with foreign key to parent + const childSchema: LixSchemaDefinition = { + "x-lix-key": "child_entity", + "x-lix-version": "1.0", + "x-lix-primary-key": ["id"], + "x-lix-foreign-keys": [ + { + properties: ["parent_id"], + references: { + schemaKey: "parent_entity", + properties: ["id"], + }, + }, + ], + type: "object", + properties: { + id: { type: "string" }, + parent_id: { type: "string" }, + value: { type: "string" }, + }, + required: ["id", "parent_id", "value"], + additionalProperties: false, + }; + + // Register both schemas + await lix.db + .insertInto("stored_schema") + .values([{ value: parentSchema }, { value: childSchema }]) + .execute(); + + // Insert parent entity + await lix.db + .insertInto("state_all") + .values({ + entity_id: "parent-1", + schema_key: "parent_entity", + file_id: "test-file", + plugin_key: "test-plugin", + snapshot_content: { + id: "parent-1", + name: "Parent Entity", + }, + schema_version: "1.0", + version_id: sql`(SELECT version_id FROM active_version)`, + }) + .execute(); + + // Insert child entity that references the parent + await lix.db + .insertInto("state_all") + .values({ + entity_id: "child-1", + schema_key: "child_entity", + file_id: "test-file", + plugin_key: "test-plugin", + snapshot_content: { + id: "child-1", + parent_id: "parent-1", + value: "Child Value", + }, + schema_version: "1.0", + version_id: sql`(SELECT version_id FROM active_version)`, + }) + .execute(); + + // Verify both 
entities exist + const parentBefore = await lix.db + .selectFrom("state_all") + .where("entity_id", "=", "parent-1") + .where("schema_key", "=", "parent_entity") + .selectAll() + .execute(); + + const childBefore = await lix.db + .selectFrom("state_all") + .where("entity_id", "=", "child-1") + .where("schema_key", "=", "child_entity") + .selectAll() + .execute(); + + expectDeterministic(parentBefore).toHaveLength(1); + expectDeterministic(childBefore).toHaveLength(1); + + // Attempting to delete the parent entity should fail due to foreign key constraint + // because there's a child entity that references it + await expect( + lix.db + .deleteFrom("state_all") + .where("entity_id", "=", "parent-1") + .where("schema_key", "=", "parent_entity") + .execute() + ).rejects.toThrow(/foreign key/i); + + // Verify the parent still exists after failed deletion attempt + const parentAfter = await lix.db + .selectFrom("state_all") + .where("entity_id", "=", "parent-1") + .where("schema_key", "=", "parent_entity") + .selectAll() + .execute(); + + expectDeterministic(parentAfter).toHaveLength(1); + } +); +simulationTest( + "validates the schema on insert via internal_state_vtable", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + lixcol_version_id: "global", + }, + ], + }); + + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "mock_schema", + "x-lix-version": "1.0", + type: "object", + additionalProperties: false, + properties: { + value: { + type: "number", + }, + }, + }; + + await lix.db + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); + + const db = lix.db as unknown as Kysely; + + // Test that invalid data is rejected by internal_state_vtable + await expect( + db + .insertInto("internal_state_vtable") + .values({ + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "lix_own_entity", + schema_version: "1.0", + snapshot_content: JSON.stringify({ + value: "hello world", // Should be a number, not a string + }), + version_id: sql`(SELECT version_id FROM active_version)`, + untracked: 0, + }) + .execute() + ).rejects.toThrow(/value must be number/); + } +); + +simulationTest( + "validates the schema on update via internal_state_vtable", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({}); + + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "mock_schema", + "x-lix-version": "1.0", + type: "object", + additionalProperties: false, + properties: { + value: { + type: "number", + }, + }, + }; + + await lix.db + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); + + const db = lix.db as unknown as Kysely; + + // Insert valid data first + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "lix_own_entity", + schema_version: "1.0", + snapshot_content: JSON.stringify({ + value: 5, + }), + version_id: sql`(SELECT version_id FROM active_version)`, + untracked: 0, + }) + .execute(); + + // Test that invalid update is rejected by internal_state_vtable + await expect( + db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ + value: "hello world - updated", // Should be a number, not a string + }), + }) + .where("entity_id", "=", "e0") + .where("schema_key", "=", "mock_schema") + .where("file_id", "=", "f0") + .execute() + ).rejects.toThrow(/value must be 
number/); + + // Verify the data wasn't changed + const viewAfterFailedUpdate = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema") + .select([ + "entity_id", + "file_id", + "schema_key", + "plugin_key", + sql`json(snapshot_content)`.as("snapshot_content"), + ]) + .execute(); + + expect(viewAfterFailedUpdate).toMatchObject([ + { + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "lix_own_entity", + snapshot_content: { + value: 5, + }, + }, + ]); + } +); + +simulationTest( + "delete operations remove entries from underlying data via internal_state_vtable", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + lixcol_version_id: "global", + }, + ], + }); + + const activeVersion = await lix.db + .selectFrom("active_version") + .innerJoin("version", "active_version.version_id", "version.id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + const db = lix.db as unknown as Kysely; + + // Insert initial state via internal_state_vtable + await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "delete-cache-entity", + schema_key: "delete-cache-schema", + file_id: "delete-cache-file", + plugin_key: "delete-plugin", + snapshot_content: JSON.stringify({ to: "delete" }), + schema_version: "1.0", + version_id: sql`(SELECT version_id FROM active_version)`, + untracked: 0, + }) + .execute(); + + // Verify data exists + const beforeDelete = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "delete-cache-entity") + .selectAll() + .execute(); + expect(beforeDelete).toHaveLength(1); + + // Delete the state via internal_state_vtable + await db + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "delete-cache-entity") + .where("schema_key", "=", "delete-cache-schema") + .where("file_id", "=", "delete-cache-file") + .where("version_id", "=", activeVersion.id) + .execute(); - // Verify we now only see the global entity through the state view (deletion marker is hidden) - const allEntities = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "shared-entity") + // Data should no longer be accessible + const afterDelete = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "delete-cache-entity") + .where("snapshot_content", "is not", null) .selectAll() .execute(); - - // Both cache hit and cache miss scenarios should behave identically: - // copy-on-write deletion hides the entity from child but preserves it in parent - expectDeterministic(allEntities).toHaveLength(1); - expectDeterministic(allEntities[0]?.version_id).toBe("global"); - expectDeterministic(allEntities[0]?.inherited_from_version_id).toBe(null); // It's the original global entity + expect(afterDelete).toHaveLength(0); } ); simulationTest( - "deleting without filtering for the version_id deletes the entity from all versions", - async ({ openSimulatedLix, expectDeterministic }) => { + "write-through cache: insert operations populate cache immediately via internal_state_vtable", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -1822,74 +1916,149 @@ simulationTest( ], }); - // Insert an entity into global version - await lix.db - .insertInto("state_all") + const activeVersion = await lix.db + .selectFrom("active_version") + .innerJoin("version", "active_version.version_id", "version.id") + .selectAll("version") + 
.executeTakeFirstOrThrow(); + + const db = lix.db as unknown as Kysely; + + // Insert state data via internal_state_vtable - should populate cache + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "shared-entity", - file_id: "test-file", - schema_key: "test_schema", - plugin_key: "test_plugin", - version_id: "global", - snapshot_content: { - id: "shared-entity", - name: "Global Entity", - }, + entity_id: "write-through-entity", + schema_key: "write-through-schema", + file_id: "write-through-file", + plugin_key: "write-through-plugin", + snapshot_content: JSON.stringify({ test: "write-through-data" }), schema_version: "1.0", + version_id: activeVersion.id, + untracked: 0, }) .execute(); - // Create a child version that inherits from global - const childVersion = await createVersion({ - lix, - name: "child-version", - inherits_from_version_id: "global", + // Cache should be populated immediately via write-through + const cacheEntry = await db + .selectFrom("internal_state_cache") + .where("entity_id", "=", "write-through-entity") + .where("schema_key", "=", "write-through-schema") + .where("file_id", "=", "write-through-file") + .where("version_id", "=", activeVersion.id) + .selectAll() + .select(sql`json(snapshot_content)`.as("snapshot_content")) + .executeTakeFirst(); + + expect(cacheEntry).toBeDefined(); + expect(cacheEntry?.entity_id).toBe("write-through-entity"); + expect(cacheEntry?.plugin_key).toBe("write-through-plugin"); + expect(cacheEntry?.snapshot_content).toEqual({ + test: "write-through-data", }); - // Verify inheritance - both global and child should see the entity - const beforeDelete = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "shared-entity") - .where("version_id", "in", ["global", childVersion.id]) - .selectAll() + // Virtual table should return the same data + const stateResults = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "write-through-entity") + .select(["entity_id", sql`json(snapshot_content)`.as("snapshot_content")]) .execute(); - expectDeterministic(beforeDelete).toHaveLength(2); // One in global, one inherited in child - expectDeterministic(beforeDelete).toMatchObject([ - { - entity_id: "shared-entity", - version_id: "global", - inherited_from_version_id: null, - snapshot_content: { id: "shared-entity", name: "Global Entity" }, - }, - { - entity_id: "shared-entity", - version_id: childVersion.id, - inherited_from_version_id: "global", - snapshot_content: { id: "shared-entity", name: "Global Entity" }, - }, - ]); + expect(stateResults).toHaveLength(1); + expect(stateResults[0]?.entity_id).toBe("write-through-entity"); + expect(stateResults[0]?.snapshot_content).toEqual({ + test: "write-through-data", + }); + }, + { simulations: [normalSimulation] } +); - await lix.db - .deleteFrom("state_all") - .where("entity_id", "=", "shared-entity") - .where("schema_key", "=", "test_schema") +simulationTest( + "write-through cache: update operations update cache immediately via internal_state_vtable", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true, bootstrap: true }, + lixcol_version_id: "global", + }, + ], + }); + + const activeVersion = await lix.db + .selectFrom("active_version") + .innerJoin("version", "active_version.version_id", "version.id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + const db = lix.db as unknown as Kysely; + + // Insert initial state via internal_state_vtable 
+ await db + .insertInto("internal_state_vtable") + .values({ + entity_id: "update-cache-entity", + schema_key: "update-cache-schema", + file_id: "update-cache-file", + plugin_key: "initial-plugin", + snapshot_content: JSON.stringify({ initial: "value" }), + schema_version: "1.0", + version_id: activeVersion.id, + untracked: 0, + }) .execute(); - const afterDelete = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "shared-entity") + // Update the state via internal_state_vtable - should update cache + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ updated: "value" }), + plugin_key: "updated-plugin", + }) + .where("entity_id", "=", "update-cache-entity") + .where("schema_key", "=", "update-cache-schema") + .where("file_id", "=", "update-cache-file") + .where("version_id", "=", activeVersion.id) + .execute(); + + // Cache should be immediately updated + const cacheEntry = await db + .selectFrom("internal_state_cache") + .where("entity_id", "=", "update-cache-entity") + .where("schema_key", "=", "update-cache-schema") + .where("file_id", "=", "update-cache-file") + .where("version_id", "=", activeVersion.id) .selectAll() + .select(sql`json(snapshot_content)`.as("snapshot_content")) + .executeTakeFirst(); + + expect(cacheEntry).toBeDefined(); + expect(cacheEntry?.snapshot_content).toEqual({ + updated: "value", + }); + expect(cacheEntry?.plugin_key).toBe("updated-plugin"); + + // Virtual table should return updated data + const stateResults = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "update-cache-entity") + .select([ + sql`json(snapshot_content)`.as("snapshot_content"), + "plugin_key", + ]) .execute(); - // Should be deleted from every version - expectDeterministic(afterDelete).toHaveLength(0); - } + expect(stateResults).toHaveLength(1); + expect(stateResults[0]?.snapshot_content).toEqual({ updated: "value" }); + expect(stateResults[0]?.plugin_key).toBe("updated-plugin"); + }, + { simulations: [normalSimulation] } ); simulationTest( - "untracked mutations don't trigger change control", - async ({ openSimulatedLix, expectDeterministic }) => { + "change.created_at and state timestamps are consistent", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -1917,114 +2086,50 @@ simulationTest( .values({ value: mockSchema }) .execute(); - // Count changes before any untracked mutations - const changesInitial = await lix.db - .selectFrom("change") - .selectAll() - .execute(); - - // 1. 
INSERT untracked state + // Insert state data await lix.db .insertInto("state_all") .values({ - entity_id: "untracked-entity", - file_id: "test-file", + entity_id: "timestamp-test-entity", schema_key: "mock_schema", - plugin_key: "test_plugin", + file_id: "timestamp-test-file", + plugin_key: "timestamp-test-plugin", + snapshot_content: { value: "timestamp test" }, schema_version: "1.0", version_id: sql`(SELECT version_id FROM active_version)`, - snapshot_content: { - value: "untracked value", - }, - untracked: true, - }) - .execute(); - - // Count changes after untracked insert - const changesAfterInsert = await lix.db - .selectFrom("change") - .selectAll() - .execute(); - - // Number of changes should be identical (no change control for untracked) - expectDeterministic(changesAfterInsert.length).toBe(changesInitial.length); - - // Verify the untracked entity exists in state view - const untrackedState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "untracked-entity") - .selectAll() - .execute(); - - expectDeterministic(untrackedState).toHaveLength(1); - expectDeterministic(untrackedState[0]?.snapshot_content).toEqual({ - value: "untracked value", - }); - expectDeterministic(untrackedState[0]?.untracked).toBe(1); - - // 2. UPDATE untracked state - await lix.db - .updateTable("state_all") - .where("entity_id", "=", "untracked-entity") - .set({ - snapshot_content: { - value: "untracked value updated", - }, - untracked: true, }) .execute(); - // Count changes after untracked update - const changesAfterUpdate = await lix.db - .selectFrom("change") - .selectAll() - .execute(); - - // Number of changes should still be identical (no change control for untracked) - expectDeterministic(changesAfterUpdate.length).toBe(changesInitial.length); - - // Verify the untracked entity was updated - const updatedState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "untracked-entity") - .selectAll() - .execute(); - - expectDeterministic(updatedState).toHaveLength(1); - expectDeterministic(updatedState[0]?.snapshot_content).toEqual({ - value: "untracked value updated", - }); - expectDeterministic(updatedState[0]?.untracked).toBe(1); - - // 3. 
DELETE untracked state - await lix.db - .deleteFrom("state_all") - .where("entity_id", "=", "untracked-entity") - .execute(); - - // Count changes after untracked delete - const changesAfterDelete = await lix.db - .selectFrom("change") - .selectAll() - .execute(); - - // Number of changes should still be identical (no change control for untracked) - expectDeterministic(changesAfterDelete.length).toBe(changesInitial.length); + // Get the change record + const changeRecord = await ( + lix.db as unknown as Kysely + ) + .selectFrom("internal_change") + .where("entity_id", "=", "timestamp-test-entity") + .where("schema_key", "=", "mock_schema") + .select(["created_at"]) + .executeTakeFirstOrThrow(); - // Verify the untracked entity was deleted - const deletedState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "untracked-entity") - .selectAll() - .execute(); + // Get the state cache record + const cacheRecord = await ( + lix.db as unknown as Kysely + ) + .selectFrom("internal_state_cache") + .where("entity_id", "=", "timestamp-test-entity") + .where("schema_key", "=", "mock_schema") + .select(["created_at", "updated_at"]) + .executeTakeFirstOrThrow(); - expectDeterministic(deletedState).toHaveLength(0); - } + // Verify all timestamps are identical + expect(changeRecord.created_at).toBe(cacheRecord.created_at); + expect(changeRecord.created_at).toBe(cacheRecord.updated_at); + }, + { simulations: [normalSimulation] } ); simulationTest( - "tracked update to previously untracked entity deletes untracked state", - async ({ openSimulatedLix, expectDeterministic }) => { + "internal_state_vtable exposes change_id for blame and diff functionality", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -2052,140 +2157,94 @@ simulationTest( .values({ value: mockSchema }) .execute(); - // Insert untracked state - await lix.db - .insertInto("state_all") + const db = lix.db as unknown as Kysely; + + // Insert initial state via internal_state_vtable (tracked) + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "override-entity", - file_id: "test-file", + entity_id: "change-id-test-entity", + file_id: "change-id-test-file", schema_key: "mock_schema", - plugin_key: "test_plugin", + plugin_key: "change-id-test-plugin", schema_version: "1.0", version_id: sql`(SELECT version_id FROM active_version)`, - snapshot_content: { - value: "untracked value", - }, - untracked: true, + snapshot_content: JSON.stringify({ value: "initial value" }), + untracked: 0, }) .execute(); - // Verify untracked state exists - const untrackedState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "override-entity") - .selectAll() - .execute(); - - expectDeterministic(untrackedState).toHaveLength(1); - expectDeterministic(untrackedState[0]?.snapshot_content).toEqual({ - value: "untracked value", - }); - - // Now update the untracked entity to make it tracked (should delete from untracked table) - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { - value: "tracked value", - }, - untracked: false, - }) - .where("entity_id", "=", "override-entity") + // Query internal_state_vtable to verify change_id is exposed + const initialResult = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "change-id-test-entity") .where("schema_key", "=", "mock_schema") + .where("file_id", "=", "change-id-test-file") + .select(["change_id", sql`json(snapshot_content)`.as("snapshot_content")]) .execute(); - // Verify 
tracked state has overridden untracked state - const finalState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "override-entity") - .selectAll() - .execute(); - - expectDeterministic(finalState).toHaveLength(1); - expectDeterministic(finalState[0]?.snapshot_content).toEqual({ - value: "tracked value", - }); + expect(initialResult).toHaveLength(1); + expect(initialResult[0]?.change_id).toBeDefined(); + expect(typeof initialResult[0]?.change_id).toBe("string"); - // Verify a change was created for the tracked mutation - const changes = await lix.db + // Get the actual change record to verify the change_id + const changeRecord = await db .selectFrom("change") - .where("entity_id", "=", "override-entity") + .where("entity_id", "=", "change-id-test-entity") .where("schema_key", "=", "mock_schema") - .selectAll() - .execute(); - - expectDeterministic(changes.length).toBeGreaterThan(0); - } -); - -simulationTest( - "untracked state is persisted across lix sessions", - async ({ openSimulatedLix, expectDeterministic }) => { - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - type: "object", - additionalProperties: false, - properties: { - value: { - type: "string", - }, - }, - }; + .select(["change.id", "snapshot_content"]) + .executeTakeFirstOrThrow(); - // First session - create and insert untracked state - const lix1 = await openSimulatedLix({ - keyValues: [ - { - key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, - lixcol_version_id: "global", - }, - ], + expect(initialResult[0]?.change_id).toBe(changeRecord.id); + expect(changeRecord.snapshot_content).toEqual({ value: "initial value" }); + expect(initialResult[0]?.snapshot_content).toEqual({ + value: "initial value", }); - await lix1.db - .insertInto("stored_schema") - .values({ value: mockSchema }) + // Update the entity to create a new change via internal_state_vtable + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ value: "updated value" }), + }) + .where("entity_id", "=", "change-id-test-entity") + .where("schema_key", "=", "mock_schema") + .where("file_id", "=", "change-id-test-file") .execute(); - await lix1.db - .insertInto("state_all") - .values({ - entity_id: "persistent-entity", - file_id: "test-file", - schema_key: "mock_schema", - plugin_key: "test_plugin", - schema_version: "1.0", - version_id: sql`(SELECT version_id FROM active_version)`, - snapshot_content: { - value: "persistent untracked value", - }, - untracked: true, - }) + // Query again to verify change_id updated after modification + const updatedResult = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "change-id-test-entity") + .where("schema_key", "=", "mock_schema") + .where("file_id", "=", "change-id-test-file") + .select(["change_id", sql`json(snapshot_content)`.as("snapshot_content")]) .execute(); - // Second session - verify untracked state persists - const lix2 = await openLix({ blob: await lix1.toBlob() }); + expect(updatedResult).toHaveLength(1); + expect(updatedResult[0]?.change_id).toBeDefined(); + expect(updatedResult[0]?.change_id).not.toBe(initialResult[0]?.change_id); - const persistedState = await lix2.db - .selectFrom("state_all") - .where("entity_id", "=", "persistent-entity") - .selectAll() - .execute(); + // Get the new change record by matching the change_id from the updated state + const newChangeRecord = await db + .selectFrom("change") + .where("change.id", "=", 
updatedResult[0]!.change_id) + .select(["change.id", "snapshot_content"]) + .executeTakeFirstOrThrow(); - expectDeterministic(persistedState).toHaveLength(1); - expectDeterministic(persistedState[0]?.snapshot_content).toEqual({ - value: "persistent untracked value", + expect(updatedResult[0]?.change_id).toBe(newChangeRecord.id); + expect(newChangeRecord.snapshot_content).toEqual({ + value: "updated value", + }); + expect(updatedResult[0]?.snapshot_content).toEqual({ + value: "updated value", }); - - await lix2.close(); } ); simulationTest( - "untracked state has highest priority in UNION (untracked > tracked > inherited)", - async ({ openSimulatedLix, expectDeterministic }) => { + "exposes commit_id for history queries", + async ({ expectDeterministic, openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -2208,124 +2267,130 @@ simulationTest( }, }; - await lix.db + const db = lix.db as unknown as Kysely; + + await db .insertInto("stored_schema") .values({ value: mockSchema }) .execute(); - // Step 1: Insert tracked state with "init" - await lix.db - .insertInto("state_all") + // Insert initial state using Kysely to ensure virtual table is triggered + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "entity0", - file_id: "test-file", + entity_id: "change-set-id-test-entity", schema_key: "mock_schema", - plugin_key: "test_plugin", + file_id: "change-set-id-test-file", + plugin_key: "change-set-id-test-plugin", + snapshot_content: JSON.stringify({ value: "initial value" }), schema_version: "1.0", version_id: sql`(SELECT version_id FROM active_version)`, - snapshot_content: { - value: "init", - }, - untracked: false, + untracked: 0, }) .execute(); - // Verify tracked state exists - const afterInit = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "entity0") + const activeVersionAfterInsert = await lix.db + .selectFrom("active_version") + .innerJoin("version", "active_version.version_id", "version.id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + // Query state_all view to verify commit_id is exposed + const stateResult = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "change-set-id-test-entity") + .where("schema_key", "=", "mock_schema") .selectAll() .execute(); - expectDeterministic(afterInit).toHaveLength(1); - expectDeterministic(afterInit[0]?.snapshot_content).toEqual({ - value: "init", - }); + expectDeterministic(stateResult).toHaveLength(1); + expectDeterministic(stateResult[0]).toHaveProperty("commit_id"); + expectDeterministic(stateResult[0]?.commit_id).toBe( + activeVersionAfterInsert.commit_id + ); - // Step 2: Update to untracked state with "update" (should NOT delete tracked state) - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { - value: "update", - }, - untracked: true, - }) - .where("entity_id", "=", "entity0") + // Get the change_set_element records - there should be two: + // 1. One in the working change set + // 2. 
One in the version's current change set (after commit) + const changeSetElements = await lix.db + .selectFrom("change_set_element") + .where("entity_id", "=", "change-set-id-test-entity") .where("schema_key", "=", "mock_schema") + .where("file_id", "=", "change-set-id-test-file") + .select(["change_set_id", "change_id"]) + .orderBy("change_set_id") .execute(); - // Step 3: Query should return untracked state "update" (highest priority) - const afterUntrackedUpdate = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "entity0") - .selectAll() - .execute(); + expectDeterministic(changeSetElements).toHaveLength(2); - expectDeterministic(afterUntrackedUpdate).toHaveLength(1); - expectDeterministic(afterUntrackedUpdate[0]?.snapshot_content).toEqual({ - value: "update", - }); + // Get the version to understand which change sets we're dealing with + const version = await lix.db + .selectFrom("version") + .where("id", "=", activeVersionAfterInsert.id) + .select(["id", "commit_id", "working_commit_id"]) + .executeTakeFirstOrThrow(); - // Step 4: Update back to tracked state with "update2" (should delete untracked state) - await lix.db - .updateTable("state_all") - .set({ - snapshot_content: { - value: "update2", - }, - untracked: false, - }) - .where("entity_id", "=", "entity0") - .where("schema_key", "=", "mock_schema") - .execute(); + // Get the change set ID from the version's commit + const versionCommit = await lix.db + .selectFrom("commit") + .where("id", "=", version.commit_id) + .selectAll() + .executeTakeFirstOrThrow(); - // Step 5: Query should return tracked state "update2" - const afterTrackedUpdate = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "entity0") + // Get the change set ID from the working commit + const workingCommit = await lix.db + .selectFrom("commit") + .where("id", "=", version.working_commit_id) .selectAll() - .execute(); + .executeTakeFirstOrThrow(); - expectDeterministic(afterTrackedUpdate).toHaveLength(1); - expectDeterministic(afterTrackedUpdate[0]?.snapshot_content).toEqual({ - value: "update2", - }); + // Find which change_set_element is in the version's change set (not working) + const versionChangeSetElement = changeSetElements.find( + (el) => el.change_set_id === versionCommit.change_set_id + ); + const workingChangeSetElement = changeSetElements.find( + (el) => el.change_set_id === workingCommit.change_set_id + ); - // Verify that a change was created for the final tracked mutation - const changes = await lix.db - .selectFrom("change") - .where("entity_id", "=", "entity0") - .where("schema_key", "=", "mock_schema") - .selectAll() - .execute(); + expectDeterministic(versionChangeSetElement).toBeDefined(); + expectDeterministic(workingChangeSetElement).toBeDefined(); - expectDeterministic(changes.length).toBeGreaterThan(0); + // The state view should show the commit_id from the version, + // not related to the working change set (which is temporary and not part of the graph) + expectDeterministic(stateResult[0]?.commit_id).toBe(version.commit_id); + expectDeterministic(stateResult[0]?.commit_id).toBe(version.commit_id); + + // Verify that the change_id also matches for consistency + expectDeterministic(stateResult[0]?.change_id).toBe( + versionChangeSetElement!.change_id + ); + expectDeterministic(stateResult[0]?.change_id).toBe( + versionChangeSetElement!.change_id + ); } ); simulationTest( - "untracked state overrides inherited state (untracked > inherited)", + "transaction table is empty after select, insert, update, 
delete via internal_state_vtable", async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], }); + // Prepare a simple schema for state mutations const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", + "x-lix-key": "mock_schema_txn", "x-lix-version": "1.0", type: "object", additionalProperties: false, properties: { - value: { - type: "string", - }, + value: { type: "string" }, }, }; @@ -2334,106 +2399,64 @@ simulationTest( .values({ value: mockSchema }) .execute(); - // Step 1: Insert entity in global version (will be inherited by child) - await lix.db - .insertInto("state_all") - .values({ - entity_id: "inherited-entity", - file_id: "test-file", - schema_key: "mock_schema", - plugin_key: "test_plugin", - schema_version: "1.0", - version_id: "global", - snapshot_content: { - value: "inherited value", - }, - untracked: false, - }) - .execute(); - - // Step 2: Create a child version that inherits from global - const childVersion = await createVersion({ lix, name: "child-version" }); + const db = lix.db as unknown as Kysely; - // Verify inheritance is set up correctly - expectDeterministic(childVersion.inherits_from_version_id).toBe("global"); + // Helper to assert transaction table is empty + const expectTxnEmpty = async () => { + const rows = await db + .selectFrom("internal_change_in_transaction") + .selectAll() + .execute(); + expectDeterministic(rows.length).toBe(0); + }; - // Step 3: Verify child initially sees inherited entity - const inheritedState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "inherited-entity") - .where("version_id", "=", childVersion.id) + // 1) SELECT should not stage anything + await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema_txn") .selectAll() .execute(); + await expectTxnEmpty(); - expectDeterministic(inheritedState).toHaveLength(1); - expectDeterministic(inheritedState[0]?.snapshot_content).toEqual({ - value: "inherited value", - }); - expectDeterministic(inheritedState[0]?.inherited_from_version_id).toBe( - "global" - ); - - // Step 4: Add untracked state for same entity in child version - await lix.db - .insertInto("state_all") + // 2) INSERT tracked via internal_state_vtable + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "inherited-entity", - file_id: "test-file", - schema_key: "mock_schema", - plugin_key: "test_plugin", + entity_id: "e_txn", + file_id: "f_txn", + schema_key: "mock_schema_txn", + plugin_key: "lix_own_entity", schema_version: "1.0", - version_id: childVersion.id, - snapshot_content: { - value: "untracked override", - }, - untracked: true, + version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ value: "v1" }), + untracked: 0, // tracked }) .execute(); + await expectTxnEmpty(); - // Step 5: Query should return untracked state (higher priority than inherited) - const finalState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "inherited-entity") - .where("version_id", "=", childVersion.id) - .selectAll() - .execute(); - - expectDeterministic(finalState).toHaveLength(1); - expectDeterministic(finalState[0]?.snapshot_content).toEqual({ - value: "untracked override", - }); - expectDeterministic(finalState[0]?.inherited_from_version_id).toBe(null); // Should not be inherited anymore - 
expectDeterministic(finalState[0]?.version_id).toBe(childVersion.id); - - // Step 6: Verify the inherited entity still exists in global version (unchanged) - const globalState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "inherited-entity") - .where("version_id", "=", "global") - .selectAll() + // 3) UPDATE via internal_state_vtable + await db + .updateTable("internal_state_vtable") + .set({ snapshot_content: JSON.stringify({ value: "v2" }) }) + .where("entity_id", "=", "e_txn") + .where("schema_key", "=", "mock_schema_txn") + .where("file_id", "=", "f_txn") .execute(); + await expectTxnEmpty(); - expectDeterministic(globalState).toHaveLength(1); - expectDeterministic(globalState[0]?.snapshot_content).toEqual({ - value: "inherited value", - }); - expectDeterministic(globalState[0]?.inherited_from_version_id).toBe(null); - - // Step 7: No changes should be created for untracked mutations - const changes = await lix.db - .selectFrom("change") - .where("entity_id", "=", "inherited-entity") - .where("schema_key", "=", "mock_schema") - .selectAll() + // 4) DELETE via internal_state_vtable + await db + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "e_txn") + .where("schema_key", "=", "mock_schema_txn") + .where("file_id", "=", "f_txn") .execute(); - - // Should only have the original change from global version, not the untracked one - expectDeterministic(changes).toHaveLength(1); + await expectTxnEmpty(); } ); simulationTest( - "untracked state inheritance", + "untracked mutations don't trigger change control via internal_state_vtable", async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ @@ -2458,148 +2481,97 @@ simulationTest( }; await lix.db - .insertInto("stored_schema_all") - .values({ value: mockSchema, lixcol_version_id: "global" }) + .insertInto("stored_schema") + .values({ value: mockSchema }) .execute(); - const activeVersion = await lix.db - .selectFrom("active_version") - .innerJoin("version", "active_version.version_id", "version.id") - .selectAll("version") - .executeTakeFirstOrThrow(); + const db = lix.db as unknown as Kysely; - // inserting into the global version - await lix.db - .insertInto("state_all") + // Count changes before any untracked mutations + const changesInitial = await db.selectFrom("change").selectAll().execute(); + + // 1. 
INSERT untracked state via internal_state_vtable + await db + .insertInto("internal_state_vtable") .values({ + entity_id: "untracked-entity", file_id: "test-file", schema_key: "mock_schema", plugin_key: "test_plugin", schema_version: "1.0", - entity_id: "test_key", - snapshot_content: { - value: "test_value", - }, - version_id: "global", - untracked: true, + version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ + value: "untracked value", + }), + untracked: 1, // untracked }) .execute(); - const globalState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "test_key") - .where("version_id", "=", "global") - .select("snapshot_content") - .executeTakeFirstOrThrow(); - - expectDeterministic(globalState).toBeDefined(); - - const versionState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "test_key") - .where("version_id", "=", activeVersion.id) - .select("snapshot_content") - .executeTakeFirstOrThrow(); - - expectDeterministic(versionState).toBeDefined(); - expectDeterministic(versionState).toEqual(globalState); - } -); - -simulationTest( - "tracked state in child overrides inherited untracked state", - async ({ openSimulatedLix, expectDeterministic }) => { - const lix = await openSimulatedLix({ - keyValues: [ - { - key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, - lixcol_version_id: "global", - }, - ], - }); + // Count changes after untracked insert + const changesAfterInsert = await db + .selectFrom("change") + .selectAll() + .execute(); - const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", - "x-lix-version": "1.0", - type: "object", - additionalProperties: false, - properties: { - value: { - type: "string", - }, - }, - }; + // Number of changes should be identical (no change control for untracked) + expectDeterministic(changesAfterInsert.length).toBe(changesInitial.length); - await lix.db - .insertInto("stored_schema") // Use stored_schema, not stored_schema_all - .values({ value: mockSchema }) + // Verify the untracked entity exists in vtable + const untrackedState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "untracked-entity") + .select([ + "entity_id", + sql`json(snapshot_content)`.as("snapshot_content"), + "untracked", + ]) .execute(); - const childVersion = await createVersion({ lix, name: "child" }); + expectDeterministic(untrackedState).toHaveLength(1); + expectDeterministic(untrackedState[0]?.snapshot_content).toEqual({ + value: "untracked value", + }); + expectDeterministic(untrackedState[0]?.untracked).toBe(1); - // 1. Insert untracked state in global version - await lix.db - .insertInto("state_all") - .values({ - entity_id: "override_test", - file_id: "f1", - schema_key: "mock_schema", - plugin_key: "p1", - schema_version: "1.0", - snapshot_content: { value: "global untracked" }, - version_id: "global", - untracked: true, + // 2. UPDATE untracked state via internal_state_vtable + await db + .updateTable("internal_state_vtable") + .where("entity_id", "=", "untracked-entity") + .set({ + snapshot_content: JSON.stringify({ + value: "untracked value updated", + }), }) .execute(); - // 2. 
Verify child inherits untracked state - const inheritedState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "override_test") - .where("version_id", "=", childVersion.id) + // Count changes after untracked update + const changesAfterUpdate = await db + .selectFrom("change") .selectAll() .execute(); - expectDeterministic(inheritedState).toHaveLength(1); - expectDeterministic(inheritedState[0]?.snapshot_content).toEqual({ - value: "global untracked", - }); - expectDeterministic(inheritedState[0]?.untracked).toBe(1); + // Still no new changes + expectDeterministic(changesAfterUpdate.length).toBe(changesInitial.length); - // 3. Insert tracked state in child version for same entity - await lix.db - .insertInto("state_all") - .values({ - entity_id: "override_test", - file_id: "f1", - schema_key: "mock_schema", - plugin_key: "p1", - schema_version: "1.0", - snapshot_content: { value: "child tracked" }, - version_id: childVersion.id, - untracked: false, // Important: this is tracked state - }) + // 3. DELETE untracked state via internal_state_vtable + await db + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "untracked-entity") .execute(); - // 4. Verify child now sees tracked state, not inherited untracked - const finalState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "override_test") - .where("version_id", "=", childVersion.id) + // Count changes after untracked delete + const changesAfterDelete = await db + .selectFrom("change") .selectAll() .execute(); - expectDeterministic(finalState).toHaveLength(1); - expectDeterministic(finalState[0]?.snapshot_content).toEqual({ - value: "child tracked", - }); - expectDeterministic(finalState[0]?.untracked).toBe(0); // Should be tracked + // Still no new changes + expectDeterministic(changesAfterDelete.length).toBe(changesInitial.length); } ); simulationTest( - "untracked state in child overrides inherited untracked state", + "tracked update to previously untracked entity deletes untracked state via internal_state_vtable", async ({ openSimulatedLix, expectDeterministic }) => { const lix = await openSimulatedLix({ keyValues: [ @@ -2624,75 +2596,81 @@ simulationTest( }; await lix.db - .insertInto("stored_schema") // Use stored_schema + .insertInto("stored_schema") .values({ value: mockSchema }) .execute(); - const childVersion = await createVersion({ lix, name: "child" }); + const db = lix.db as unknown as Kysely; - // 1. Insert untracked state in global version - await lix.db - .insertInto("state_all") + // Insert untracked state via internal_state_vtable + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "untracked_override_test", - file_id: "f1", + entity_id: "override-entity", + file_id: "test-file", schema_key: "mock_schema", - plugin_key: "p1", + plugin_key: "test_plugin", schema_version: "1.0", - snapshot_content: { value: "global untracked" }, - version_id: "global", - untracked: true, + version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ + value: "untracked value", + }), + untracked: 1, }) .execute(); - // 2. 
Verify child inherits untracked state - const inheritedState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "untracked_override_test") - .where("version_id", "=", childVersion.id) - .selectAll() + // Verify untracked state exists + const untrackedState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "override-entity") + .select([sql`json(snapshot_content)`.as("snapshot_content"), "untracked"]) .execute(); - expectDeterministic(inheritedState).toHaveLength(1); - expectDeterministic(inheritedState[0]?.snapshot_content).toEqual({ - value: "global untracked", + expectDeterministic(untrackedState).toHaveLength(1); + expectDeterministic(untrackedState[0]?.snapshot_content).toEqual({ + value: "untracked value", }); - expectDeterministic(inheritedState[0]?.untracked).toBe(1); - // 3. Insert untracked state in child version for same entity - await lix.db - .insertInto("state_all") - .values({ - entity_id: "untracked_override_test", - file_id: "f1", - schema_key: "mock_schema", - plugin_key: "p1", - schema_version: "1.0", - snapshot_content: { value: "child untracked" }, - version_id: childVersion.id, - untracked: true, + // Now update the entity as tracked (should delete from untracked table) + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ + value: "tracked value", + }), + untracked: 0, }) + .where("entity_id", "=", "override-entity") + .where("schema_key", "=", "mock_schema") .execute(); - // 4. Verify child now sees its own untracked state - const finalState = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "untracked_override_test") - .where("version_id", "=", childVersion.id) - .selectAll() + // Verify tracked state has overridden untracked state + const finalState = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "override-entity") + .select([sql`json(snapshot_content)`.as("snapshot_content"), "untracked"]) .execute(); expectDeterministic(finalState).toHaveLength(1); expectDeterministic(finalState[0]?.snapshot_content).toEqual({ - value: "child untracked", + value: "tracked value", }); - expectDeterministic(finalState[0]?.untracked).toBe(1); + + // Verify a change was created for the tracked mutation + const changes = await db + .selectFrom("change") + .where("entity_id", "=", "override-entity") + .where("schema_key", "=", "mock_schema") + .selectAll() + .execute(); + + expectDeterministic(changes.length).toBeGreaterThan(0); } ); simulationTest( - "untracked state has untracked change_id for both inherited and non-inherited entities", - async ({ openSimulatedLix, expectDeterministic }) => { + "created_at and updated_at timestamps are computed correctly via internal_state_vtable", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -2720,190 +2698,288 @@ simulationTest( .values({ value: mockSchema }) .execute(); - const childVersion = await createVersion({ lix, name: "child" }); + const db = lix.db as unknown as Kysely; - // 1. 
Insert untracked state in global version (will be inherited by child) - await lix.db - .insertInto("state_all") + // Insert initial entity via internal_state_vtable + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "inherited-entity", - file_id: "test-file", + entity_id: "e0", + file_id: "f0", schema_key: "mock_schema", - plugin_key: "test_plugin", + plugin_key: "lix_own_entity", schema_version: "1.0", - snapshot_content: { value: "global untracked" }, - version_id: "global", - untracked: true, + version_id: sql`(SELECT version_id FROM active_version)`, + snapshot_content: JSON.stringify({ + value: "initial value", + }), + untracked: 0, }) .execute(); - // 2. Insert untracked state directly in child version (non-inherited) - await lix.db - .insertInto("state_all") - .values({ - entity_id: "non-inherited-entity", - file_id: "test-file", - schema_key: "mock_schema", - plugin_key: "test_plugin", - schema_version: "1.0", - snapshot_content: { value: "child untracked" }, - version_id: childVersion.id, - untracked: true, + const stateAfterInsert = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "e0") + .selectAll() + .execute(); + + expect(stateAfterInsert).toHaveLength(1); + expect(stateAfterInsert[0]?.created_at).toBeDefined(); + expect(stateAfterInsert[0]?.updated_at).toBeDefined(); + expect(stateAfterInsert[0]?.created_at).toBe( + stateAfterInsert[0]?.updated_at + ); + + // Update the entity via internal_state_vtable + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ + value: "updated value", + }), }) + .where("entity_id", "=", "e0") + .where("schema_key", "=", "mock_schema") .execute(); - // 3. Query all untracked entities in child version - const untrackedEntities = await lix.db - .selectFrom("state_all") - .where("version_id", "=", childVersion.id) - .where("entity_id", "in", ["inherited-entity", "non-inherited-entity"]) - .where("untracked", "=", true) + const stateAfterUpdate = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "e0") .selectAll() .execute(); - expectDeterministic(untrackedEntities).toHaveLength(2); - - // 4. Check that both entities have untracked change_id - for (const entity of untrackedEntities) { - expectDeterministic(entity.change_id).toBe("untracked"); - } + expect(stateAfterUpdate).toHaveLength(1); + expect(stateAfterUpdate[0]?.created_at).toBeDefined(); + expect(stateAfterUpdate[0]?.updated_at).toBeDefined(); - // 5. 
Verify specific entities - const inheritedEntity = untrackedEntities.find( - (e) => e.entity_id === "inherited-entity" - ); - const nonInheritedEntity = untrackedEntities.find( - (e) => e.entity_id === "non-inherited-entity" + // created_at should remain the same + expect(stateAfterUpdate[0]?.created_at).toBe( + stateAfterInsert[0]?.created_at ); - expectDeterministic(inheritedEntity).toBeDefined(); - expectDeterministic(nonInheritedEntity).toBeDefined(); - - // Both inherited and non-inherited untracked entities should have change_id = "untracked" - expectDeterministic(inheritedEntity?.change_id).toBe("untracked"); - expectDeterministic(nonInheritedEntity?.change_id).toBe("untracked"); + // updated_at should be different (newer) + expect(stateAfterUpdate[0]?.updated_at).not.toBe( + stateAfterInsert[0]?.updated_at + ); } ); simulationTest( - "state version_id defaults active version", - async ({ openSimulatedLix, expectDeterministic }) => { + "untracked insert then delete within same transaction leaves no residue via internal_state_vtable", + async ({ openSimulatedLix }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + const mockSchema: LixSchemaDefinition = { - "x-lix-key": "mock_schema", + "x-lix-key": "key_value", "x-lix-version": "1.0", type: "object", additionalProperties: false, properties: { - value: { - type: "string", - }, + key: { type: "string" }, + value: { type: ["boolean", "null"] }, }, }; + await lix.db + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); + + const db = lix.db as unknown as Kysely; + + const active = await lix.db + .selectFrom("active_version") + .selectAll() + .executeTakeFirstOrThrow(); + + await db.transaction().execute(async (trx) => { + await trx + .insertInto("internal_state_vtable") + .values({ + entity_id: "tx_skip_flag_untracked", + file_id: "system", + schema_key: "key_value", + plugin_key: "lix_own_entity", + schema_version: "1.0", + version_id: (active as any).version_id ?? (active as any).id, + snapshot_content: JSON.stringify({ + key: "tx_skip_flag_untracked", + value: true, + }), + untracked: 1, // SQLite uses INTEGER for boolean + }) + .execute(); + + await trx + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "tx_skip_flag_untracked") + .where( + "version_id", + "=", + (active as any).version_id ?? (active as any).id + ) + .execute(); + }); + + const remaining = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "tx_skip_flag_untracked") + .where( + "version_id", + "=", + (active as any).version_id ?? 
(active as any).id + ) + .where("snapshot_content", "is not", null) + .selectAll() + .execute(); + + expect(remaining).toHaveLength(0); + } +); + +simulationTest( + "state is separated by version via internal_state_vtable", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], }); - await lix.db - .insertInto("stored_schema") - .values({ value: mockSchema }) - .execute(); + await createVersion({ lix, id: "version_a" }); + await createVersion({ lix, id: "version_b" }); - // Get the active version ID to verify it gets auto-filled - const activeVersion = await lix.db - .selectFrom("active_version") - .select("version_id") - .executeTakeFirstOrThrow(); + const db = lix.db as unknown as Kysely; - // Insert into state view without specifying version_id - // This should auto-fill with the active version - await lix.db - .insertInto("state") - .values({ - entity_id: "entity0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", - schema_version: "1.0", - snapshot_content: { value: "initial content" }, - }) + await db + .insertInto("internal_state_vtable") + .values([ + { + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "mock_plugin", + schema_version: "1.0", + snapshot_content: JSON.stringify({ + value: "hello world from version a", + }), + version_id: "version_a", + untracked: 0, + }, + { + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "mock_plugin", + schema_version: "1.0", + snapshot_content: JSON.stringify({ + value: "hello world from version b", + }), + version_id: "version_b", + untracked: 0, + }, + ]) .execute(); - // Verify the entity was inserted with the correct version_id - const insertedEntity = await lix.db - .selectFrom("state") - .where("entity_id", "=", "entity0") + const stateAfterInserts = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema") + .where("entity_id", "=", "e0") + .where("snapshot_content", "is not", null) .selectAll() .execute(); - expectDeterministic(insertedEntity).toHaveLength(1); - expectDeterministic(insertedEntity[0]).toMatchObject({ - entity_id: "entity0", - file_id: "f0", - schema_key: "mock_schema", - plugin_key: "lix_own_entity", - schema_version: "1.0", - snapshot_content: { value: "initial content" }, - }); + expect(stateAfterInserts).toHaveLength(2); - // Verify the version_id was auto-filled with the active version - const entityInStateAll = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "entity0") - .select("version_id") - .executeTakeFirstOrThrow(); + // The vtable returns snapshot_content as string (JSON), we need to parse it + const content0 = stateAfterInserts[0]!.snapshot_content; + const content1 = stateAfterInserts[1]!.snapshot_content; - expectDeterministic(entityInStateAll.version_id).toBe( - activeVersion.version_id - ); + expect(content0).toEqual({ + value: "hello world from version a", + }); + expect(content1).toEqual({ + value: "hello world from version b", + }); - // Test update operation - await lix.db - .updateTable("state") - .where("entity_id", "=", "entity0") + // Verify timestamps are present + expect(stateAfterInserts[0]?.created_at).toBeDefined(); + expect(stateAfterInserts[0]?.updated_at).toBeDefined(); + expect(stateAfterInserts[1]?.created_at).toBeDefined(); + expect(stateAfterInserts[1]?.updated_at).toBeDefined(); 
+ + await db + .updateTable("internal_state_vtable") .set({ - snapshot_content: { value: "updated content" }, + snapshot_content: JSON.stringify({ + value: "hello world from version b UPDATED", + }), }) + .where("entity_id", "=", "e0") + .where("schema_key", "=", "mock_schema") + .where("version_id", "=", "version_b") .execute(); - // Verify update worked - const updatedEntity = await lix.db - .selectFrom("state") - .where("entity_id", "=", "entity0") + const stateAfterUpdate = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema") + .where("entity_id", "=", "e0") + .where("snapshot_content", "is not", null) .selectAll() .execute(); - expectDeterministic(updatedEntity[0]?.snapshot_content).toEqual({ - value: "updated content", + expect(stateAfterUpdate).toHaveLength(2); + + const updateContent0 = stateAfterUpdate[0]!.snapshot_content; + const updateContent1 = stateAfterUpdate[1]!.snapshot_content; + + expect(updateContent0).toEqual({ + value: "hello world from version a", + }); + expect(updateContent1).toEqual({ + value: "hello world from version b UPDATED", }); - // Test delete operation - await lix.db - .deleteFrom("state") - .where("entity_id", "=", "entity0") + await db + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_b") .execute(); - // Verify delete worked - const deletedEntity = await lix.db - .selectFrom("state") - .where("entity_id", "=", "entity0") + const stateAfterDelete = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema") + .where("entity_id", "=", "e0") + .where("snapshot_content", "is not", null) .selectAll() .execute(); - expectDeterministic(deletedEntity).toHaveLength(0); + expect(stateAfterDelete).toHaveLength(1); + expect(stateAfterDelete[0]?.version_id).toBe("version_a"); + + const deleteContent = stateAfterDelete[0]!.snapshot_content; + + expect(deleteContent).toEqual({ + value: "hello world from version a", + }); } ); -// https://github.com/opral/lix-sdk/issues/344 simulationTest( - "deleting key_value entities from state should not cause infinite loop", - async ({ openSimulatedLix, expectDeterministic }) => { + "created_at and updated_at are version specific via internal_state_vtable", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { @@ -2914,349 +2990,388 @@ simulationTest( ], }); - // 1. Insert key_value in global version (tracked) + await createVersion({ lix, id: "version_a" }); + await createVersion({ lix, id: "version_b" }); + + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "mock_schema", + "x-lix-version": "1.0", + additionalProperties: false, + type: "object", + properties: { + value: { + type: "string", + }, + }, + }; + await lix.db - .insertInto("key_value_all") + .insertInto("stored_schema") + .values({ value: mockSchema }) + .execute(); + + const db = lix.db as unknown as Kysely; + + // Insert entity in version A + await db + .insertInto("internal_state_vtable") .values({ - key: "test-key-global", - value: "global-tracked-value", - lixcol_version_id: "global", + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "lix_own_entity", + schema_version: "1.0", + version_id: "version_a", + snapshot_content: JSON.stringify({ + value: "value in version a", + }), + untracked: 0, }) .execute(); - // 2. 
Insert key_value in global version (untracked) - await lix.db - .insertInto("key_value_all") + // Insert same entity in version B + await db + .insertInto("internal_state_vtable") .values({ - key: "test-key-global-untracked", - value: "global-untracked-value", - lixcol_version_id: "global", - lixcol_untracked: true, + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "lix_own_entity", + schema_version: "1.0", + version_id: "version_b", + snapshot_content: JSON.stringify({ + value: "value in version b", + }), + untracked: 0, }) .execute(); - // 3. Insert key_value in active version (tracked) - await lix.db - .insertInto("key_value") - .values({ - key: "test-key-active", - value: "active-tracked-value", - }) - .execute(); + const stateVersionA = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_a") + .selectAll() + .execute(); + + const stateVersionB = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_b") + .selectAll() + .execute(); + + expect(stateVersionA).toHaveLength(1); + expect(stateVersionB).toHaveLength(1); + + // Both should have timestamps + expect(stateVersionA[0]?.created_at).toBeDefined(); + expect(stateVersionA[0]?.updated_at).toBeDefined(); + expect(stateVersionB[0]?.created_at).toBeDefined(); + expect(stateVersionB[0]?.updated_at).toBeDefined(); + + // the same entity has been inserted but with different changes + expect(stateVersionA[0]?.created_at).not.toBe(stateVersionB[0]?.created_at); - // 4. Insert key_value in active version (untracked) - await lix.db - .insertInto("key_value") - .values({ - key: "test-key-active-untracked", - value: "active-untracked-value", - lixcol_untracked: true, + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ + value: "updated value in version b", + }), }) + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_b") .execute(); - // Verify all entities exist before deletion (including inherited) - const entitiesBeforeDelete = await lix.db - .selectFrom("state") - .where("schema_key", "=", "lix_key_value") - .where("entity_id", "like", "test-key-%") + const updatedStateVersionA = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_a") .selectAll() .execute(); - // state view shows active version entities + inherited from global - expectDeterministic(entitiesBeforeDelete).toHaveLength(4); - - // Delete all key_value entities - // this is the reproduction of the infinite loop issue - await lix.db - .deleteFrom("state") - .where("schema_key", "=", "lix_key_value") - .execute(); - - // Verify all entities are deleted - const keyValueAfterDelete = await lix.db - .selectFrom("state") - .where("schema_key", "=", "lix_key_value") - .where("entity_id", "like", "test-key-%") + const updatedStateVersionB = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_b") .selectAll() .execute(); - expectDeterministic(keyValueAfterDelete).toHaveLength(0); + // Version A should remain unchanged + expect(updatedStateVersionA[0]?.updated_at).toBe( + stateVersionA[0]?.updated_at + ); + + // Version B should have updated timestamp + expect(updatedStateVersionB[0]?.updated_at).not.toBe( + stateVersionB[0]?.updated_at + ); } ); -// see https://github.com/opral/lix-sdk/issues/359 simulationTest( - "commit_id in state should 
be from the real auto-commit, not the working commit", - async ({ openSimulatedLix, expectDeterministic }) => { + "state appears in both versions when they share the same commit via internal_state_vtable", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], }); - // Get the active version with its commit_id and working_commit_id - const activeVersion = await lix.db - .selectFrom("active_version") - .innerJoin("version", "version.id", "active_version.version_id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // Verify we have both commit_id and working_commit_id - expectDeterministic(activeVersion.commit_id).toBeTruthy(); - expectDeterministic(activeVersion.working_commit_id).toBeTruthy(); - expectDeterministic(activeVersion.commit_id).not.toBe( - activeVersion.working_commit_id - ); + const versionA = await createVersion({ lix, id: "version_a" }); - const commitsBeforeInsert = await lix.db - .selectFrom("commit") - .select("id") - .execute(); + const db = lix.db as unknown as Kysely; - // Insert some state data - await lix.db - .insertInto("state") + // Insert state into version A + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "test-entity-1", - schema_key: "test_schema", - file_id: "test-file", - plugin_key: "test-plugin", + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "mock_plugin", schema_version: "1.0", - snapshot_content: { value: "initial value" }, + snapshot_content: JSON.stringify({ + value: "shared state", + }), + version_id: "version_a", + untracked: 0, }) .execute(); - const commitsAfterInsert = await lix.db - .selectFrom("commit") - .select("id") - .execute(); - - // two commits for the global and active version - expectDeterministic(commitsAfterInsert.length).toBe( - commitsBeforeInsert.length + 2 - ); - - const activeVersionAfterInsert = await lix.db - .selectFrom("active_version") - .innerJoin("version", "active_version.version_id", "version.id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // Query the state to check the commit_id - const stateAfterInsert = await lix.db - .selectFrom("state") - .where("entity_id", "=", "test-entity-1") - .select(["entity_id", "commit_id"]) + const versionAAfterInsert = await lix.db + .selectFrom("version") + .where("id", "=", versionA.id) + .selectAll() .executeTakeFirstOrThrow(); - // The commit_id should NOT be the working_commit_id - expectDeterministic(stateAfterInsert.commit_id).not.toBe( - activeVersionAfterInsert.working_commit_id - ); + const versionB = await createVersionFromCommit({ + lix, + id: "version_b", + commit: { id: versionAAfterInsert.commit_id }, + }); - // The commit_id should be the auto-commit ID (not the working commit) - expectDeterministic(stateAfterInsert.commit_id).toBe( - activeVersionAfterInsert.commit_id - ); + expect(versionB.commit_id).toBe(versionAAfterInsert.commit_id); - // Update the state to trigger another auto-commit - await lix.db - .updateTable("state") - .where("entity_id", "=", "test-entity-1") - .set({ snapshot_content: { value: "updated value" } }) + const stateInBothVersions = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema") + .where("entity_id", "=", "e0") + .selectAll() .execute(); - // Check the state again - const stateAfterUpdate = await lix.db - .selectFrom("state") - .where("entity_id", "=", 
"test-entity-1") - .select(["entity_id", "commit_id"]) - .executeTakeFirstOrThrow(); - - const activeVersionAfterUpdate = await lix.db - .selectFrom("active_version") - .innerJoin("version", "active_version.version_id", "version.id") - .selectAll("version") - .executeTakeFirstOrThrow(); - - // The commit_id should now be the new auto-commit ID - expectDeterministic(stateAfterUpdate.commit_id).toBe( - activeVersionAfterUpdate.commit_id + // Both versions should see the same state + expect(stateInBothVersions).toHaveLength(2); + expect(stateInBothVersions[0]?.entity_id).toBe("e0"); + expect(stateInBothVersions[1]?.entity_id).toBe("e0"); + + const sharedContent0 = stateInBothVersions[0]!.snapshot_content; + const sharedContent1 = stateInBothVersions[1]!.snapshot_content; + + expect(sharedContent0).toEqual({ value: "shared state" }); + expect(sharedContent1).toEqual({ value: "shared state" }); + expect(stateInBothVersions[0]?.version_id).toBe("version_a"); + expect(stateInBothVersions[1]?.version_id).toBe("version_b"); + expect(stateInBothVersions[0]?.commit_id).toBe( + versionAAfterInsert.commit_id ); - expectDeterministic(stateAfterUpdate.commit_id).not.toBe( - activeVersion.working_commit_id + expect(stateInBothVersions[1]?.commit_id).toBe( + versionAAfterInsert.commit_id ); } ); simulationTest( - "delete ALL from state view should delete untracked entities", - async ({ openSimulatedLix, expectDeterministic }) => { + "state diverges when versions have common ancestor but different changes via internal_state_vtable", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], }); - // Create a tracked entity in state - await lix.db - .insertInto("state") + // Create base version and add initial state + const baseVersion = await createVersion({ lix, id: "base_version" }); + + const db = lix.db as unknown as Kysely; + + await db + .insertInto("internal_state_vtable") .values({ - entity_id: "tracked-entity", - schema_key: "mock_test_schema", - file_id: "test-file", - plugin_key: "test_plugin", - snapshot_content: { value: "tracked" }, + entity_id: "e0", + file_id: "f0", + schema_key: "mock_schema", + plugin_key: "mock_plugin", schema_version: "1.0", + snapshot_content: JSON.stringify({ + value: "base state", + }), + version_id: "base_version", + untracked: 0, }) .execute(); - // Create an untracked entity in state_all directly with the active version - await lix.db - .insertInto("state") - .values({ - entity_id: "untracked-entity", - schema_key: "mock_test_schema", - file_id: "test-file", - plugin_key: "test_plugin", - snapshot_content: { value: "untracked" }, - schema_version: "1.0", - untracked: true, - }) + // Create two versions from the same base version + await createVersion({ + lix, + id: "version_a", + from: baseVersion, + }); + + await createVersion({ + lix, + id: "version_b", + from: baseVersion, + }); + + const versions = await lix.db + .selectFrom("version") + .where("id", "in", ["base_version", "version_a", "version_b"]) + .select(["id", "commit_id"]) .execute(); - // Verify we have both entities in state view - const beforeDelete = await lix.db - .selectFrom("state") - .where("schema_key", "=", "mock_test_schema") + expect(versions).toHaveLength(3); + + // Both versions should initially see the base state + const initialState = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema") 
+ .where("entity_id", "=", "e0") .selectAll() .execute(); - expectDeterministic(beforeDelete).toHaveLength(2); - expectDeterministic( - beforeDelete.some((e) => e.entity_id === "tracked-entity") - ).toBe(true); - expectDeterministic( - beforeDelete.some((e) => e.entity_id === "untracked-entity") - ).toBe(true); + expect(initialState).toHaveLength(3); // base, version_a, version_b - // Delete ALL from the state view (no WHERE clause) - await lix.db - .deleteFrom("state") - .where("schema_key", "=", "mock_test_schema") + // Update state in version A + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ value: "updated in version A" }), + }) + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_a") .execute(); - // Check if ALL entries were deleted including untracked - const afterDelete = await lix.db - .selectFrom("state") - .where("schema_key", "=", "mock_test_schema") - .selectAll() + // Update state in version B differently + await db + .updateTable("internal_state_vtable") + .set({ + snapshot_content: JSON.stringify({ value: "updated in version B" }), + }) + .where("entity_id", "=", "e0") + .where("version_id", "=", "version_b") .execute(); - // This should be 0 - all entries including untracked should be deleted - expectDeterministic(afterDelete).toHaveLength(0); - - // Also check the underlying state_all table - const stateAfterDelete = await lix.db - .selectFrom("state_all") - .where("schema_key", "=", "mock_test_schema") + const divergedState = await db + .selectFrom("internal_state_vtable") + .where("schema_key", "=", "mock_schema") + .where("entity_id", "=", "e0") .selectAll() + .orderBy("version_id") .execute(); - // All entities should be gone from state_all - expectDeterministic(stateAfterDelete).toHaveLength(0); + // All three versions should have different states + expect(divergedState).toHaveLength(3); + expect(divergedState[0]?.version_id).toBe("base_version"); + expect(divergedState[1]?.version_id).toBe("version_a"); + expect(divergedState[2]?.version_id).toBe("version_b"); + + const divergedContent0 = divergedState[0]!.snapshot_content; + const divergedContent1 = divergedState[1]!.snapshot_content; + const divergedContent2 = divergedState[2]!.snapshot_content; + + expect(divergedContent0).toEqual({ value: "base state" }); + expect(divergedContent1).toEqual({ value: "updated in version A" }); + expect(divergedContent2).toEqual({ value: "updated in version B" }); } ); simulationTest( - "delete from state view with WHERE should delete untracked entities", - async ({ openSimulatedLix, expectDeterministic }) => { + "tracked insert then delete within same transaction leaves no residue via internal_state_vtable", + async ({ openSimulatedLix }) => { const lix = await openSimulatedLix({ keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], }); - // Create a tracked entity in state - await lix.db - .insertInto("state") - .values({ - entity_id: "tracked-entity", - schema_key: "mock_test_schema", - file_id: "test-file", - plugin_key: "test_plugin", - snapshot_content: { value: "tracked" }, - schema_version: "1.0", - }) - .execute(); + const mockSchema: LixSchemaDefinition = { + "x-lix-key": "key_value", + "x-lix-version": "1.0", + type: "object", + additionalProperties: false, + properties: { + key: { type: "string" }, + value: { type: ["boolean", "null"] }, + }, + }; - // Create an untracked entity in state_all directly with 
the active version await lix.db - .insertInto("state") - .values({ - entity_id: "untracked-entity", - schema_key: "mock_test_schema", - file_id: "test-file", - plugin_key: "test_plugin", - snapshot_content: { value: "untracked" }, - schema_version: "1.0", - untracked: true, - }) - .execute(); - - // Verify both entities exist in the state view - const beforeDelete = await lix.db - .selectFrom("state") - .where("schema_key", "=", "mock_test_schema") - .selectAll() + .insertInto("stored_schema") + .values({ value: mockSchema }) .execute(); - expectDeterministic(beforeDelete).toHaveLength(2); - expectDeterministic( - beforeDelete.some((e) => e.entity_id === "tracked-entity") - ).toBe(true); - expectDeterministic( - beforeDelete.some((e) => e.entity_id === "untracked-entity") - ).toBe(true); - - // Delete the untracked entity from the state view with WHERE clause - await lix.db - .deleteFrom("state") - .where("entity_id", "=", "untracked-entity") - .execute(); + const db = lix.db as unknown as Kysely; - // Check if the untracked entry was deleted - const afterDelete = await lix.db - .selectFrom("state") - .where("schema_key", "=", "mock_test_schema") + const active = await lix.db + .selectFrom("active_version") .selectAll() - .execute(); - - // Should only have the tracked entity remaining - expectDeterministic(afterDelete).toHaveLength(1); - expectDeterministic(afterDelete[0]?.entity_id).toBe("tracked-entity"); + .executeTakeFirstOrThrow(); - // Also check the underlying state_all table to confirm deletion - const stateAfterDelete = await lix.db - .selectFrom("state_all") - .where("entity_id", "=", "untracked-entity") - .where("schema_key", "=", "mock_test_schema") + await db.transaction().execute(async (trx) => { + // tracked by default (untracked: 0) + await trx + .insertInto("internal_state_vtable") + .values({ + entity_id: "tx_skip_flag_tracked", + file_id: "system", + schema_key: "key_value", + plugin_key: "lix_own_entity", + schema_version: "1.0", + version_id: (active as any).version_id ?? (active as any).id, + snapshot_content: JSON.stringify({ + key: "tx_skip_flag_tracked", + value: true, + }), + untracked: 0, // tracked (SQLite uses INTEGER for boolean) + }) + .execute(); + + await trx + .deleteFrom("internal_state_vtable") + .where("entity_id", "=", "tx_skip_flag_tracked") + .where( + "version_id", + "=", + (active as any).version_id ?? (active as any).id + ) + .execute(); + }); + + const remaining = await db + .selectFrom("internal_state_vtable") + .where("entity_id", "=", "tx_skip_flag_tracked") + .where( + "version_id", + "=", + (active as any).version_id ?? 
(active as any).id + ) + .where("snapshot_content", "is not", null) .selectAll() .execute(); - // The untracked entry should be gone from state_all too - expectDeterministic(stateAfterDelete).toHaveLength(0); + expect(remaining).toHaveLength(0); } ); diff --git a/packages/lix-sdk/src/state/vtable/vtable.ts b/packages/lix-sdk/src/state/vtable/vtable.ts new file mode 100644 index 0000000000..589a563ea4 --- /dev/null +++ b/packages/lix-sdk/src/state/vtable/vtable.ts @@ -0,0 +1,841 @@ +import type { Kysely, Generated } from "kysely"; +import { sql } from "kysely"; +import type { LixInternalDatabaseSchema } from "../../database/schema.js"; +import type { Lix } from "../../lix/open-lix.js"; +import { executeSync } from "../../database/execute-sync.js"; +import { validateStateMutation } from "./validate-state-mutation.js"; +import { insertTransactionState } from "../transaction/insert-transaction-state.js"; +import { isStaleStateCache } from "../cache/is-stale-state-cache.js"; +import { markStateCacheAsFresh } from "../cache/mark-state-cache-as-stale.js"; +import { populateStateCache } from "../cache/populate-state-cache.js"; +import { parseStatePk, serializeStatePk } from "./primary-key.js"; +import { timestamp } from "../../deterministic/timestamp.js"; +import { insertVTableLog } from "./insert-vtable-log.js"; +import { commit } from "./commit.js"; + +// Type definition for the internal state virtual table +export type InternalStateVTable = { + _pk: Generated; // HIDDEN PRIMARY KEY + entity_id: string; + schema_key: string; + file_id: string; + version_id: string; + plugin_key: string; + snapshot_content: string | null; // JSON string or null for tombstones + schema_version: string; + created_at: Generated; + updated_at: Generated; + inherited_from_version_id: string | null; + change_id: Generated; + untracked: number; // SQLite uses INTEGER for boolean + commit_id: Generated; +}; + +// Virtual table schema definition +const VTAB_CREATE_SQL = `CREATE TABLE x( + _pk HIDDEN TEXT NOT NULL PRIMARY KEY, + entity_id TEXT, + schema_key TEXT, + file_id TEXT, + version_id TEXT, + plugin_key TEXT, + snapshot_content TEXT, + schema_version TEXT, + created_at TEXT, + updated_at TEXT, + inherited_from_version_id TEXT, + change_id TEXT, + untracked INTEGER, + commit_id TEXT +) WITHOUT ROWID;`; + +export function applyStateVTable( + lix: Pick +): void { + const { sqlite, hooks } = lix; + const db = lix.db as unknown as Kysely; + + sqlite.createFunction({ + name: "validate_snapshot_content", + deterministic: true, + arity: 5, + // @ts-expect-error - type mismatch + xFunc: (_ctxPtr: number, ...args: any[]) => { + return validateStateMutation({ + lix: { sqlite, db: db as any }, + schema: args[0] ? JSON.parse(args[0]) : null, + snapshot_content: JSON.parse(args[1]), + operation: args[2] || undefined, + entity_id: args[3] || undefined, + version_id: args[4], + }); + }, + }); + + // Create virtual table using the proper SQLite WASM API (following vtab-experiment pattern) + const capi = sqlite.sqlite3.capi; + const module = new capi.sqlite3_module(); + + // Store cursor state + const cursorStates = new Map(); + + /** + * Flag to prevent recursion when updating cache state. + * + * The guard ensures that while we're marking cache as fresh, any nested state queries + * bypass the cache and use materialized state directly, preventing recursion. + * + * Why is this needed is unclear. Queries are executed in sync. Why concurrent + * reads simultaneously update the cache is not clear. 
Given that state + * materialization is rare, this workaround has been deemed sufficient. + * + * This is a temporary fix and should be revisited in the future. + */ + let isUpdatingCacheState = false; + + module.installMethods( + { + xCreate: ( + dbHandle: any, + _pAux: any, + _argc: number, + _argv: any, + pVTab: any + ) => { + const result = capi.sqlite3_declare_vtab(dbHandle, VTAB_CREATE_SQL); + if (result !== capi.SQLITE_OK) { + return result; + } + + sqlite.sqlite3.vtab.xVtab.create(pVTab); + return capi.SQLITE_OK; + }, + + xConnect: ( + dbHandle: any, + _pAux: any, + _argc: number, + _argv: any, + pVTab: any + ) => { + const result = capi.sqlite3_declare_vtab(dbHandle, VTAB_CREATE_SQL); + if (result !== capi.SQLITE_OK) { + return result; + } + + sqlite.sqlite3.vtab.xVtab.create(pVTab); + return capi.SQLITE_OK; + }, + + xBegin: () => { + // TODO comment in after all internal v-table logic uses underlying state view + // // assert that we are not already in a transaction (the internal_change_in_transaction table is empty) + // const existingChangesInTransaction = executeSync({ + // lix: { sqlite }, + // query: db.selectFrom("internal_change_in_transaction").selectAll(), + // }); + // if (existingChangesInTransaction.length > 0) { + // const errorMessage = "Transaction already in progress"; + // if (canLog()) { + // createLixOwnLogSync({ + // lix: { sqlite, db: db as any }, + // key: "lix_state_xbegin_error", + // level: "error", + // message: `xBegin error: ${errorMessage}`, + // }); + // } + // throw new Error(errorMessage); + // } + }, + + xCommit: () => { + return commit({ lix: { sqlite, db: db as any, hooks } }); + }, + + xRollback: () => { + sqlite.exec({ + sql: "DELETE FROM internal_change_in_transaction", + returnValue: "resultRows", + }); + }, + + xBestIndex: (pVTab: any, pIdxInfo: any) => { + try { + const idxInfo = sqlite.sqlite3.vtab.xIndexInfo(pIdxInfo); + + // Track which columns have equality constraints + const usableConstraints: string[] = []; + let argIndex = 0; + + // Column mapping (matching the CREATE TABLE order in xCreate/xConnect) + const columnMap = [ + "_pk", // 0 (HIDDEN column) + "entity_id", // 1 + "schema_key", // 2 + "file_id", // 3 + "version_id", // 4 + "plugin_key", // 5 + "snapshot_content", // 6 + "schema_version", // 7 + "created_at", // 8 + "updated_at", // 9 + "inherited_from_version_id", // 10 + "change_id", // 11 + "untracked", // 12 + "commit_id", // 13 + ]; + + // Process constraints + // @ts-expect-error - idxInfo.$nConstraint is not defined in the type + for (let i = 0; i < idxInfo.$nConstraint; i++) { + // @ts-expect-error - idxInfo.nthConstraint is not defined in the type + const constraint = idxInfo.nthConstraint(i); + + // Only handle equality constraints that are usable + if ( + constraint.$op === capi.SQLITE_INDEX_CONSTRAINT_EQ && + constraint.$usable + ) { + const columnName = columnMap[constraint.$iColumn]; + if (columnName) { + usableConstraints.push(columnName); + + // Mark this constraint as used + // @ts-expect-error - idxInfo.nthConstraintUsage is not defined in the type + idxInfo.nthConstraintUsage(i).$argvIndex = ++argIndex; + } + } + } + + const fullTableCost = 1000000; // Default cost for full table scan + const fullTableRows = 10000000; + + // Set the index string to pass column names to xFilter + if (usableConstraints.length > 0) { + const idxStr = usableConstraints.join(","); + // @ts-expect-error - idxInfo.$idxStr is not defined in the type + idxInfo.$idxStr = sqlite.sqlite3.wasm.allocCString(idxStr, false); + // 
@ts-expect-error - idxInfo.$needToFreeIdxStr is not defined in the type + idxInfo.$needToFreeIdxStr = 1; // We don't need SQLite to free this string + + // Lower cost when we can use filters (more selective) + // @ts-expect-error - idxInfo.$estimatedCost is not defined in the type + idxInfo.$estimatedCost = + fullTableCost / (usableConstraints.length + 1); + // @ts-expect-error - idxInfo.$estimatedRows is not defined in the type + idxInfo.$estimatedRows = Math.ceil( + fullTableRows / (usableConstraints.length + 1) + ); + } else { + // @ts-expect-error - idxInfo.$needToFreeIdxStr is not defined in the type + idxInfo.$needToFreeIdxStr = 0; + + // Higher cost for full table scan + // @ts-expect-error - idxInfo.$estimatedCost is not defined in the type + idxInfo.$estimatedCost = fullTableCost; + // @ts-expect-error - idxInfo.$estimatedRows is not defined in the type + idxInfo.$estimatedRows = fullTableRows; + } + + return capi.SQLITE_OK; + } finally { + // Always log timing even if error occurs + } + }, + + xDisconnect: () => { + return capi.SQLITE_OK; + }, + + xDestroy: () => { + return capi.SQLITE_OK; + }, + + xOpen: (_pVTab: any, pCursor: any) => { + const cursor = sqlite.sqlite3.vtab.xCursor.create(pCursor); + cursorStates.set(cursor.pointer, { + results: [], + rowIndex: 0, + }); + return capi.SQLITE_OK; + }, + + xClose: (pCursor: any) => { + cursorStates.delete(pCursor); + return capi.SQLITE_OK; + }, + + xFilter: ( + pCursor: any, + idxNum: number, + idxStrPtr: number, + argc: number, + argv: any + ) => { + const cursorState = cursorStates.get(pCursor); + const idxStr = sqlite.sqlite3.wasm.cstrToJs(idxStrPtr); + + // Debug: Track recursion depth + const recursionKey = "_vtab_recursion_depth"; + // @ts-expect-error - using global for debugging + const currentDepth = (globalThis[recursionKey] || 0) + 1; + // @ts-expect-error - using global for debugging + globalThis[recursionKey] = currentDepth; + + if (currentDepth > 10) { + // @ts-expect-error - using global for debugging + globalThis[recursionKey] = 0; // Reset + throw new Error( + `Virtual table recursion depth exceeded: ${currentDepth}` + ); + } + + try { + // Extract filter arguments if provided + const filters: Record = {}; + if (argc > 0 && argv) { + const args = sqlite.sqlite3.capi.sqlite3_values_to_js(argc, argv); + // Parse idxStr to understand which columns are being filtered + // idxStr format: "column1,column2,..." + if (idxStr) { + const columns = idxStr.split(",").filter((c) => c.length > 0); + for (let i = 0; i < Math.min(columns.length, args.length); i++) { + if (args[i] !== null) { + filters[columns[i]!] 
= args[i]; // Keep original type + } + } + } + } + + // If we're updating cache state, we must use resolved state view directly to avoid recursion + if (isUpdatingCacheState) { + // Query directly from resolved state (now includes tombstones) + let query = db + .selectFrom("internal_resolved_state_all") + .selectAll(); + + // Apply filters + for (const [column, value] of Object.entries(filters)) { + query = query.where(column as any, "=", value); + } + + const stateResults = executeSync({ + lix: { sqlite }, + query, + }); + + cursorState.results = stateResults || []; + cursorState.rowIndex = 0; + return capi.SQLITE_OK; + } + + // Normal path: check cache staleness + const cacheIsStale = isStaleStateCache({ + lix: { sqlite, db: db as any }, + }); + + // Try cache first - but only if it's not stale + let cacheResults: any[] | null = null; + if (!cacheIsStale) { + // Select directly from resolved state using Kysely (includes tombstones) + let query = db + .selectFrom("internal_resolved_state_all") + .selectAll(); + + // Apply filters + for (const [column, value] of Object.entries(filters)) { + query = query.where(column as any, "=", value); + } + + cacheResults = executeSync({ + lix: { sqlite }, + query, + }); + } + + cursorState.results = cacheResults || []; + cursorState.rowIndex = 0; + + if (cacheIsStale) { + // Populate cache directly with materialized state + populateStateCache({ sqlite, db: db as any }); + + // Do not log here: xFilter can be invoked during SELECT-only paths + // and should avoid writing to the transaction state/logs. + + // Mark cache as fresh after population + isUpdatingCacheState = true; + try { + markStateCacheAsFresh({ lix: { sqlite, db: db as any, hooks } }); + } finally { + isUpdatingCacheState = false; + } + + let query = db + .selectFrom("internal_resolved_state_all") + .selectAll(); + + // Apply filters + for (const [column, value] of Object.entries(filters)) { + query = query.where(column as any, "=", value); + } + + const newResults = executeSync({ + lix: { sqlite }, + query, + }); + cursorState.results = newResults || []; + } + + return capi.SQLITE_OK; + } finally { + // Always decrement recursion depth + // @ts-expect-error - using global for debugging + globalThis[recursionKey] = currentDepth - 1; + } + }, + + xNext: (pCursor: any) => { + const cursorState = cursorStates.get(pCursor); + cursorState.rowIndex++; + return capi.SQLITE_OK; + }, + + xEof: (pCursor: any) => { + const cursorState = cursorStates.get(pCursor); + return cursorState.rowIndex >= cursorState.results.length ? 1 : 0; + }, + + xColumn: (pCursor: any, pContext: any, iCol: number) => { + const cursorState = cursorStates.get(pCursor); + const row = cursorState.results[cursorState.rowIndex]; + + if (!row) { + capi.sqlite3_result_null(pContext); + return capi.SQLITE_OK; + } + + // Handle primary key column (_pk) + if (iCol === 0) { + if (Array.isArray(row)) { + // For array results, _pk is at index 0 + capi.sqlite3_result_js(pContext, row[0]); + } else if (row._pk) { + // If row already has _pk, use it + capi.sqlite3_result_js(pContext, row._pk); + } else { + // Generate primary key from row data + const tag = row.untracked ? 
"U" : "C"; + const primaryKey = serializeStatePk( + tag, + row.file_id, + row.entity_id, + row.version_id + ); + capi.sqlite3_result_js(pContext, primaryKey); + } + return capi.SQLITE_OK; + } + + // Handle array-style results from SQLite exec + let value; + if (Array.isArray(row)) { + // For array results, composite_key is at index 0, so we use iCol directly + value = row[iCol]; + } else { + const columnName = getColumnName(iCol); + value = row[columnName]; + } + + // Handle special cases for null values that might be stored as strings + if ( + value === "null" && + getColumnName(iCol) === "inherited_from_version_id" + ) { + capi.sqlite3_result_null(pContext); + return capi.SQLITE_OK; + } + + if (value === null) { + capi.sqlite3_result_null(pContext); + } else { + capi.sqlite3_result_js(pContext, value); + } + + return capi.SQLITE_OK; + }, + + xRowid: () => { + // For WITHOUT ROWID tables, xRowid should not be called + // But if it is, we return an error + return capi.SQLITE_ERROR; + }, + + xUpdate: (_pVTab: number, nArg: number, ppArgv: any) => { + try { + const _timestamp = timestamp({ lix }); + // Extract arguments using the proper SQLite WASM API + const args = sqlite.sqlite3.capi.sqlite3_values_to_js(nArg, ppArgv); + + // DELETE operation: nArg = 1, args[0] = old primary key + if (nArg === 1) { + const oldPk = args[0] as string; + if (!oldPk) { + throw new Error("Missing primary key for DELETE operation"); + } + + // Use handleStateDelete for all cases - it handles both tracked and untracked + handleStateDelete(lix as any, oldPk, _timestamp); + + return capi.SQLITE_OK; + } + + // INSERT operation: nArg = N+2, args[0] = NULL, args[1] = new primary key + // UPDATE operation: nArg = N+2, args[0] = old primary key, args[1] = new primary key + const isInsert = args[0] === null; + const isUpdate = args[0] !== null; + + if (!isInsert && !isUpdate) { + throw new Error("Invalid xUpdate operation"); + } + + // Extract column values (args[2] through args[N+1]) + // Column order: _pk, entity_id, schema_key, file_id, version_id, plugin_key, + // snapshot_content, schema_version, created_at, updated_at, inherited_from_version_id, change_id, untracked + const entity_id = args[3]; + const schema_key = args[4]; + const file_id = args[5]; + const version_id = args[6]; + const plugin_key = args[7]; + // this is an update where we have a snapshot_content + // the snapshot_content is a JSON string as returned by SQlite + const snapshot_content = args[8] as string; + const schema_version = args[9]; + // Skip created_at (args[10]), updated_at (args[11]), inherited_from_version_id (args[12]), change_id (args[13]) + const untracked = args[14] ?? false; + + // assert required fields + if (!entity_id || !schema_key || !file_id || !plugin_key) { + throw new Error("Missing required fields for state mutation"); + } + + if (!version_id) { + throw new Error("version_id is required for state mutation"); + } + + // Call validation function (same logic as triggers) + const storedSchema = getStoredSchema(lix as any, schema_key); + + validateStateMutation({ + lix: lix as any, + schema: storedSchema ? JSON.parse(storedSchema) : null, + snapshot_content: JSON.parse(snapshot_content), + operation: isInsert ? 
"insert" : "update", + entity_id: String(entity_id), + version_id: String(version_id), + untracked: Boolean(untracked), + }); + + // Use insertTransactionState which handles both tracked and untracked entities + insertTransactionState({ + lix: lix as any, + timestamp: _timestamp, + data: [ + { + entity_id: String(entity_id), + schema_key: String(schema_key), + file_id: String(file_id), + plugin_key: String(plugin_key), + snapshot_content, + schema_version: String(schema_version), + version_id: String(version_id), + untracked: Boolean(untracked), + }, + ], + }); + + // TODO: This cache copying logic is a temporary workaround for shared commits. + // The proper solution requires improving cache miss logic to handle commit sharing + // without duplicating entries. See: https://github.com/opral/lix-sdk/issues/309 + // + // Handle cache copying for new versions that share commits (v2 cache) + if (isInsert && String(schema_key) === "lix_version") { + const versionData = JSON.parse(snapshot_content); + const newVersionId = versionData.id; + const commitId = versionData.commit_id; + + if (newVersionId && commitId) { + // Find other versions that point to the same commit + const existingVersionsWithSameCommit = sqlite.exec({ + sql: ` + SELECT json_extract(snapshot_content, '$.id') as version_id + FROM internal_state_cache + WHERE schema_key = 'lix_version' + AND json_extract(snapshot_content, '$.commit_id') = ? + AND json_extract(snapshot_content, '$.id') != ? + `, + bind: [commitId, newVersionId], + returnValue: "resultRows", + }); + + // If there are existing versions with the same commit, copy their cache entries + if ( + existingVersionsWithSameCommit && + existingVersionsWithSameCommit.length > 0 + ) { + const sourceVersionId = existingVersionsWithSameCommit[0]![0]; // Take first existing version + + // Get all unique schema keys from the source version + const schemaKeys = sqlite.exec({ + sql: `SELECT DISTINCT schema_key FROM internal_state_cache WHERE version_id = ? AND schema_key != 'lix_version'`, + bind: [sourceVersionId], + returnValue: "resultRows", + }) as string[][]; + + // Copy cache entries for each schema key to the appropriate physical table + for (const row of schemaKeys || []) { + const sourceSchemaKey = row[0]; + if (!sourceSchemaKey) continue; + const sanitizedSchemaKey = sourceSchemaKey.replace( + /[^a-zA-Z0-9]/g, + "_" + ); + const tableName = `internal_state_cache_${sanitizedSchemaKey}`; + + // Check if table exists first + const tableExists = sqlite.exec({ + sql: `SELECT 1 FROM sqlite_schema WHERE type='table' AND name=?`, + bind: [tableName], + returnValue: "resultRows", + }); + + if (tableExists && tableExists.length > 0) { + // Copy entries from source version to new version using v2 cache structure + sqlite.exec({ + sql: ` + INSERT OR IGNORE INTO ${tableName} + (entity_id, schema_key, file_id, version_id, plugin_key, snapshot_content, schema_version, created_at, updated_at, inherited_from_version_id, inheritance_delete_marker, change_id, commit_id) + SELECT + entity_id, schema_key, file_id, ?, plugin_key, snapshot_content, schema_version, created_at, updated_at, + CASE + WHEN inherited_from_version_id IS NULL THEN ? + ELSE inherited_from_version_id + END as inherited_from_version_id, + inheritance_delete_marker, change_id, commit_id + FROM ${tableName} + WHERE version_id = ? + `, + bind: [newVersionId, sourceVersionId, sourceVersionId], + }); + } + } + } + } + } + return capi.SQLITE_OK; + } catch (error) { + const errorMessage = + error instanceof Error ? 
error.message : String(error); + + // Log error for debugging + insertVTableLog({ + lix, + timestamp: timestamp({ lix }), + key: "lix_state_xupdate_error", + level: "error", + message: `xUpdate error: ${errorMessage}`, + }); + + throw error; // Re-throw to propagate error + } + }, + }, + false + ); + + // Register the vtable under a clearer internal name + capi.sqlite3_create_module( + sqlite.pointer!, + "internal_state_vtable", + module, + 0 + ); + + // Create the internal vtable (raw state surface) + sqlite.exec( + `CREATE VIRTUAL TABLE IF NOT EXISTS internal_state_vtable USING internal_state_vtable();` + ); +} + +export function handleStateDelete( + lix: Pick, + primaryKey: string, + timestamp: string +): void { + // Query the row to delete using the resolved state view with Kysely + const rowToDelete = executeSync({ + lix, + query: (lix.db as unknown as Kysely) + .selectFrom("internal_resolved_state_all") + .select([ + "entity_id", + "schema_key", + "file_id", + "version_id", + "plugin_key", + "snapshot_content", + "schema_version", + "untracked", + "inherited_from_version_id", + ]) + .where("_pk", "=", primaryKey), + })[0]; + + if (!rowToDelete) { + throw new Error(`Row not found for primary key: ${primaryKey}`); + } + + const entity_id = rowToDelete.entity_id; + const schema_key = rowToDelete.schema_key; + const file_id = rowToDelete.file_id; + const version_id = rowToDelete.version_id; + const plugin_key = rowToDelete.plugin_key; + const snapshot_content = rowToDelete.snapshot_content; + const schema_version = rowToDelete.schema_version; + const untracked = rowToDelete.untracked; + + // If entity is untracked, handle differently based on its source (transaction/inherited/direct) + if (untracked) { + // Parse the primary key tag to determine where the row is coming from in the resolved view + const parsed = parseStatePk(primaryKey); + + if (parsed.tag === "UI") { + // Inherited untracked: create a tombstone to block inheritance + insertTransactionState({ + lix, + timestamp, + data: [ + { + entity_id: String(entity_id), + schema_key: String(schema_key), + file_id: String(file_id), + plugin_key: String(plugin_key), + snapshot_content: null, // Deletion tombstone + schema_version: String(schema_version), + version_id: String(version_id), + untracked: true, + }, + ], + }); + return; + } + + if (parsed.tag === "T" || parsed.tag === "TI") { + // The row is coming from the transaction stage (pending untracked insert/update). + // Overwrite the pending transaction row with a deletion so the commit drops it + // and nothing is persisted to the untracked table. + insertTransactionState({ + lix, + timestamp, + data: [ + { + entity_id: String(entity_id), + schema_key: String(schema_key), + file_id: String(file_id), + plugin_key: String(plugin_key), + snapshot_content: null, // mark as delete in txn + schema_version: String(schema_version), + version_id: String(version_id), + untracked: true, + }, + ], + }); + return; + } + + // Direct untracked in this version (U tag) – delete from the untracked table immediately + executeSync({ + lix, + query: (lix.db as unknown as Kysely) + .deleteFrom("internal_state_all_untracked") + .where("entity_id", "=", String(entity_id)) + .where("schema_key", "=", String(schema_key)) + .where("file_id", "=", String(file_id)) + .where("version_id", "=", String(version_id)), + }); + return; + } + + const storedSchema = getStoredSchema(lix, schema_key); + + validateStateMutation({ + lix, + schema: storedSchema ? 
JSON.parse(storedSchema) : null, + snapshot_content: JSON.parse(snapshot_content as string), + operation: "delete", + entity_id: String(entity_id), + version_id: String(version_id), + }); + + insertTransactionState({ + lix, + timestamp, + data: [ + { + entity_id: String(entity_id), + schema_key: String(schema_key), + file_id: String(file_id), + plugin_key: String(plugin_key), + snapshot_content: null, // No snapshot content for DELETE + schema_version: String(schema_version), + version_id: String(version_id), + untracked: false, // tracked entity + }, + ], + }); +} + +// Helper functions for the virtual table + +function getStoredSchema( + lix: Pick, + schemaKey: any +): string | null { + // Query directly from internal_resolved_state_all to avoid vtable recursion + const result = executeSync({ + lix, + query: (lix.db as unknown as Kysely) + .selectFrom("internal_resolved_state_all") + .select(sql`json_extract(snapshot_content, '$.value')`.as("value")) + .where("schema_key", "=", "lix_stored_schema") + .where( + sql`json_extract(snapshot_content, '$.key')`, + "=", + String(schemaKey) + ) + .where("snapshot_content", "is not", null) + .limit(1), + }); + + return result && result.length > 0 ? result[0]!.value : null; +} + +function getColumnName(columnIndex: number): string { + const columns = [ + "_pk", + "entity_id", + "schema_key", + "file_id", + "version_id", + "plugin_key", + "snapshot_content", + "schema_version", + "created_at", + "updated_at", + "inherited_from_version_id", + "change_id", + "untracked", + "commit_id", + ]; + return columns[columnIndex] || "unknown"; +} diff --git a/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.test.ts b/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.test.ts index 625e64d847..110fdb462e 100644 --- a/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.test.ts +++ b/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.test.ts @@ -1,4 +1,4 @@ -import { expect, test } from "vitest"; +import { expect, test, vi } from "vitest"; import { simulationTest, normalSimulation, @@ -6,11 +6,16 @@ import { } from "./simulation-test.js"; import { timestamp } from "../../deterministic/timestamp.js"; import { nextDeterministicSequenceNumber } from "../../deterministic/sequence.js"; +import * as clearCacheModule from "../../state/cache/clear-state-cache.js"; + test("cache miss simulation test discovery", () => {}); simulationTest( "cache miss simulation clears cache before every select", async ({ openSimulatedLix }) => { + // Spy on clearStateCache + const clearStateCacheSpy = vi.spyOn(clearCacheModule, "clearStateCache"); + const lix = await openSimulatedLix({ keyValues: [ { @@ -19,39 +24,29 @@ simulationTest( lixcol_version_id: "global", lixcol_untracked: true, }, - { - key: "lix_log_levels", - value: ["debug"], - lixcol_version_id: "global", - lixcol_untracked: true, - }, ], }); + // Reset the spy counter after initialization + clearStateCacheSpy.mockClear(); + // Insert test data await lix.db .insertInto("key_value") .values([{ key: "test_1", value: "value_1" }]) .execute(); - const timeBefore = timestamp({ lix }); - + // First select query await lix.db .selectFrom("key_value") .where("key", "=", "test_1") .selectAll() .execute(); - const cacheMissAfter = await lix.db - .selectFrom("log") - .where("key", "=", "lix_state_cache_miss") - .where("lixcol_created_at", ">", timeBefore) - .selectAll() - .execute(); + // Second select query + await 
lix.db.selectFrom("key_value").selectAll().execute(); - // greater than one because the entity view key value - // might fire off subqueries that also trigger cache misses - expect(cacheMissAfter.length).toBeGreaterThan(1); + expect(clearStateCacheSpy).toHaveBeenCalledTimes(2); }, { simulations: [cacheMissSimulation], diff --git a/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.ts b/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.ts index 7268e110a0..194273692b 100644 --- a/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.ts +++ b/packages/lix-sdk/src/test-utilities/simulation-test/cache-miss-simulation.ts @@ -1,5 +1,7 @@ import { vi } from "vitest"; import * as cacheModule from "../../state/cache/mark-state-cache-as-stale.js"; +import { clearStateCache } from "../../state/cache/clear-state-cache.js"; +import * as insertVTableLogModule from "../../state/vtable/insert-vtable-log.js"; import type { SimulationTestDef } from "./simulation-test.js"; const CACHE_TIMESTAMP = "2099-12-31T23:59:59.999Z"; @@ -7,6 +9,7 @@ const CACHE_TIMESTAMP = "2099-12-31T23:59:59.999Z"; // Store original functions const originalMarkStale = cacheModule.markStateCacheAsStale; const originalMarkFresh = cacheModule.markStateCacheAsFresh; +const originalInsertVTableLog = insertVTableLogModule.insertVTableLog; // Create wrapped versions that inject the fixed timestamp const wrappedMarkStale = (args: any) => { @@ -17,6 +20,16 @@ const wrappedMarkFresh = (args: any) => { return originalMarkFresh({ ...args, timestamp: CACHE_TIMESTAMP }); }; +const wrappedInsertVTableLog = (args: any) => { + // Skip cache miss logs entirely to avoid consuming sequence numbers + if (args.key === "lix_state_cache_miss") { + // Don't insert the log at all + return; + } + // Pass through all other logs unchanged + return originalInsertVTableLog(args); +}; + /** * Cache miss simulation - Clears cache before every select operation to force re-materialization from changes. 
* This tests that state can be correctly reconstructed from the change log @@ -25,13 +38,16 @@ const wrappedMarkFresh = (args: any) => { export const cacheMissSimulation: SimulationTestDef = { name: "cache miss", setup: async (lix) => { - // Mock the cache marking functions to use our wrapped versions + // Set up mocks for cache operations and logs vi.spyOn(cacheModule, "markStateCacheAsStale").mockImplementation( wrappedMarkStale ); vi.spyOn(cacheModule, "markStateCacheAsFresh").mockImplementation( wrappedMarkFresh ); + vi.spyOn(insertVTableLogModule, "insertVTableLog").mockImplementation( + wrappedInsertVTableLog + ); // Don't clear cache on bootup - it consumes sequence numbers // The cache will be cleared before each query anyway @@ -42,13 +58,8 @@ export const cacheMissSimulation: SimulationTestDef = { // Override execute query.execute = async function (...args: any[]) { - // Clear cache before executing select - lix.sqlite.exec({ - sql: "DELETE FROM internal_state_cache", - returnValue: "resultRows", - }); - - cacheModule.markStateCacheAsStale({ lix, timestamp: CACHE_TIMESTAMP }); + // This forces re-materialization from changes + clearStateCache({ lix, timestamp: CACHE_TIMESTAMP }); // Call the original execute return originalExecute.apply(this, args); diff --git a/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.test.ts b/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.test.ts index d8d32df7f3..47941a2fbe 100644 --- a/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.test.ts +++ b/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.test.ts @@ -4,7 +4,7 @@ import { type SimulationTestDef, normalSimulation, } from "./simulation-test.js"; -import { commit } from "../../state/commit.js"; +import { commit } from "../../state/vtable/commit.js"; test("simulation test discovery", () => {}); @@ -123,7 +123,7 @@ describe("database operations are deterministic", async () => { keyValues: [ { key: "lix_deterministic_mode", - value: { enabled: true, bootstrap: true }, + value: { enabled: true }, lixcol_version_id: "global", }, ], diff --git a/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.ts b/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.ts index fd29340987..090b4ce487 100644 --- a/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.ts +++ b/packages/lix-sdk/src/test-utilities/simulation-test/simulation-test.ts @@ -104,6 +104,8 @@ export function simulationTest( const expectedValues = new Map(); test.each(simulationsToRun)(`${name} > $name`, async (simulation) => { + vi.restoreAllMocks(); + let callIndex = 0; const isFirstSimulation = simulation === simulationsToRun[0]; @@ -171,6 +173,6 @@ Use regular expect() for simulation-specific assertions. 
openSimulatedLix, expectDeterministic: deterministicExpect as ExpectDeterministic, }); - vi.resetAllMocks(); + vi.restoreAllMocks(); // Restore original implementations, not just reset }); } diff --git a/packages/lix-sdk/src/thread/create-thread-comment.test.ts b/packages/lix-sdk/src/thread/create-thread-comment.test.ts index 506eed3aad..416a3cda18 100644 --- a/packages/lix-sdk/src/thread/create-thread-comment.test.ts +++ b/packages/lix-sdk/src/thread/create-thread-comment.test.ts @@ -111,12 +111,14 @@ test("defaults parent_id to the last comment when not provided", async () => { expect(fourthComment.parent_id).toBe(firstComment.id); - // Create fifth comment without parent_id (should default to third comment, which is still the leaf) + // Create fifth comment without parent_id + // Should default to the most recently created leaf (fourth comment) + // This follows common UI patterns where replies go to the most recent activity const fifthComment = await createThreadComment({ lix, thread_id: thread.id, body: fromPlainText("Fifth comment"), }); - expect(fifthComment.parent_id).toBe(thirdComment.id); + expect(fifthComment.parent_id).toBe(fourthComment.id); }); diff --git a/packages/lix-sdk/src/thread/create-thread-comment.ts b/packages/lix-sdk/src/thread/create-thread-comment.ts index f312cf133a..b4dde0d153 100644 --- a/packages/lix-sdk/src/thread/create-thread-comment.ts +++ b/packages/lix-sdk/src/thread/create-thread-comment.ts @@ -29,9 +29,12 @@ export async function createThreadComment( .select("lixcol_version_id") .executeTakeFirstOrThrow(); - // If parent_id is not provided, find the leaf comment using SQL traversal + // If parent_id is not provided, find the most appropriate parent comment + // Following common UI patterns (GitHub, Slack), we default to the most recently + // created leaf comment, which approximates "most recently active" thread let parentId = args.parent_id; if (parentId === undefined) { + // Find all leaf comments (comments with no children) const leafComment = await trx .selectFrom("thread_comment_all as c1") .where("c1.thread_id", "=", args.thread_id) @@ -52,7 +55,10 @@ export async function createThreadComment( ) ) ) - .select("c1.id") + .select(["c1.id", "c1.lixcol_created_at"]) + // Select the most recently created leaf (approximates most recent activity) + .orderBy("c1.lixcol_created_at", "desc") + .orderBy("c1.id", "desc") // Secondary order by id for absolute determinism .executeTakeFirst(); parentId = leafComment?.id ?? 
null; } diff --git a/packages/lix-sdk/src/version/create-version-from-commit.test.ts b/packages/lix-sdk/src/version/create-version-from-commit.test.ts new file mode 100644 index 0000000000..ea8bea9ee3 --- /dev/null +++ b/packages/lix-sdk/src/version/create-version-from-commit.test.ts @@ -0,0 +1,211 @@ +import { expect, test } from "vitest"; +import { openLix } from "../lix/open-lix.js"; +import { createChangeSet } from "../change-set/create-change-set.js"; +import type { Lix } from "../lix/open-lix.js"; +import type { LixChangeSet } from "../change-set/schema.js"; +import type { LixCommit } from "../commit/schema.js"; +import { uuidV7 } from "../deterministic/uuid-v7.js"; +import { createVersionFromCommit } from "./create-version-from-commit.js"; +import { createVersion } from "./create-version.js"; + +// Local test helper: create a global commit pointing to a given change set +async function createCommit(args: { + lix: Lix; + changeSet: Pick; +}): Promise> { + const commitId = uuidV7({ lix: args.lix }); + await args.lix.db + .insertInto("commit_all") + .values({ + id: commitId, + change_set_id: args.changeSet.id, + lixcol_version_id: "global", + }) + .execute(); + const row = await args.lix.db + .selectFrom("commit_all") + .where("id", "=", commitId) + .where("lixcol_version_id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + return { id: row.id, change_set_id: row.change_set_id }; +} + +// Planning all test cases for createVersionFromCommit (empty bodies for TDD) + +test("creates a version pointing to the given commit", async () => { + const lix = await openLix({}); + + // Create an empty change set and a commit + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + + const commit = await createCommit({ lix, changeSet: cs }); + + const version = await createVersionFromCommit({ lix, commit }); + + expect(version.commit_id).toBe(commit.id); + expect(version.id).toBeDefined(); +}); + +test("accepts commit as { id } object", async () => { + const lix = await openLix({}); + + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + const commit = await createCommit({ lix, changeSet: cs }); + + // Pass a minimal object with only id + const version = await createVersionFromCommit({ + lix, + commit: { id: commit.id }, + }); + + expect(version.commit_id).toBe(commit.id); +}); + +test("sets a fresh working_commit_id backed by an empty change set", async () => { + const lix = await openLix({}); + + // Build a base commit to branch from + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + const base = await createCommit({ lix, changeSet: cs }); + + const v = await createVersionFromCommit({ lix, commit: base }); + + // working commit should be new and different from base commit + expect(v.working_commit_id).toBeDefined(); + expect(v.working_commit_id).not.toBe(base.id); + + // working commit should point to an empty change set + const workingCommit = await lix.db + .selectFrom("commit_all") + .where("id", "=", v.working_commit_id) + .where("lixcol_version_id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + + const elements = await lix.db + .selectFrom("change_set_element_all") + .where("change_set_id", "=", workingCommit.change_set_id) + .where("lixcol_version_id", "=", "global") + .selectAll() + .execute(); + + expect(elements.length).toBe(0); +}); + +test("defaults inheritsFrom to global when omitted", async () => { + const lix = await 
openLix({}); + + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + const commit = await createCommit({ lix, changeSet: cs }); + + const v = await createVersionFromCommit({ lix, commit }); + expect(v.inherits_from_version_id).toBe("global"); +}); + +test("allows explicit inheritsFrom: null (no inheritance)", async () => { + const lix = await openLix({}); + + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + + const commit = await createCommit({ lix, changeSet: cs }); + + const v = await createVersionFromCommit({ + lix, + commit, + inheritsFrom: null, + }); + expect(v.inherits_from_version_id).toBeNull(); +}); + +test("allows explicit inheritsFrom: { version }", async () => { + const lix = await openLix({}); + + // Create a parent version to inherit from + const parent = await createVersion({ lix, name: "parent-version" }); + + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + const commit = await createCommit({ lix, changeSet: cs }); + + const v = await createVersionFromCommit({ + lix, + commit, + inheritsFrom: parent, + }); + + expect(v.inherits_from_version_id).toBe(parent.id); +}); + +test("supports custom id and name", async () => { + const lix = await openLix({}); + + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + const commit = await createCommit({ lix, changeSet: cs }); + + const v = await createVersionFromCommit({ + lix, + commit, + id: "my-version-id", + name: "My Version", + }); + expect(v.id).toBe("my-version-id"); + expect(v.name).toBe("My Version"); +}); + +test("throws when the commit does not exist", async () => { + const lix = await openLix({}); + await expect( + createVersionFromCommit({ lix, commit: { id: "non-existent-commit" } }) + ).rejects.toThrow(); +}); + +test("works within a transaction", async () => { + const lix = await openLix({}); + + const cs = await createChangeSet({ + lix, + lixcol_version_id: "global", + elements: [], + }); + const commit = await createCommit({ lix, changeSet: cs }); + + const v = await lix.db.transaction().execute(async (trx) => { + return createVersionFromCommit({ lix: { ...lix, db: trx }, commit }); + }); + + // Visible outside the transaction and consistent + const readBack = await lix.db + .selectFrom("version") + .selectAll() + .where("id", "=", v.id) + .executeTakeFirstOrThrow(); + expect(readBack.commit_id).toBe(commit.id); +}); diff --git a/packages/lix-sdk/src/version/create-version-from-commit.ts b/packages/lix-sdk/src/version/create-version-from-commit.ts new file mode 100644 index 0000000000..968192c5f2 --- /dev/null +++ b/packages/lix-sdk/src/version/create-version-from-commit.ts @@ -0,0 +1,100 @@ +import type { Lix } from "../lix/open-lix.js"; +import type { LixCommit } from "../commit/schema.js"; +import type { LixVersion } from "./schema.js"; +import { createChangeSet } from "../change-set/create-change-set.js"; +import { uuidV7 } from "../deterministic/uuid-v7.js"; +import { nanoId } from "../deterministic/index.js"; + +/** + * Creates a new version that starts at a specific commit. + * + * - Points the new version's `commit_id` to the provided commit. + * - Generates a fresh `working_commit_id` that references an empty change set (global scope). + * - Does not modify the active version or any other versions. + * - Inheritance lineage is controlled via `inheritsFrom` and defaults to `"global"`. 
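+ *
+ * Sketch of the version row this produces (field names as inserted below; values illustrative):
+ * // { id, name, commit_id: commit.id, working_commit_id: <fresh>, inherits_from_version_id: "global" | parent.id | null }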
+ * + * @param args.lix - The Lix instance. + * @param args.commit - The commit to branch from (only `id` is required). + * @param args.id - Optional explicit version id. + * @param args.name - Optional version name. + * @param args.inheritsFrom - Optional lineage: a parent version to inherit from, or `null` to disable; defaults to `"global"`. + * + * @example + * // Branch from a commit + * const v = await createVersionFromCommit({ lix, commit }); + * + * @example + * // Custom id and name + * const v = await createVersionFromCommit({ lix, commit, id: "my-id", name: "My Branch" }); + * + * @example + * // Inherit from a specific version + * const parent = await createVersion({ lix, name: "base" }); + * const v = await createVersionFromCommit({ lix, commit, inheritsFrom: parent }); + * + * @example + * // Opt-out of inheritance + * const v = await createVersionFromCommit({ lix, commit, inheritsFrom: null }); + * + * @throws If the provided commit id does not exist. + */ +export async function createVersionFromCommit(args: { + lix: Lix; + commit: Pick; + id?: LixVersion["id"]; + name?: LixVersion["name"]; + inheritsFrom?: LixVersion | Pick | null; +}): Promise { + const executeInTransaction = async (trx: Lix["db"]) => { + // Create a working (empty) change set for the new version + const workingCs = await createChangeSet({ + lix: { ...args.lix, db: trx }, + lixcol_version_id: "global", + }); + + // Create a fresh working commit pointing to the working change set + const workingCommitId = uuidV7({ lix: args.lix }); + await trx + .insertInto("commit_all") + .values({ + id: workingCommitId, + change_set_id: workingCs.id, + lixcol_version_id: "global", + }) + .execute(); + + // Determine inheritance + const inherits_from_version_id = + args.inheritsFrom === undefined + ? "global" + : args.inheritsFrom === null + ? null + : args.inheritsFrom.id; + + const versionId = args.id ?? nanoId({ lix: args.lix }); + + // Insert the new version pointing to the provided commit + await trx + .insertInto("version") + .values({ + id: versionId, + name: args.name, + commit_id: args.commit.id, + working_commit_id: workingCommitId, + inherits_from_version_id, + }) + .execute(); + + const newVersion = await trx + .selectFrom("version") + .selectAll() + .where("id", "=", versionId) + .executeTakeFirstOrThrow(); + + return newVersion; + }; + + return args.lix.db.isTransaction + ? 
executeInTransaction(args.lix.db) + : args.lix.db.transaction().execute(executeInTransaction); +} diff --git a/packages/lix-sdk/src/version/create-version.test.ts b/packages/lix-sdk/src/version/create-version.test.ts index a70d3da59d..f33d146333 100644 --- a/packages/lix-sdk/src/version/create-version.test.ts +++ b/packages/lix-sdk/src/version/create-version.test.ts @@ -2,14 +2,14 @@ import { test, expect } from "vitest"; import { openLix } from "../lix/open-lix.js"; import { createVersion } from "./create-version.js"; -test("should create a version with the provided commit_id", async () => { +test("should create a version from a provided version (from)", async () => { const lix = await openLix({}); // Create a source version to get a commit_id const sourceVersion = await createVersion({ lix, name: "source-version" }); const newVersion = await createVersion({ lix, - commit_id: sourceVersion.commit_id, + from: { id: sourceVersion.id }, }); // The new version should have the same commit as the source version @@ -27,7 +27,7 @@ test("should create a version with the specified name", async () => { const newVersion = await createVersion({ lix, - commit_id: sourceVersion.commit_id, + from: { id: sourceVersion.id }, name: versionName, }); @@ -42,7 +42,7 @@ test("should create a version with the specified id", async () => { const newVersion = await createVersion({ lix, - commit_id: sourceVersion.commit_id, + from: { id: sourceVersion.id }, id: "hello world", }); @@ -60,7 +60,7 @@ test("should work within an existing transaction", async () => { const newVersion = await lix.db.transaction().execute(async (trx) => { return createVersion({ lix: { ...lix, db: trx }, // Pass the transaction object - commit_id: sourceVersion.commit_id, + from: { id: sourceVersion.id }, name: versionName, }); }); @@ -78,20 +78,19 @@ test("should work within an existing transaction", async () => { expect(dbVersion.name).toBe(versionName); }); -test("should fail if the provided commit_id does not exist", async () => { +test("should fail if the provided 'from' version does not exist", async () => { const lix = await openLix({}); - const nonExistentCommitId = "non-existent-commit-id"; + const nonExistentVersionId = "non-existent-version-id"; await expect( - createVersion({ lix, commit_id: nonExistentCommitId }) - // Should fail when trying to use non-existent commit + createVersion({ lix, from: { id: nonExistentVersionId } }) ).rejects.toThrow(); }); test("should automatically create inheritance from global version", async () => { const lix = await openLix({}); - // Create a new version without commit_id (should inherit from active version) + // Create a new version (should inherit from active version) const newVersion = await createVersion({ lix, name: "test-version", @@ -103,7 +102,7 @@ test("should automatically create inheritance from global version", async () => expect(newVersion.commit_id).toBeDefined(); }); -test("should default to active version's commit_id when no commit_id is provided", async () => { +test("should default to active version's commit_id when 'from' is omitted or 'active'", async () => { const lix = await openLix({}); // Get the active version @@ -113,7 +112,7 @@ test("should default to active version's commit_id when no commit_id is provided .selectAll("version") .executeTakeFirstOrThrow(); - // Create a new version without commit_id + // Create a new version without from const newVersion = await createVersion({ lix, name: "branched-version", @@ -135,7 +134,7 @@ test("multiple versions created without 
commit_id should all point to active ver .selectAll("version") .executeTakeFirstOrThrow(); - // Create two versions without commit_id + // Create two versions without from const version1 = await createVersion({ lix, name: "version-1", @@ -153,14 +152,14 @@ test("multiple versions created without commit_id should all point to active ver expect(version1.id).not.toBe(version2.id); }); -test("should allow explicit null for inherits_from_version_id", async () => { +test("should allow explicit null for inheritsFrom", async () => { const lix = await openLix({}); // Create a version with explicit null inheritance const version = await createVersion({ lix, name: "standalone-version", - inherits_from_version_id: null, + inheritsFrom: null, }); // Should be null, not "global" diff --git a/packages/lix-sdk/src/version/create-version.ts b/packages/lix-sdk/src/version/create-version.ts index e332a8ed41..00da588cf4 100644 --- a/packages/lix-sdk/src/version/create-version.ts +++ b/packages/lix-sdk/src/version/create-version.ts @@ -1,86 +1,47 @@ -import { createChangeSet } from "../change-set/create-change-set.js"; import { nanoId } from "../deterministic/index.js"; -import { uuidV7 } from "../deterministic/uuid-v7.js"; import type { Lix } from "../lix/open-lix.js"; import type { LixVersion } from "./schema.js"; +import { createVersionFromCommit } from "./create-version-from-commit.js"; /** - * Creates a new version. + * Creates a new version branching from a version's commit id (defaults to active when `from` is omitted). * - * If `commit_id` is provided, the new version will point to that commit. - * Otherwise, defaults to the active version's commit_id. - * - * @example - * // Create a version branching from the current active version - * const version = await createVersion({ lix, name: "feature-branch" }); - * - * @example - * // Create a version pointing to a specific commit - * const version = await createVersion({ lix, commit_id: existingCommitId }); + * For branching from a specific commit id, use `createVersionFromCommit`. */ export async function createVersion(args: { lix: Lix; id?: LixVersion["id"]; - commit_id?: LixVersion["commit_id"]; name?: LixVersion["name"]; - inherits_from_version_id?: LixVersion["inherits_from_version_id"] | null; + from?: LixVersion | Pick; + inheritsFrom?: LixVersion | Pick | null; }): Promise { const executeInTransaction = async (trx: Lix["db"]) => { - const workingCs = await createChangeSet({ - lix: { ...args.lix, db: trx }, - lixcol_version_id: "global", - }); - - let commitId: string; - - if (args.commit_id) { - // Use the provided commit - commitId = args.commit_id; + // Resolve base commit from from/active + let baseCommitId: string; + if (args.from) { + const base = await trx + .selectFrom("version") + .select(["commit_id"]) + .where("id", "=", (args.from as Pick).id) + .executeTakeFirstOrThrow(); + baseCommitId = base.commit_id; } else { - // Default to active version's commit_id - const activeVersion = await trx + const active = await trx .selectFrom("active_version") .innerJoin("version", "version.id", "active_version.version_id") .select("version.commit_id") .executeTakeFirstOrThrow(); - - commitId = activeVersion.commit_id; + baseCommitId = active.commit_id; } - // Create a working commit for the new version - const workingCommitId = uuidV7({ lix: args.lix }); - await trx - .insertInto("commit_all") - .values({ - id: workingCommitId, - change_set_id: workingCs.id, - lixcol_version_id: "global", - }) - .execute(); - - const versionId = args.id ?? 
nanoId({ lix: args.lix }); - - await trx - .insertInto("version") - .values({ - id: versionId, - name: args.name, - commit_id: commitId, - working_commit_id: workingCommitId, - inherits_from_version_id: - args.inherits_from_version_id === undefined - ? "global" - : args.inherits_from_version_id, - }) - .execute(); - - const newVersion = await trx - .selectFrom("version") - .selectAll() - .where("id", "=", versionId) - .executeTakeFirstOrThrow(); - - return newVersion; + // Delegate to the commit-based creator for a single code path + return createVersionFromCommit({ + lix: { ...args.lix, db: trx }, + commit: { id: baseCommitId }, + id: args.id ?? nanoId({ lix: args.lix }), + name: args.name, + inheritsFrom: args.inheritsFrom, + }); }; if (args.lix.db.isTransaction) { diff --git a/packages/lix-sdk/src/version/index.ts b/packages/lix-sdk/src/version/index.ts index a8a8a46fc9..1f97978d74 100644 --- a/packages/lix-sdk/src/version/index.ts +++ b/packages/lix-sdk/src/version/index.ts @@ -5,3 +5,5 @@ export { } from "./schema.js"; export { createVersion } from "./create-version.js"; export { switchVersion } from "./switch-version.js"; +export { createVersionFromCommit } from "./create-version-from-commit.js"; +export { selectVersionDiff } from "./select-version-diff.js"; diff --git a/packages/lix-sdk/src/version/merge-version.test.ts b/packages/lix-sdk/src/version/merge-version.test.ts new file mode 100644 index 0000000000..fe2fbec98c --- /dev/null +++ b/packages/lix-sdk/src/version/merge-version.test.ts @@ -0,0 +1,901 @@ +import { test } from "vitest"; +import { sql } from "kysely"; +import { simulationTest } from "../test-utilities/simulation-test/simulation-test.js"; +import { createVersion } from "./create-version.js"; +import { mergeVersion } from "./merge-version.js"; + +test("simulationTest discovery", () => {}); + +// We will implement mergeVersion step by step. +// For now, outline the test plan with todos (flat list). 
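+
+// Sketch of the call shape exercised by the tests below (assumed API surface for this plan):
+//
+//   const source = await createVersion({ lix, name: "source" });
+//   const target = await createVersion({ lix, name: "target" });
+//   await mergeVersion({ lix, source, target });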
+ +// Core behavior +simulationTest( + "merges created entities from source into target", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Create versions + const source = await createVersion({ lix, name: "source" }); + const target = await createVersion({ lix, name: "target" }); + + // Insert entities only in source + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_created_1", + schema_key: "test_entity", + file_id: "fileA", + version_id: source.id, + plugin_key: "test_plugin", + snapshot_content: { v: "from-source-1" }, + schema_version: "1.0", + }) + .execute(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_created_2", + schema_key: "test_entity", + file_id: "fileA", + version_id: source.id, + plugin_key: "test_plugin", + snapshot_content: { v: "from-source-2" }, + schema_version: "1.0", + }) + .execute(); + + // Execute merge + await mergeVersion({ lix, source, target }); + + // Target should now contain the created entities with source content + const targetRows = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileA") + .where("schema_key", "=", "test_entity") + .select([ + "entity_id", + "schema_key", + "file_id", + "version_id", + "change_id", + "commit_id", + "plugin_key", + "schema_version", + "snapshot_content", + ]) + .execute(); + + expectDeterministic(targetRows.length).toBe(2); + const map = new Map(targetRows.map((r: any) => [r.entity_id, r])); + expectDeterministic(map.get("e_created_1")?.snapshot_content).toEqual({ + v: "from-source-1", + }); + expectDeterministic(map.get("e_created_2")?.snapshot_content).toEqual({ + v: "from-source-2", + }); + + // Ensure no new change rows were created for these entities + // 1) Target change_ids equal source change_ids + const sourceRows = await lix.db + .selectFrom("state_all") + .where("version_id", "=", source.id) + .where("file_id", "=", "fileA") + .where("schema_key", "=", "test_entity") + .select(["entity_id", "change_id"]) + .execute(); + const srcMap = new Map( + sourceRows.map((r: any) => [r.entity_id, r.change_id]) + ); + expectDeterministic(map.get("e_created_1")?.change_id).toBe( + srcMap.get("e_created_1") + ); + expectDeterministic(map.get("e_created_2")?.change_id).toBe( + srcMap.get("e_created_2") + ); + + // 2) The change table contains only the original two user changes for these keys + const changeRowsForKeys = await lix.db + .selectFrom("change") + .where("schema_key", "=", "test_entity") + .where("entity_id", "in", ["e_created_1", "e_created_2"]) + .select(["id"]) + .execute(); + + expectDeterministic(changeRowsForKeys.length).toBe(2); + } +); + +simulationTest( + "merges updated entities: source overwrites target content", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + const source = await createVersion({ lix, name: "source" }); + const target = await createVersion({ lix, name: "target" }); + + // Seed different content on both + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_upd", + schema_key: "test_entity", + file_id: "fileU", + version_id: target.id, + plugin_key: "test_plugin", + snapshot_content: { v: "target" }, + schema_version: 
"1.0", + }) + .execute(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_upd", + schema_key: "test_entity", + file_id: "fileU", + version_id: source.id, + plugin_key: "test_plugin", + snapshot_content: { v: "source" }, + schema_version: "1.0", + }) + .execute(); + + // Capture source change_id + const srcRow = await lix.db + .selectFrom("state_all") + .where("version_id", "=", source.id) + .where("file_id", "=", "fileU") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "e_upd") + .select(["change_id"]) + .executeTakeFirstOrThrow(); + + await mergeVersion({ lix, source, target }); + + const tgtRow = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileU") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "e_upd") + .select(["snapshot_content", "change_id"]) + .executeTakeFirstOrThrow(); + + expectDeterministic(tgtRow.snapshot_content).toEqual({ v: "source" }); + // No new live change: change_id equals source change + expectDeterministic(tgtRow.change_id).toBe(srcRow.change_id); + } +); +simulationTest( + "applies explicit deletions: source tombstones delete target content", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + const source = await createVersion({ lix, name: "source" }); + const target = await createVersion({ lix, name: "target" }); + + // Seed live in target + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_del", + schema_key: "test_entity", + file_id: "fileD", + version_id: target.id, + plugin_key: "test_plugin", + snapshot_content: { v: "target" }, + schema_version: "1.0", + }) + .execute(); + + // Create live in source then delete to create tombstone + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_del", + schema_key: "test_entity", + file_id: "fileD", + version_id: source.id, + plugin_key: "test_plugin", + snapshot_content: { v: "source" }, + schema_version: "1.0", + }) + .execute(); + await lix.db + .deleteFrom("state_all") + .where("entity_id", "=", "e_del") + .where("schema_key", "=", "test_entity") + .where("file_id", "=", "fileD") + .where("version_id", "=", source.id) + .execute(); + + await mergeVersion({ lix, source, target }); + + // Target should have no live entity now + const tgtRows = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileD") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "e_del") + .selectAll() + .execute(); + expectDeterministic(tgtRows.length).toBe(0); + + // Verify a deletion change was created and referenced in the merge change_set + const afterTarget = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + const commitChangeRow = await lix.db + .selectFrom("change") + .where("schema_key", "=", "lix_commit") + .where( + sql`json_extract(snapshot_content, '$.id')`, + "=", + afterTarget.commit_id as any + ) + .select([sql`json(snapshot_content)`.as("snapshot")]) + .executeTakeFirstOrThrow(); + const changeSetId = (commitChangeRow as any).snapshot + .change_set_id as string; + + // Find referenced deletion change for this key + const cseRows = await lix.db + .selectFrom("change") + .where("schema_key", "=", "lix_change_set_element") + .where( + 
sql`json_extract(snapshot_content, '$.change_set_id')`, + "=", + changeSetId as any + ) + .select([sql`json(snapshot_content)`.as("snapshot")]) + .execute(); + const anchoredIds = cseRows.map((r: any) => r.snapshot.change_id); + const anchoredChanges = anchoredIds.length + ? await lix.db + .selectFrom("change") + .where("id", "in", anchoredIds) + .select([ + "id", + "schema_key", + "entity_id", + sql`json(snapshot_content)`.as("snapshot"), + ]) + .execute() + : []; + const del = anchoredChanges.find( + (c: any) => + c.schema_key === "test_entity" && + c.entity_id === "e_del" && + c.snapshot === null + ); + expectDeterministic(Boolean(del)).toBe(true); + } +); + +simulationTest( + "keeps target-only entities unchanged (no explicit delete in source)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + const source = await createVersion({ lix, name: "source" }); + const target = await createVersion({ lix, name: "target" }); + + // Seed only in target + await lix.db + .insertInto("state_all") + .values({ + entity_id: "only-target", + schema_key: "test_entity", + file_id: "fileT", + version_id: target.id, + plugin_key: "test_plugin", + snapshot_content: { v: "target" }, + schema_version: "1.0", + }) + .execute(); + + const before = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + + await mergeVersion({ lix, source, target }); + + // Target row unchanged + const tgt = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileT") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "only-target") + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(tgt.snapshot_content).toEqual({ v: "target" }); + + // No merge commit created (no diffs) -> commit_id unchanged + const after = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(after.commit_id).toBe(before.commit_id); + } +); + +simulationTest( + "no-op merge returns current target tip when no changes", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + const source = await createVersion({ lix, name: "source" }); + const target = await createVersion({ lix, name: "target" }); + + const before = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + + await mergeVersion({ lix, source, target }); + + const after = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(after.commit_id).toBe(before.commit_id); + } +); + +simulationTest( + "idempotent: running the same merge twice results in no further changes", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + const source = await createVersion({ lix, name: "source" }); + const target = await createVersion({ lix, name: "target" }); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "idem-1", + 
schema_key: "test_entity", + file_id: "fileI", + version_id: source.id, + plugin_key: "test_plugin", + snapshot_content: { v: "from-source" }, + schema_version: "1.0", + }) + .execute(); + + await mergeVersion({ lix, source, target }); + + const afterFirst = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(afterFirst.commit_id); + + // Helper to inspect a commit: list its change_set contents by schema + const dumpCommit = async (commitId: string) => { + const commitChange = await lix.db + .selectFrom("change") + .where("schema_key", "=", "lix_commit") + .where( + sql`json_extract(snapshot_content, '$.id')`, + "=", + commitId as any + ) + .select(["id", sql`json(snapshot_content)`.as("snapshot")]) + .executeTakeFirst(); + if (!commitChange) return { commitId, change_set_id: null, schemas: {} }; + const change_set_id = (commitChange as any).snapshot + .change_set_id as string; + const cse = await lix.db + .selectFrom("change") + .where("schema_key", "=", "lix_change_set_element") + .where( + sql`json_extract(snapshot_content, '$.change_set_id')`, + "=", + change_set_id as any + ) + .select([sql`json(snapshot_content)`.as("snapshot")]) + .execute(); + const anchoredIds = cse.map((r: any) => r.snapshot.change_id as string); + const referenced = anchoredIds.length + ? await lix.db + .selectFrom("change") + .where("id", "in", anchoredIds) + .select([ + "id", + "schema_key", + "entity_id", + sql`json(snapshot_content)`.as("snapshot"), + ]) + .execute() + : []; + const bySchema = referenced.reduce>( + (m, r: any) => { + m[r.schema_key] = (m[r.schema_key] ?? 0) + 1; + return m; + }, + {} + ); + return { commitId, change_set_id, schemas: bySchema, referenced }; + }; + + // Verify snapshots after first merge + const firstStateAll = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileI") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "idem-1") + .select([ + "change_id", + "commit_id", + sql`json(snapshot_content)`.as("snapshot"), + ]) + .executeTakeFirstOrThrow(); + expectDeterministic(firstStateAll); + + await mergeVersion({ lix, source, target }); + + const afterSecond = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + expectDeterministic(afterSecond.commit_id); + + // Dump commit contents for both tips + const firstDump = await dumpCommit(afterFirst.commit_id); + const secondDump = await dumpCommit(afterSecond.commit_id); + expectDeterministic(firstDump); + expectDeterministic(secondDump); + + // Deterministic across simulations and within a run + expectDeterministic(afterSecond.commit_id).toBe(afterFirst.commit_id); + + // State remains referenced once + const tgtRows = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileI") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "idem-1") + .selectAll() + .execute(); + expectDeterministic(tgtRows.length).toBe(1); + + // Verify snapshots after second merge + const secondStateAll = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileI") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "idem-1") + .select([ + "change_id", + "commit_id", + sql`json(snapshot_content)`.as("snapshot"), + ]) + .executeTakeFirstOrThrow(); + + // Verify business snapshots are correct across both 
surfaces + expectDeterministic(firstStateAll.snapshot).toEqual({ v: "from-source" }); + + expectDeterministic(secondStateAll.snapshot).toEqual({ v: "from-source" }); + } +); + +simulationTest( + "treats source === target as a no-op merge", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + const version = await createVersion({ lix, name: "same" }); + + // Seed some business data in the version + await lix.db + .insertInto("state_all") + .values({ + entity_id: "noop-1", + schema_key: "test_entity", + file_id: "fileS", + version_id: version.id, + plugin_key: "test_plugin", + snapshot_content: { v: "seed" }, + schema_version: "1.0", + }) + .execute(); + + // Capture pointers before + const beforeTarget = await lix.db + .selectFrom("version") + .where("id", "=", version.id) + .selectAll() + .executeTakeFirstOrThrow(); + const beforeGlobal = await lix.db + .selectFrom("version") + .where("id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + + // Call merge with identical source and target + await mergeVersion({ lix, source: version, target: version }); + + // Pointers must be unchanged + const afterTarget = await lix.db + .selectFrom("version") + .where("id", "=", version.id) + .selectAll() + .executeTakeFirstOrThrow(); + const afterGlobal = await lix.db + .selectFrom("version") + .where("id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(afterTarget.commit_id).toBe(beforeTarget.commit_id); + expectDeterministic(afterGlobal.commit_id).toBe(beforeGlobal.commit_id); + + // tate remains unchanged + const row = await lix.db + .selectFrom("state_all") + .where("file_id", "=", "fileS") + .where("schema_key", "=", "test_entity") + .where("entity_id", "=", "noop-1") + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(row.snapshot_content).toEqual({ v: "seed" }); + } +); +// Edge cases +test.todo("handles empty source (no entities) gracefully"); +test.todo("handles empty target (fresh branch) gracefully"); +test.todo("handles mixed created/updated/deleted keys across multiple files"); + +/** + * TL;DR — Global/Source/Target merge model + * + * - Target (the version we merge into) holds user-domain entities. It receives + * a new tip commit (target.tip_after) that ANCHORS the winning entity changes. + * - Global holds the DAG (graph) for all versions. It publishes commits, edges + * and version pointers so the full history can be re-materialized from the + * global tip alone. + * - Source remains untouched by the merge: its `commit_id` is unchanged. + * + * Properties guaranteed by the two-commit model: + * 1) Target commit (target.tip_after) has TWO PARENTS: + * - parent_1 = target.tip_before + * - parent_2 = source.tip_before + * 2) Global commit (global.tip_after) publishes graph-only rows for BOTH commits + * (global.tip_after and target.tip_after) and the necessary edges/version updates: + * - Commits (2): ['lix_commit' for global.tip_after, 'lix_commit' for target.tip_after] + * - Edges (3): [target.tip_before → target.tip_after, + * source.tip_before → target.tip_after, + * global.tip_before → global.tip_after] + * - Versions (2): ['lix_version' for target.id → target.tip_after, + * 'lix_version' for 'global' → global.tip_after] + * 3) No user-domain entities are referenced under global — only graph metadata and version updates. 
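+ *
+ * A minimal sketch of how property (1) can be observed after a merge (the
+ * names here are illustrative; `mergedTargetCommitId` stands for
+ * target.tip_after):
+ *
+ *   const parents = await lix.db
+ *     .selectFrom("commit_edge")
+ *     .where("child_id", "=", mergedTargetCommitId)
+ *     .select("parent_id")
+ *     .execute();
+ *   // expected: one row for target.tip_before and one for source.tip_before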
+ * + * Visual overview (simplified): + * + * ┌─────────────────┐ ┌─────────────────────────┐ + * │ version_target │ target data │ COMMIT (target) │ + * └─────────────────┘ ───────────────▶│ references user entities │ + * └──────────┬──────────────┘ + * │ parent edges (2) + * │ from: target.tip_before, source.tip_before + * ▼ + * ┌────────────┐ ┌─────────────────────────┐ + * │ global │ graph rows │ COMMIT (global) │ + * └────────────┘ ─────────────────▶│ change_set with: │ + * │ commits (2), │ + * │ edges (3), │ + * │ versions (2) │ + * └──────────┬──────────────┘ + * │ edge (1) + * │ from: global.tip_before to global.tip_after + * ▼ + * versions: target → target.tip_after, + * global → global.tip_after + */ +simulationTest( + "merge meta: two-commit model writes global graph rows and local data", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Create versions + const source = await createVersion({ lix, name: "source" }); + const target = await createVersion({ lix, name: "target" }); + + // Insert one entity only in source + await lix.db + .insertInto("state_all") + .values({ + entity_id: "merge-meta-1", + schema_key: "test_entity", + file_id: "fileM", + version_id: source.id, + plugin_key: "test_plugin", + snapshot_content: { v: "content" }, + schema_version: "1.0", + }) + .execute(); + + // Capture tips before merge for edge assertions + const beforeSource = await lix.db + .selectFrom("version") + .where("id", "=", source.id) + .selectAll() + .executeTakeFirstOrThrow(); + + const beforeTarget = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + + const beforeGlobal = await lix.db + .selectFrom("version") + .where("id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + + // Perform merge + await mergeVersion({ lix, source, target }); + + const afterSource = await lix.db + .selectFrom("version") + .where("id", "=", source.id) + .selectAll() + .executeTakeFirstOrThrow(); + + // Source commit id should remain unchanged by merge + expectDeterministic(afterSource.commit_id).toBe(beforeSource.commit_id); + + // Fetch updated target and global version commits + const afterTarget = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + + const afterGlobal = await lix.db + .selectFrom("version") + .where("id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + + // Identify target tip commit and verify it has exactly two parents + const targetCommitId = afterTarget.commit_id; + const edgesToTarget = await lix.db + .selectFrom("commit_edge") + .where("child_id", "=", targetCommitId) + .select(["parent_id", "child_id"]) + .execute(); + + expectDeterministic(edgesToTarget.length).toBe(2); + + const targetParents = new Set(edgesToTarget.map((r: any) => r.parent_id)); + + expectDeterministic(targetParents.has(beforeTarget.commit_id)).toBe(true); + expectDeterministic(targetParents.has(beforeSource.commit_id)).toBe(true); + + // ── Expect block 1: GLOBAL ──────────────────────────────────────────────── + // Global expectations: + // - Global tip changed + // - Global change_set references: commits (2), edges (3), versions (2) + + expectDeterministic(afterGlobal.commit_id).not.toBe(beforeGlobal.commit_id); + + const globalCommit = await lix.db + .selectFrom("commit_all") + 
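+			// Commits are graph metadata published under the global version,
+			// which is why the lookup goes through commit_all scoped to "global".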
.where("lixcol_version_id", "=", "global") + .where("id", "=", afterGlobal.commit_id) + .selectAll() + .executeTakeFirstOrThrow(); + + const changeSetId = globalCommit.change_set_id; + const cseRows = await lix.db + .selectFrom("change_set_element_all") + .where("lixcol_version_id", "=", "global") + .where("change_set_id", "=", changeSetId) + .select([ + "change_set_id", + "change_id", + "entity_id", + "schema_key", + "file_id", + ]) + .execute(); + + const schemaCounts = cseRows.reduce>( + (acc, r: any) => { + acc[r.schema_key] = (acc[r.schema_key] ?? 0) + 1; + return acc; + }, + {} + ); + expectDeterministic(schemaCounts["lix_commit"]).toBe(2); + expectDeterministic(schemaCounts["lix_commit_edge"]).toBe(3); + expectDeterministic(schemaCounts["lix_version"]).toBe(2); + + // Commits: one for afterGlobal and one for afterTarget + const commitIds = new Set( + cseRows + .filter((r: any) => r.schema_key === "lix_commit") + .map((r: any) => r.entity_id) + ); + expectDeterministic(commitIds.has(afterGlobal.commit_id)).toBe(true); + expectDeterministic(commitIds.has(afterTarget.commit_id)).toBe(true); + + // Edges: two parents for target, and one edge prevGlobal -> afterGlobal + const edgePairs = cseRows + .filter((r: any) => r.schema_key === "lix_commit_edge") + .map((r: any) => r.entity_id.split("~")); + const edgeParents = edgePairs.map((p: any) => p[0]); + const edgeChildren = edgePairs.map((p: any) => p[1]); + const parentsSet = new Set(edgeParents); + const childrenSet = new Set(edgeChildren); + // target edges + expectDeterministic(parentsSet.has(beforeTarget.commit_id)).toBe(true); + expectDeterministic(parentsSet.has(beforeSource.commit_id)).toBe(true); + // ensure child target appears + expectDeterministic(childrenSet.has(targetCommitId)).toBe(true); + // global lineage edge + expectDeterministic(parentsSet.has(beforeGlobal.commit_id)).toBe(true); + expectDeterministic(childrenSet.has(afterGlobal.commit_id)).toBe(true); + + // Versions referenced in CSE: verify presence, and verify pointers via version view + const versionEntityIds = new Set( + cseRows + .filter((r: any) => r.schema_key === "lix_version") + .map((r: any) => r.entity_id) + ); + expectDeterministic(versionEntityIds.has(target.id)).toBe(true); + expectDeterministic(versionEntityIds.has("global")).toBe(true); + const versionTargetNow = await lix.db + .selectFrom("version") + .where("id", "=", target.id) + .selectAll() + .executeTakeFirstOrThrow(); + const versionGlobalNow = await lix.db + .selectFrom("version") + .where("id", "=", "global") + .selectAll() + .executeTakeFirstOrThrow(); + expectDeterministic(versionTargetNow.commit_id).toBe(afterTarget.commit_id); + expectDeterministic(versionGlobalNow.commit_id).toBe(afterGlobal.commit_id); + + // ── Expect block 2: LOCAL ──────────────────────────────────────────────── + // Local expectations: + // - Target state reflects the local entity change + + // Business state lands in target version's state view + const targetState = await lix.db + .selectFrom("state_all") + .where("version_id", "=", target.id) + .where("file_id", "=", "fileM") + .where("schema_key", "=", "test_entity") + .selectAll() + .execute(); + + expectDeterministic(targetState.length).toBe(1); + + // Target commit change_set should reference exactly the merged entity once + const targetCommit = await lix.db + .selectFrom("commit_all") + .where("lixcol_version_id", "=", "global") + .where("id", "=", afterTarget.commit_id) + .selectAll() + .executeTakeFirstOrThrow(); + const targetChangeSetId = 
targetCommit.change_set_id;
+		const targetCseRows = await lix.db
+			.selectFrom("change_set_element_all")
+			.where("lixcol_version_id", "=", "global")
+			.where("change_set_id", "=", targetChangeSetId)
+			.select([
+				"change_set_id",
+				"change_id",
+				"entity_id",
+				"schema_key",
+				"file_id",
+			])
+			.execute();
+		const targetSchemaCounts = targetCseRows.reduce<Record<string, number>>(
+			(acc, r: any) => {
+				acc[r.schema_key] = (acc[r.schema_key] ?? 0) + 1;
+				return acc;
+			},
+			{}
+		);
+		expectDeterministic(targetSchemaCounts["test_entity"]).toBe(1);
+		expectDeterministic(targetSchemaCounts["lix_commit"] ?? 0).toBe(0);
+		expectDeterministic(targetSchemaCounts["lix_commit_edge"] ?? 0).toBe(0);
+		expectDeterministic(targetSchemaCounts["lix_version"] ?? 0).toBe(0);
+		const anchoredEntity = targetCseRows.find(
+			(r: any) => r.schema_key === "test_entity"
+		);
+		expectDeterministic(Boolean(anchoredEntity)).toBe(true);
+		expectDeterministic(anchoredEntity?.entity_id).toBe("merge-meta-1");
+		expectDeterministic(anchoredEntity?.file_id).toBe("fileM");
+	}
+);
diff --git a/packages/lix-sdk/src/version/merge-version.ts b/packages/lix-sdk/src/version/merge-version.ts
new file mode 100644
index 0000000000..fc6ae03f8f
--- /dev/null
+++ b/packages/lix-sdk/src/version/merge-version.ts
@@ -0,0 +1,546 @@
+import type { Lix } from "../lix/open-lix.js";
+import type { LixVersion } from "./schema.js";
+import { selectVersionDiff } from "./select-version-diff.js";
+import { sql, type Kysely } from "kysely";
+import { uuidV7 } from "../deterministic/uuid-v7.js";
+import { timestamp } from "../deterministic/timestamp.js";
+import type { LixInternalDatabaseSchema } from "../database/schema.js";
+import type { LixChangeRaw } from "../change/schema.js";
+import { updateStateCache } from "../state/cache/update-state-cache.js";
+export async function mergeVersion(args: {
+	lix: Lix;
+	source: Pick<LixVersion, "id">;
+	target?: Pick<LixVersion, "id">;
+	// strategy?: "last_edit" | "ours" | "theirs" // reserved for future
+}): Promise<void> {
+	const { lix } = args;
+
+	if (args.target && args.target.id === args.source.id) {
+		return;
+	}
+
+	await lix.db.transaction().execute(async (trx) => {
+		const target = args.target
+			? 
{ id: args.target.id } + : await trx + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .select("version.id as id") + .executeTakeFirstOrThrow(); + + // Build diffs and collect winning change ids (created/updated) + const diffs = await selectVersionDiff({ + lix: { ...lix, db: trx } as any, + source: args.source, + target, + }) + .where("diff.status", "in", ["created", "updated", "deleted"]) // ignore unchanged + .selectAll() + .execute(); + + // Resolve tip commits for parents + const sourceVersion = await trx + .selectFrom("version") + .selectAll() + .where("id", "=", args.source.id) + .executeTakeFirstOrThrow(); + const targetVersion = await trx + .selectFrom("version") + .selectAll() + .where("id", "=", target.id) + .executeTakeFirstOrThrow(); + + const beforeGlobal = await trx + .selectFrom("version") + .selectAll() + .where("id", "=", "global") + .executeTakeFirstOrThrow(); + + // Prepare elements to reference and deletion changes to create + const toReference: Array<{ + id: string; + entity_id: string; + schema_key: string; + file_id: string; + }> = []; + const toDelete: Array<{ + entity_id: string; + schema_key: string; + file_id: string; + plugin_key: string; + schema_version: string; + }> = []; + + for (const d of diffs) { + if (d.status === "created" || d.status === "updated") { + if (d.after_change_id) { + toReference.push({ + id: d.after_change_id, + entity_id: d.entity_id, + schema_key: d.schema_key, + file_id: d.file_id, + }); + } + } else if (d.status === "deleted") { + // Lookup plugin_key and schema_version from target's before_change_id + const before = await trx + .selectFrom("change") + .where("id", "=", d.before_change_id!) + .select(["plugin_key", "schema_version"]) + .executeTakeFirstOrThrow(); + + toDelete.push({ + entity_id: d.entity_id, + schema_key: d.schema_key, + file_id: d.file_id, + plugin_key: before.plugin_key, + schema_version: before.schema_version, + }); + } + } + + // Flush pending source tracked changes for referenced items into the change table + // and remove them from the transaction queue so commit.ts won't create a source commit. + if (toReference.length > 0) { + const intDbLocal = trx as unknown as Kysely; + const refIds = toReference.map((r) => r.id); + + // Read pending rows from the transaction table + const pending = await intDbLocal + .selectFrom("internal_change_in_transaction") + .select([ + "id", + "entity_id", + "schema_key", + "schema_version", + "file_id", + "plugin_key", + sql`json(snapshot_content)`.as("snapshot_content"), + "created_at", + ]) + .where("version_id", "=", sourceVersion.id) + .where("id", "in", refIds) + .execute(); + + if (pending.length > 0) { + // Insert into persistent change table using the view's insert trigger + await trx + .insertInto("change") + .values( + pending.map((p: any) => ({ + id: p.id, + entity_id: p.entity_id, + schema_key: p.schema_key, + schema_version: p.schema_version, + file_id: p.file_id, + plugin_key: p.plugin_key, + snapshot_content: p.snapshot_content + ? 
JSON.stringify(p.snapshot_content) + : null, + created_at: p.created_at, + })) as any + ) + .execute(); + + // Remove from transaction queue to prevent automatic commit logic for source + await intDbLocal + .deleteFrom("internal_change_in_transaction") + .where("id", "in", refIds) + .execute(); + } + } + + // If nothing to do, return current target tip commit + if (toReference.length === 0 && toDelete.length === 0) { + // nothing to merge + return; + } + + // Two-commit model IDs + const targetChangeSetId = uuidV7({ lix }); + const targetCommitId = uuidV7({ lix }); + const globalChangeSetId = uuidV7({ lix }); + const globalCommitId = uuidV7({ lix }); + const now = timestamp({ lix }); + + // Build change rows for two-commit model + const changeRows: LixChangeRaw[] = []; + + // 1) Define both change_set entities (global + target) + const globalChangeSetChangeId = uuidV7({ lix }); + changeRows.push({ + id: globalChangeSetChangeId, + entity_id: globalChangeSetId, + schema_key: "lix_change_set", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: globalChangeSetId, + metadata: null, + }), + created_at: now, + }); + + const targetChangeSetChangeId = uuidV7({ lix }); + changeRows.push({ + id: targetChangeSetChangeId, + entity_id: targetChangeSetId, + schema_key: "lix_change_set", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: targetChangeSetId, + metadata: null, + }), + created_at: now, + }); + + // 2) Target commit (references user-domain changes) + const targetCommitChangeId = uuidV7({ lix }); + changeRows.push({ + id: targetCommitChangeId, + entity_id: targetCommitId, + schema_key: "lix_commit", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: targetCommitId, + change_set_id: targetChangeSetId, + }), + created_at: now, + }); + + // Track all CSE changes so we can publish meta CSEs (CSE-of-CSE) under global + const cseChangeRows: LixChangeRaw[] = []; + // Reference user changes under TARGET change set (in GLOBAL view) + for (const el of toReference) { + const cseRow: LixChangeRaw = { + id: uuidV7({ lix }), + entity_id: `${targetChangeSetId}~${el.id}`, + schema_key: "lix_change_set_element", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: targetChangeSetId, + change_id: el.id, + entity_id: el.entity_id, + schema_key: el.schema_key, + file_id: el.file_id, + }), + created_at: now, + }; + changeRows.push(cseRow); + cseChangeRows.push(cseRow); + } + + // Create deletion change rows + reference them under TARGET change set + const deletionChanges: LixChangeRaw[] = []; + for (const del of toDelete) { + const delChangeId = uuidV7({ lix }); + deletionChanges.push({ + id: delChangeId, + entity_id: del.entity_id, + schema_key: del.schema_key, + file_id: del.file_id, + plugin_key: del.plugin_key, + schema_version: del.schema_version, + snapshot_content: null, + created_at: now, + }); + const cseRow: LixChangeRaw = { + id: uuidV7({ lix }), + entity_id: `${targetChangeSetId}~${delChangeId}`, + schema_key: "lix_change_set_element", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: targetChangeSetId, + change_id: delChangeId, + entity_id: del.entity_id, + schema_key: del.schema_key, + file_id: del.file_id, + }), + created_at: now, + }; + 
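+			// Track the deletion CSE as well so step 4 below publishes a meta
+			// CSE (CSE-of-CSE) for it under the global change set.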
changeRows.push(cseRow); + cseChangeRows.push(cseRow); + } + + // 3) Global commit (publishes graph metadata for both commits) + const globalCommitChangeId = uuidV7({ lix }); + changeRows.push({ + id: globalCommitChangeId, + entity_id: globalCommitId, + schema_key: "lix_commit", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + id: globalCommitId, + change_set_id: globalChangeSetId, + }), + created_at: now, + }); + + // Commit edges: target has two parents; global has one lineage edge + const edgeTargetFromTargetBeforeId = uuidV7({ lix }); + changeRows.push({ + id: edgeTargetFromTargetBeforeId, + entity_id: `${targetVersion.commit_id}~${targetCommitId}`, + schema_key: "lix_commit_edge", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + parent_id: targetVersion.commit_id, + child_id: targetCommitId, + }), + created_at: now, + }); + const edgeTargetFromSourceBeforeId = uuidV7({ lix }); + changeRows.push({ + id: edgeTargetFromSourceBeforeId, + entity_id: `${sourceVersion.commit_id}~${targetCommitId}`, + schema_key: "lix_commit_edge", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + parent_id: sourceVersion.commit_id, + child_id: targetCommitId, + }), + created_at: now, + }); + // Global lineage edge: always link from beforeGlobal.tip to global.tip_after + const globalLineageParentId = beforeGlobal.commit_id; + const edgeGlobalLineageId = uuidV7({ lix }); + changeRows.push({ + id: edgeGlobalLineageId, + entity_id: `${globalLineageParentId}~${globalCommitId}`, + schema_key: "lix_commit_edge", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + parent_id: globalLineageParentId, + child_id: globalCommitId, + }), + created_at: now, + }); + + // Reference graph rows under GLOBAL change set + for (const meta of [ + { + change_id: targetCommitChangeId, + entity_id: targetCommitId, + schema_key: "lix_commit", + }, + { + change_id: globalCommitChangeId, + entity_id: globalCommitId, + schema_key: "lix_commit", + }, + { + change_id: edgeTargetFromTargetBeforeId, + entity_id: `${targetVersion.commit_id}~${targetCommitId}`, + schema_key: "lix_commit_edge", + }, + { + change_id: edgeTargetFromSourceBeforeId, + entity_id: `${sourceVersion.commit_id}~${targetCommitId}`, + schema_key: "lix_commit_edge", + }, + { + change_id: edgeGlobalLineageId, + entity_id: `${globalLineageParentId}~${globalCommitId}`, + schema_key: "lix_commit_edge", + }, + ]) { + const cseRow: LixChangeRaw = { + id: uuidV7({ lix }), + entity_id: `${globalChangeSetId}~${meta.change_id}`, + schema_key: "lix_change_set_element", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: globalChangeSetId, + change_id: meta.change_id, + entity_id: meta.entity_id, + schema_key: meta.schema_key, + file_id: "lix", + }), + created_at: now, + }; + changeRows.push(cseRow); + cseChangeRows.push(cseRow); + } + + // Version updates (target -> targetCommitId, global -> globalCommitId) + const intDb = trx as unknown as Kysely; + const targetVersionRow = await intDb + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", targetVersion.id) + .where("snapshot_content", "is not", null) + .select([sql`json(snapshot_content)`.as("snapshot_content")]) + .executeTakeFirstOrThrow(); + 
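+		// Only commit_id is rewritten on the version snapshot below; every other
+		// field of the current version row is carried over unchanged.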
const currentTargetVersion = + targetVersionRow.snapshot_content as unknown as LixVersion; + const updatedTargetVersion: LixChangeRaw = { + id: uuidV7({ lix }), + entity_id: targetVersion.id, + schema_key: "lix_version", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + ...currentTargetVersion, + commit_id: targetCommitId, + }), + created_at: now, + }; + changeRows.push(updatedTargetVersion); + const cseRowTargetVersion: LixChangeRaw = { + id: uuidV7({ lix }), + entity_id: `${globalChangeSetId}~${updatedTargetVersion.id}`, + schema_key: "lix_change_set_element", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: globalChangeSetId, + change_id: updatedTargetVersion.id, + entity_id: targetVersion.id, + schema_key: "lix_version", + file_id: "lix", + }), + created_at: now, + }; + changeRows.push(cseRowTargetVersion); + cseChangeRows.push(cseRowTargetVersion); + + const globalVersionRow = await intDb + .selectFrom("internal_resolved_state_all") + .where("schema_key", "=", "lix_version") + .where("entity_id", "=", "global") + .where("snapshot_content", "is not", null) + .select([sql`json(snapshot_content)`.as("snapshot_content")]) + .executeTakeFirstOrThrow(); + const currentGlobalVersion = + globalVersionRow.snapshot_content as unknown as LixVersion; + const updatedGlobalVersion: LixChangeRaw = { + id: uuidV7({ lix }), + entity_id: "global", + schema_key: "lix_version", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + ...currentGlobalVersion, + commit_id: globalCommitId, + }), + created_at: now, + }; + changeRows.push(updatedGlobalVersion); + const cseRowGlobalVersion: LixChangeRaw = { + id: uuidV7({ lix }), + entity_id: `${globalChangeSetId}~${updatedGlobalVersion.id}`, + schema_key: "lix_change_set_element", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: globalChangeSetId, + change_id: updatedGlobalVersion.id, + entity_id: "global", + schema_key: "lix_version", + file_id: "lix", + }), + created_at: now, + }; + changeRows.push(cseRowGlobalVersion); + cseChangeRows.push(cseRowGlobalVersion); + + // 4) Create meta change_set_elements for the change_set_element changes themselves (CSE-of-CSE) + for (const elementChange of cseChangeRows) { + changeRows.push({ + id: uuidV7({ lix }), + entity_id: `${globalChangeSetId}~${elementChange.id}`, + schema_key: "lix_change_set_element", + schema_version: "1.0", + file_id: "lix", + plugin_key: "lix_own_entity", + snapshot_content: JSON.stringify({ + change_set_id: globalChangeSetId, + change_id: elementChange.id, + entity_id: elementChange.entity_id, + schema_key: elementChange.schema_key, + file_id: elementChange.file_id, + }), + created_at: now, + }); + } + + // Insert all changes (graph + change set defs + target deletions) + const allChanges = [...changeRows, ...deletionChanges]; + if (allChanges.length > 0) { + await trx + .insertInto("change") + .values(allChanges as any) + .execute(); + } + + // Populate caches + // Global: graph + change_set (both) + CSEs + updateStateCache({ + lix, + changes: changeRows, + version_id: "global", + commit_id: globalCommitId, + }); + + // Target: only business/user-domain changes (winning refs + deletions) + let referencedChangeRows: LixChangeRaw[] = []; + if (toReference.length > 0) { + const ids = toReference.map((a) => a.id); + 
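+			// Re-read the winning changes from the change table so their
+			// snapshot_content is serialized in the shape updateStateCache expects.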
const rows = await trx + .selectFrom("change") + .where("id", "in", ids) + .select([ + "id", + "entity_id", + "schema_key", + "file_id", + "plugin_key", + "schema_version", + sql`json(snapshot_content)`.as("snapshot_content"), + "created_at", + ]) + .execute(); + referencedChangeRows = rows.map((r: any) => ({ + id: r.id, + entity_id: r.entity_id, + schema_key: r.schema_key, + file_id: r.file_id, + plugin_key: r.plugin_key, + schema_version: r.schema_version, + snapshot_content: r.snapshot_content + ? JSON.stringify(r.snapshot_content) + : null, + created_at: r.created_at, + })); + } + updateStateCache({ + lix, + changes: [...referencedChangeRows, ...deletionChanges], + version_id: target.id, + commit_id: targetCommitId, + }); + }); +} diff --git a/packages/lix-sdk/src/version/select-version-diff.bench.ts b/packages/lix-sdk/src/version/select-version-diff.bench.ts new file mode 100644 index 0000000000..2d28d6ce2d --- /dev/null +++ b/packages/lix-sdk/src/version/select-version-diff.bench.ts @@ -0,0 +1,128 @@ +import { bench } from "vitest"; +import { openLix } from "../lix/open-lix.js"; +import { createVersion } from "./create-version.js"; +import { selectVersionDiff as selectVersionDiff } from "./select-version-diff.js"; + +const COUNTS = { + created: 10, + updated: 10, + deleted: 10, +} as const; + +type Ctx = { + lix: Awaited>; + sourceId: string; + targetId: string; +}; + +const readyCtx: Promise = (async () => { + const lix = await openLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + const source = await createVersion({ lix, name: "bench_source" }); + const target = await createVersion({ lix, name: "bench_target" }); + + // Seed created (only in source) + for (let i = 0; i < COUNTS.created; i++) { + await lix.db + .insertInto("state_all") + .values({ + entity_id: `created_${i}`, + schema_key: "bench_diff_entity", + file_id: "bench_file", + version_id: source.id, + plugin_key: "bench_plugin", + snapshot_content: { v: i }, + schema_version: "1.0", + }) + .execute(); + } + + // Seed deleted (only in target) + for (let i = 0; i < COUNTS.deleted; i++) { + await lix.db + .insertInto("state_all") + .values({ + entity_id: `deleted_${i}`, + schema_key: "bench_diff_entity", + file_id: "bench_file", + version_id: target.id, + plugin_key: "bench_plugin", + snapshot_content: { v: i }, + schema_version: "1.0", + }) + .execute(); + } + + // Seed updated (present in both, different change ids/content) + for (let i = 0; i < COUNTS.updated; i++) { + const id = `updated_${i}`; + // target first (older) + await lix.db + .insertInto("state_all") + .values({ + entity_id: id, + schema_key: "bench_diff_entity", + file_id: "bench_file", + version_id: target.id, + plugin_key: "bench_plugin", + snapshot_content: { v: "old" }, + schema_version: "1.0", + }) + .execute(); + // source later (newer) + await lix.db + .insertInto("state_all") + .values({ + entity_id: id, + schema_key: "bench_diff_entity", + file_id: "bench_file", + version_id: source.id, + plugin_key: "bench_plugin", + snapshot_content: { v: "new" }, + schema_version: "1.0", + }) + .execute(); + } + + return { lix, sourceId: source.id, targetId: target.id } satisfies Ctx; +})(); + +bench("selectVersionDiff (exclude unchanged)", async () => { + try { + const { lix, sourceId, targetId } = await readyCtx; + + const qb = selectVersionDiff({ + lix, + source: { id: sourceId }, + target: { id: targetId }, + }).where("diff.status", "!=", "unchanged"); + + const rows = await 
qb.execute(); + // Consume result to prevent dead-code elimination + if (!rows || rows.length === 0) + throw new Error("unexpected empty diff in bench"); + } catch (error) { + console.error("Error during selectVersionDiff bench:", error); + } +}); + +bench("selectVersionDiff (full document diff)", async () => { + const { lix, sourceId, targetId } = await readyCtx; + const qb = selectVersionDiff({ + lix, + source: { id: sourceId }, + target: { id: targetId }, + }); + + const rows = await qb.execute(); + if (!rows || rows.length === 0) + throw new Error("unexpected empty diff in bench"); +}); diff --git a/packages/lix-sdk/src/version/select-version-diff.test.ts b/packages/lix-sdk/src/version/select-version-diff.test.ts new file mode 100644 index 0000000000..a67407b7ce --- /dev/null +++ b/packages/lix-sdk/src/version/select-version-diff.test.ts @@ -0,0 +1,660 @@ +import { createVersion } from "./create-version.js"; +import { selectVersionDiff } from "./select-version-diff.js"; +import { simulationTest } from "../test-utilities/simulation-test/simulation-test.js"; + +simulationTest( + "created: key only in source -> before=null, after=source (query)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Create source and target versions first + const sourceVersion = await createVersion({ lix, name: "source" }); + const targetVersion = await createVersion({ lix, name: "target" }); + + // Add an entity only in the source version + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e1", + schema_key: "diff_test_entity", + file_id: "file1", + version_id: sourceVersion.id, + plugin_key: "test_plugin", + snapshot_content: { value: "A" }, + schema_version: "1.0", + }) + .execute(); + + const diff = await selectVersionDiff({ + lix, + source: sourceVersion, + target: targetVersion, + }) + .where("status", "!=", "unchanged") + .execute(); + + expectDeterministic(diff).toHaveLength(1); + const d = diff[0]!; + expectDeterministic(d.entity_id).toBe("e1"); + expectDeterministic(d.schema_key).toBe("diff_test_entity"); + expectDeterministic(d.file_id).toBe("file1"); + expectDeterministic(d.before_version_id).toBeNull(); + expectDeterministic(d.before_change_id).toBeNull(); + expectDeterministic(d.after_version_id).toBe(sourceVersion.id); + expectDeterministic(d.after_change_id).not.toBeNull(); + expectDeterministic(d.status).toBe("created"); + + // Verify the 'after' change id refers to a stored change row + const stored = await lix.db + .selectFrom("change") + .where("id", "=", d.after_change_id!) 
+ .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(stored).toMatchObject({ + id: d.after_change_id!, + entity_id: "e1", + schema_key: "diff_test_entity", + file_id: "file1", + plugin_key: "test_plugin", + snapshot_content: { value: "A" }, + }); + + // After commit id should be the commit_id from state_all for the source side + const srcState = await lix.db + .selectFrom("state_all") + .where("version_id", "=", sourceVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(d.after_commit_id).toBe(srcState.commit_id); + } +); + +simulationTest( + "updated: both changed without common ancestor -> source wins (query)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Start both versions from the same tip + const sourceVersion = await createVersion({ lix, name: "source" }); + const targetVersion = await createVersion({ lix, name: "target" }); + + // Both sides change the same key independently (common ancestor had no e1) + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e1", + schema_key: "diff_test_entity", + file_id: "file1", + version_id: targetVersion.id, + plugin_key: "test_plugin", + snapshot_content: { value: "target" }, + schema_version: "1.0", + }) + .execute(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e1", + schema_key: "diff_test_entity", + file_id: "file1", + version_id: sourceVersion.id, + plugin_key: "test_plugin", + snapshot_content: { value: "source" }, + schema_version: "1.0", + }) + .execute(); + + // Fetch diff rows for that key + const diffs = await selectVersionDiff({ + lix, + source: sourceVersion, + target: targetVersion, + }) + .where("diff.file_id", "=", "file1") + .where("diff.schema_key", "=", "diff_test_entity") + .where("diff.entity_id", "=", "e1") + .execute(); + + expectDeterministic(diffs).toHaveLength(1); + const r = diffs[0]!; + expectDeterministic(r.status).toBe("updated"); + expectDeterministic(r.before_version_id).toBe(targetVersion.id); + expectDeterministic(r.after_version_id).toBe(sourceVersion.id); + + // Verify winner is source side by checking after_change_id equals source state change_id + const srcState = await lix.db + .selectFrom("state_all") + .where("version_id", "=", sourceVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .selectAll() + .executeTakeFirstOrThrow(); + + const tgtState = await lix.db + .selectFrom("state_all") + .where("version_id", "=", targetVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(r.after_change_id).toBe(srcState.change_id); + expectDeterministic(r.before_change_id).toBe(tgtState.change_id); + } +); + +simulationTest( + "updated: both changed after common ancestor -> source wins (query)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Get active version and seed a common ancestor state for e1 + const active = await lix.db + 
.selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e1", + schema_key: "diff_test_entity", + file_id: "file1", + version_id: active.id, + plugin_key: "test_plugin", + snapshot_content: { value: "ancestor" }, + schema_version: "1.0", + }) + .execute(); + + // Branch source and target from the active version + const sourceVersion = await createVersion({ + lix, + name: "source", + from: active, + }); + + const targetVersion = await createVersion({ + lix, + name: "target", + from: active, + }); + + // Both sides update e1 independently + await lix.db + .updateTable("state_all") + .set({ snapshot_content: { value: "target" } }) + .where("version_id", "=", targetVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .execute(); + + await lix.db + .updateTable("state_all") + .set({ snapshot_content: { value: "source" } }) + .where("version_id", "=", sourceVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .execute(); + + // Diff should pick source as winner + const diffs = await selectVersionDiff({ + lix, + source: sourceVersion, + target: targetVersion, + }) + .where("diff.file_id", "=", "file1") + .where("diff.schema_key", "=", "diff_test_entity") + .where("diff.entity_id", "=", "e1") + .execute(); + + expectDeterministic(diffs).toHaveLength(1); + const r = diffs[0]!; + expectDeterministic(r.status).toBe("updated"); + expectDeterministic(r.before_version_id).toBe(targetVersion.id); + expectDeterministic(r.after_version_id).toBe(sourceVersion.id); + + const srcState = await lix.db + .selectFrom("state_all") + .where("version_id", "=", sourceVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .selectAll() + .executeTakeFirstOrThrow(); + + const tgtState = await lix.db + .selectFrom("state_all") + .where("version_id", "=", targetVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .selectAll() + .executeTakeFirstOrThrow(); + + expectDeterministic(r.after_change_id).toBe(srcState.change_id); + expectDeterministic(r.before_change_id).toBe(tgtState.change_id); + } +); + +simulationTest( + "deleted: source explicit deletion beats target content -> after=source (tombstone), before=target (query)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Seed common ancestor with entity e1 + const active = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e1", + schema_key: "diff_test_entity", + file_id: "file1", + version_id: active.id, + plugin_key: "test_plugin", + snapshot_content: { value: "ancestor" }, + schema_version: "1.0", + }) + .execute(); + + // Branch source and target from the ancestor + const sourceVersion = await createVersion({ + lix, + name: "source", + from: active, + }); + const targetVersion = await createVersion({ + lix, + name: "target", + from: active, + }); + + 
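+		// Deleting from state_all leaves an explicit tombstone for e1 in the
+		// source version, which the diff should classify as "deleted".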
// Perform deletion in source version (remove e1); target keeps content + await lix.db + .deleteFrom("state_all") + .where("version_id", "=", sourceVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .execute(); + + // Confirm target still has the entity + const tgtState = await lix.db + .selectFrom("state_all") + .where("version_id", "=", targetVersion.id) + .where("file_id", "=", "file1") + .where("schema_key", "=", "diff_test_entity") + .where("entity_id", "=", "e1") + .selectAll() + .executeTakeFirstOrThrow(); + + // Diff should classify as deleted: after=null, before=target + const diffs = await selectVersionDiff({ + lix, + source: sourceVersion, + target: targetVersion, + }) + .where("diff.file_id", "=", "file1") + .where("diff.schema_key", "=", "diff_test_entity") + .where("diff.entity_id", "=", "e1") + .execute(); + + expectDeterministic(diffs).toHaveLength(1); + const r = diffs[0]!; + expectDeterministic(r.status).toBe("deleted"); + expectDeterministic(r.before_version_id).toBe(targetVersion.id); + // With explicit deletes, source contributes a tombstone row + expectDeterministic(r.after_version_id).toBe(sourceVersion.id); + expectDeterministic(r.after_change_id).toBeTruthy(); + expectDeterministic(r.before_change_id).toBe(tgtState.change_id); + + // Verify the 'after' change is a deletion (tombstone) + const afterChange = await lix.db + .selectFrom("change") + .where("id", "=", r.after_change_id!) + .selectAll() + .executeTakeFirstOrThrow(); + expectDeterministic(afterChange.snapshot_content).toBe(null); + } +); + +simulationTest( + "unchanged: identical leaves are returned when not filtered (query)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Seed common ancestor state + const active = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e1", + schema_key: "diff_test_entity", + file_id: "file1", + version_id: active.id, + plugin_key: "test_plugin", + snapshot_content: { value: "same" }, + schema_version: "1.0", + }) + .execute(); + + // Branch without changes + const sourceVersion = await createVersion({ + lix, + name: "source", + from: active, + }); + const targetVersion = await createVersion({ + lix, + name: "target", + from: active, + }); + + const diffs = await selectVersionDiff({ + lix, + source: sourceVersion, + target: targetVersion, + }) + .where("diff.file_id", "=", "file1") + .where("diff.schema_key", "=", "diff_test_entity") + .where("diff.entity_id", "=", "e1") + .execute(); + + expectDeterministic(diffs).toHaveLength(1); + const r = diffs[0]!; + expectDeterministic(r.status).toBe("unchanged"); + expectDeterministic(r.before_version_id).toBe(targetVersion.id); + expectDeterministic(r.after_version_id).toBe(sourceVersion.id); + expectDeterministic(r.before_change_id).toBe(r.after_change_id); + } +); + +simulationTest( + "unchanged: entity exists only in target, never in source (no explicit delete)", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // 
Create source and target versions from the same tip + const sourceVersion = await createVersion({ lix, name: "source" }); + const targetVersion = await createVersion({ lix, name: "target" }); + + // Insert entity only in target; source never had it and did not delete it + await lix.db + .insertInto("state_all") + .values({ + entity_id: "only-in-target", + schema_key: "diff_test_entity", + file_id: "file-only-target", + version_id: targetVersion.id, + plugin_key: "test_plugin", + snapshot_content: { v: "target" }, + schema_version: "1.0", + }) + .execute(); + + const diffs = await selectVersionDiff({ + lix, + source: sourceVersion, + target: targetVersion, + }) + .where("diff.file_id", "=", "file-only-target") + .where("diff.schema_key", "=", "diff_test_entity") + .where("diff.entity_id", "=", "only-in-target") + .execute(); + + expectDeterministic(diffs).toHaveLength(1); + const r = diffs[0]!; + expectDeterministic(r.status).toBe("unchanged"); + expectDeterministic(r.before_version_id).toBe(targetVersion.id); + // Since we treat target-only as unchanged, mirror target on after_* + expectDeterministic(r.after_version_id).toBe(targetVersion.id); + expectDeterministic(r.after_change_id).toBe(r.before_change_id); + } +); + +simulationTest( + "handles multiple files and schema keys across versions", + async ({ openSimulatedLix, expectDeterministic }) => { + const lix = await openSimulatedLix({ + keyValues: [ + { + key: "lix_deterministic_mode", + value: { enabled: true }, + lixcol_version_id: "global", + }, + ], + }); + + // Seed common ancestor with an unchanged entity on file2/schemaA + const active = await lix.db + .selectFrom("active_version") + .innerJoin("version", "version.id", "active_version.version_id") + .selectAll("version") + .executeTakeFirstOrThrow(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_same", + schema_key: "schemaA", + file_id: "file2", + version_id: active.id, + plugin_key: "test_plugin", + snapshot_content: { v: "same" }, + schema_version: "1.0", + }) + .execute(); + + // Branch source from the active version + const sourceVersion = await createVersion({ + lix, + name: "source", + from: active, + }); + + // file1/schemaA: deleted (source explicitly deletes, target has content later) + // Seed then delete in source before target exists (no common ancestor for this key) + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_del", + schema_key: "schemaA", + file_id: "file1", + version_id: sourceVersion.id, + plugin_key: "test_plugin", + snapshot_content: { v: "to-be-deleted" }, + schema_version: "1.0", + }) + .execute(); + + await lix.db + .deleteFrom("state_all") + .where("entity_id", "=", "e_del") + .where("schema_key", "=", "schemaA") + .where("file_id", "=", "file1") + .where("version_id", "=", sourceVersion.id) + .execute(); + + // Create target after the source tombstone was created + const targetVersion = await createVersion({ + lix, + name: "target", + from: active, + }); + + // file1/schemaA: created (only in source) + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_add", + schema_key: "schemaA", + file_id: "file1", + version_id: sourceVersion.id, + plugin_key: "test_plugin", + snapshot_content: { v: "source" }, + schema_version: "1.0", + }) + .execute(); + + // Ensure target has content for the deleted key + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_del", + schema_key: "schemaA", + file_id: "file1", + version_id: targetVersion.id, + plugin_key: "test_plugin", + 
snapshot_content: { v: "target" }, + schema_version: "1.0", + }) + .execute(); + + // file1/schemaB: updated (both present but different) + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_upd", + schema_key: "schemaB", + file_id: "file1", + version_id: targetVersion.id, + plugin_key: "test_plugin", + snapshot_content: { v: "target" }, + schema_version: "1.0", + }) + .execute(); + + await lix.db + .insertInto("state_all") + .values({ + entity_id: "e_upd", + schema_key: "schemaB", + file_id: "file1", + version_id: sourceVersion.id, + plugin_key: "test_plugin", + snapshot_content: { v: "source" }, + schema_version: "1.0", + }) + .execute(); + + // Query all diffs and assert statuses across files/schemas + const diffs = await selectVersionDiff({ + lix, + source: sourceVersion, + target: targetVersion, + }).execute(); + + // Build lookup by key + const key = (r: any) => `${r.file_id}|${r.schema_key}|${r.entity_id}`; + const map = new Map(diffs.map((r: any) => [key(r), r])); + + // created (file1/schemaA/e_add) + const created = map.get("file1|schemaA|e_add"); + expectDeterministic(created).toBeDefined(); + expectDeterministic(created!.status).toBe("created"); + expectDeterministic(created!.before_version_id).toBeNull(); + expectDeterministic(created!.after_version_id).toBe(sourceVersion.id); + expectDeterministic(created!.before_change_id).toBeNull(); + expectDeterministic(created!.after_change_id).toBeTruthy(); + + // deleted (file1/schemaA/e_del): source tombstone vs target content + const deleted = map.get("file1|schemaA|e_del"); + expectDeterministic(deleted).toBeDefined(); + expectDeterministic(deleted!.status).toBe("deleted"); + expectDeterministic(deleted!.before_version_id).toBe(targetVersion.id); + expectDeterministic(deleted!.after_version_id).toBe(sourceVersion.id); + expectDeterministic(deleted!.after_change_id).toBeTruthy(); + expectDeterministic(deleted!.before_change_id).toBeTruthy(); + + // updated (file1/schemaB/e_upd) + const updated = map.get("file1|schemaB|e_upd"); + expectDeterministic(updated).toBeDefined(); + expectDeterministic(updated!.status).toBe("updated"); + expectDeterministic(updated!.before_version_id).toBe(targetVersion.id); + expectDeterministic(updated!.after_version_id).toBe(sourceVersion.id); + expectDeterministic(updated!.after_change_id).toBeTruthy(); + expectDeterministic(updated!.before_change_id).toBeTruthy(); + expectDeterministic(updated!.after_change_id).not.toBe( + updated!.before_change_id + ); + + // unchanged (file2/schemaA/e_same) + const unchanged = map.get("file2|schemaA|e_same"); + expectDeterministic(unchanged).toBeDefined(); + expectDeterministic(unchanged!.status).toBe("unchanged"); + expectDeterministic(unchanged!.before_version_id).toBe(targetVersion.id); + expectDeterministic(unchanged!.after_version_id).toBe(sourceVersion.id); + expectDeterministic(unchanged!.after_change_id).toBe( + unchanged!.before_change_id + ); + } +); diff --git a/packages/lix-sdk/src/version/select-version-diff.ts b/packages/lix-sdk/src/version/select-version-diff.ts new file mode 100644 index 0000000000..cb6918214c --- /dev/null +++ b/packages/lix-sdk/src/version/select-version-diff.ts @@ -0,0 +1,204 @@ +import type { Lix } from "../lix/open-lix.js"; +import type { LixVersion } from "./schema.js"; +import { sql, type SelectQueryBuilder } from "kysely"; + +export type DiffRow = { + entity_id: string; + schema_key: string; + file_id: string; + before_version_id: string | null; + before_change_id: string | null; + before_commit_id: string | 
null; + after_version_id: string | null; + after_change_id: string | null; + after_commit_id: string | null; + status: "created" | "updated" | "deleted" | "unchanged" | null; +}; + +/** + * Compares two versions and returns differences between their entities. + * + * This function is modeled for merging a source version into a target version, + * which is why the source always wins in conflict scenarios (when both versions + * modified the same entity). It performs a full outer join between source and + * target versions to identify created, updated, deleted, and unchanged entities. + * + * Note: More sophisticated diff strategies and proper conflict handling are + * planned for the future. Please upvote https://github.com/opral/lix-sdk/issues/368 + * if you need conflict detection and resolution capabilities. + * + * The returned query builder allows for flexible filtering and composition + * before executing the diff. When no target is specified, compares against + * the active version. + * + * Diff status meanings: + * - `created`: Entity exists only in source version (new addition) + * - `deleted`: Entity explicitly deleted in source (tombstone present) + * - `updated`: Entity exists in both but with different change_ids (source wins) + * - `unchanged`: Entity has same change_id in both OR exists only in target without explicit deletion + * + * Visual representation (source → target): + * ``` + * Status | Source | Target | before_version_id | after_version_id | before_* | after_* + * ------------|--------|--------|-------------------|------------------|----------|---------- + * created | ✓ | ✗ | null | source.id | null | source + * deleted | ✓* | ✓ | target.id | source.id | target | tombstone + * updated | ✓ | ✓ | target.id | source.id | target | source + * unchanged | ✓ | ✓ | target.id | source.id | same | same + * unchanged | ✗ | ✓ | target.id | target.id | target | target + * ``` + * * Source ✓* indicates a tombstone (explicit deletion) + * + * Performance tips: + * - Filter by status to exclude unchanged entities (most common) + * - Filter by file_id when diffing specific documents + * - Filter by schema_key when interested in specific entity types + * + * @example + * // Get all changes between two versions + * const changes = await selectVersionDiff({ lix, source, target }) + * .where('diff.status', '!=', 'unchanged') + * .execute(); + * + * @example + * // Compare specific file between source and active version + * const fileDiff = await selectVersionDiff({ lix, source }) + * .where('diff.file_id', '=', 'file1.json') + * .where('diff.status', '!=', 'unchanged') + * .orderBy('diff.entity_id') + * .execute(); + * + * @example + * // Get only entities of a specific schema that were modified + * const schemaDiff = await selectVersionDiff({ lix, source, target }) + * .where('diff.schema_key', '=', 'message') + * .where('diff.status', 'in', ['created', 'updated', 'deleted']) + * .execute(); + * + * @example + * // Find entities that exist only in target (no explicit delete in source) + * const targetOnly = await selectVersionDiff({ lix, source, target }) + * .where('diff.status', '=', 'unchanged') + * .whereRef('diff.after_version_id', '=', 'diff.before_version_id') + * .execute(); + * + * @example + * // Check if specific entities changed + * const entityDiff = await selectVersionDiff({ lix, source, target }) + * .where('diff.entity_id', 'in', ['entity1', 'entity2', 'entity3']) + * .where('diff.status', '!=', 'unchanged') + * .execute(); + * + * if (entityDiff.length > 0) { + * // 
Some entities changed
+ *   }
+ *
+ * ## Understanding Common Ancestor Behavior
+ *
+ * Imagine you and a colleague both start from the same document (common ancestor):
+ * - You create a "source" version and make changes
+ * - Your colleague creates a "target" version and makes different changes
+ *
+ * When comparing these versions, the diff needs to know:
+ * 1. What did YOU intentionally delete? (will be removed from target)
+ * 2. What did your colleague add that you never knew about? (will be kept)
+ *
+ * The system tracks deletions using "tombstones" - special markers that say
+ * "this entity was deleted". When you delete something, a tombstone is created.
+ *
+ * This means:
+ * - If you deleted "entity A" that existed in the common ancestor →
+ *   Status: "deleted" (tombstone present, will be removed from target)
+ * - If "entity B" only exists in target (added after you created your version) →
+ *   Status: "unchanged" (no tombstone, you never knew about it, so it stays)
+ *
+ * Without this logic, the system couldn't tell the difference between
+ * "I deleted this" and "I never had this".
+ */
+export function selectVersionDiff(args: {
+	lix: Lix;
+	source: Pick<LixVersion, "id">;
+	target?: Pick<LixVersion, "id">;
+}): SelectQueryBuilder<any, "diff", DiffRow> {
+	const db = args.lix.db;
+
+	const sourceVersionId = sql.lit(args.source.id);
+	const targetVersionId = args.target
+		? sql.lit(args.target.id)
+		: sql`(SELECT version_id FROM active_version)`;
+
+	const sub = sql`
+WITH
+  -- Source side should expose explicit deletions (tombstones)
+  -- Use state_with_tombstones to include rows with NULL snapshot_content
+  s AS (
+    SELECT entity_id, schema_key, file_id, change_id, commit_id, version_id, snapshot_content
+    FROM state_with_tombstones
+    WHERE version_id = ${sourceVersionId}
+  ),
+  t AS (
+    SELECT entity_id, schema_key, file_id, change_id, commit_id, version_id
+    FROM state_all
+    WHERE version_id = ${targetVersionId}
+  ),
+  joined AS (
+    SELECT
+      COALESCE(s.entity_id, t.entity_id) AS entity_id,
+      COALESCE(s.schema_key, t.schema_key) AS schema_key,
+      COALESCE(s.file_id, t.file_id) AS file_id,
+      t.version_id AS before_version_id,
+      t.change_id AS before_change_id,
+      t.commit_id AS before_commit_id,
+      s.version_id AS after_version_id,
+      s.change_id AS after_change_id,
+      s.commit_id AS after_commit_id,
+      CASE
+        -- Explicit delete in source (tombstone takes precedence)
+        WHEN s.change_id IS NOT NULL AND s.snapshot_content IS NULL THEN 'deleted'
+        -- Created in source only (live row)
+        WHEN s.change_id IS NOT NULL AND t.change_id IS NULL AND s.snapshot_content IS NOT NULL THEN 'created'
+        -- Both present and different (live row in source)
+        WHEN s.change_id IS NOT NULL AND t.change_id IS NOT NULL AND s.snapshot_content IS NOT NULL AND s.change_id != t.change_id THEN 'updated'
+        -- Both present and same (live row in source)
+        WHEN s.change_id IS NOT NULL AND t.change_id IS NOT NULL AND s.snapshot_content IS NOT NULL AND s.change_id = t.change_id THEN 'unchanged'
+        ELSE NULL
+      END AS status
+    FROM s
+    LEFT JOIN t ON t.entity_id = s.entity_id AND t.schema_key = s.schema_key AND t.file_id = s.file_id
+    UNION ALL
+    SELECT
+      COALESCE(s.entity_id, t.entity_id) AS entity_id,
+      COALESCE(s.schema_key, t.schema_key) AS schema_key,
+      COALESCE(s.file_id, t.file_id) AS file_id,
+      t.version_id AS before_version_id,
+      t.change_id AS before_change_id,
+      t.commit_id AS before_commit_id,
+      -- For target-only rows (no source contribution), mirror target values for after_*
+      -- This represents entities that exist in target but were never in source
+      t.version_id AS after_version_id,
+      t.change_id AS after_change_id,
+      t.commit_id AS after_commit_id,
+      CASE
+        -- Target-only: entity exists in target but never existed in source (no explicit delete)
+        -- Treated as unchanged since source doesn't modify it
+        WHEN s.change_id IS NULL AND t.change_id IS NOT NULL THEN 'unchanged'
+        -- These branches shouldn't occur in this half because s.change_id IS NULL filter is applied
+        WHEN s.change_id IS NOT NULL AND t.change_id IS NULL THEN 'created'
+        WHEN s.change_id IS NOT NULL AND t.change_id IS NOT NULL AND s.change_id != t.change_id THEN 'updated'
+        WHEN s.change_id IS NOT NULL AND t.change_id IS NOT NULL AND s.change_id = t.change_id THEN 'unchanged'
+        ELSE NULL
+      END AS status
+    FROM t
+    LEFT JOIN s ON s.entity_id = t.entity_id AND s.schema_key = t.schema_key AND s.file_id = t.file_id
+    WHERE s.change_id IS NULL
+  )
+SELECT *
+FROM joined
+	`;
+
+	const aliased = sql`(${sub})`.as("diff");
+	const qb = db
+		.selectFrom(aliased)
+		.selectAll() as unknown as SelectQueryBuilder;
+	return qb;
+}
diff --git a/packages/lix-sdk/src/version/switch-version.test.ts b/packages/lix-sdk/src/version/switch-version.test.ts
index 88f6a63a28..2d1fa8418c 100644
--- a/packages/lix-sdk/src/version/switch-version.test.ts
+++ b/packages/lix-sdk/src/version/switch-version.test.ts
@@ -1,7 +1,7 @@
 import { expect, test } from "vitest";
 import { openLix } from "../lix/open-lix.js";
-import { createVersion } from "./create-version.js";
 import { switchVersion } from "./switch-version.js";
+import { createVersion } from "./create-version.js";
 
 test("switching versiones should update the active_version", async () => {
 	const lix = await openLix({});
@@ -15,7 +15,7 @@ test("switching versiones should update the active_version", async () => {
 	const newVersion = await lix.db.transaction().execute(async (trx) => {
 		const newVersion = await createVersion({
 			lix: { ...lix, db: trx },
-			commit_id: activeVersion.commit_id,
+			from: activeVersion,
 		});
 		await switchVersion({ lix: { ...lix, db: trx }, to: newVersion });
 		return newVersion;
diff --git a/packages/lix-sdk/src/version/switch-version.ts b/packages/lix-sdk/src/version/switch-version.ts
index 343aa9e2b5..e7e3ce6ad9 100644
--- a/packages/lix-sdk/src/version/switch-version.ts
+++ b/packages/lix-sdk/src/version/switch-version.ts
@@ -16,7 +16,7 @@ import type { LixVersion } from "./schema.js";
  *
  * ```ts
  * await lix.db.transaction().execute(async (trx) => {
- *   const newVersion = await createVersion({ lix: { db: trx }, commit_id: currentVersion.commit_id });
+ *   const newVersion = await createVersion({ lix: { db: trx }, commit: currentVersion });
+ *   await switchVersion({ lix: { db: trx }, to: newVersion });
+ * });
+ * ```
diff --git a/packages/lix-sdk/vitest.config.ts b/packages/lix-sdk/vitest.config.ts
index 24bbfa8970..f3422f8336 100644
--- a/packages/lix-sdk/vitest.config.ts
+++ b/packages/lix-sdk/vitest.config.ts
@@ -4,7 +4,8 @@ import codspeedPlugin from "@codspeed/vitest-plugin";
 export default defineConfig({
 	plugins: [process.env.CODSPEED_BENCH ? codspeedPlugin() : undefined],
 	test: {
-		// default timeout
-		testTimeout: 20000,
+		// increased default timeout to avoid ci/cd issues
+		// the high timeout will be needed less with further performance improvements
+		testTimeout: 120000,
 	},
 });
diff --git a/packages/lix/plugins/prosemirror/example/src/components/ChangeSet.tsx b/packages/lix/plugins/prosemirror/example/src/components/ChangeSet.tsx
index d5aa6afe1d..77faf32856 100644
--- a/packages/lix/plugins/prosemirror/example/src/components/ChangeSet.tsx
+++ b/packages/lix/plugins/prosemirror/example/src/components/ChangeSet.tsx
@@ -9,9 +9,8 @@ import {
 	ChevronRight,
 } from "lucide-react";
 import {
-	applyChangeSet,
+	transition,
 	createThread,
-	createUndoCommit,
 	type LixChangeSet as ChangeSetType,
 } from "@lix-js/sdk";
 import { useKeyValue } from "../hooks/useKeyValue";
@@ -95,7 +94,7 @@ export const ChangeSet = forwardRef(
 		return selectThreads(lix, { commitId: commit.id });
 	});
 
-// Removed the console.log statement as it is a debugging artifact.
+	// Removed the console.log statement as it is a debugging artifact.
 
 	// Get the first comment if it exists
 	const firstComment = threads?.[0]?.comments?.[0];
@@ -230,20 +229,25 @@ export const ChangeSet = forwardRef(
 							.where("change_set_id", "=", changeSet.id)
 							.selectAll()
 							.executeTakeFirst();
-
+
 						if (!commit) {
 							console.error("Could not find commit for change set");
 							return;
 						}
 
-						const undoCommit = await createUndoCommit({
-							lix,
-							commit: { id: commit.id },
-						});
-						await applyChangeSet({
-							lix,
-							changeSet: { id: undoCommit.change_set_id },
-						});
+						// Find the parent of this commit and transition to it
+						const parent = await lix.db
+							.selectFrom("commit_edge")
+							.where("child_id", "=", commit.id)
+							.select(["parent_id"])
+							.executeTakeFirst();
+
+						if (!parent) {
+							console.error("Cannot undo: commit has no parent");
+							return;
+						}
+
+						await transition({ lix, to: { id: parent.parent_id } });
 					}}
 					title="Undo this change set"
 				>
diff --git a/packages/lix/plugins/prosemirror/example/src/components/VersionToolbar.tsx b/packages/lix/plugins/prosemirror/example/src/components/VersionToolbar.tsx
index e6a49ba01a..4a38995eee 100644
--- a/packages/lix/plugins/prosemirror/example/src/components/VersionToolbar.tsx
+++ b/packages/lix/plugins/prosemirror/example/src/components/VersionToolbar.tsx
@@ -43,7 +43,7 @@ const VersionToolbar: React.FC = () => {
 			const newVersion = await createVersion({
 				lix,
 				name: `${firstName}'s Version`,
-				commit_id: activeVersion.commit_id,
+				from: activeVersion,
 			});
 			await switchVersion({ lix, to: newVersion });
 			// Scroll to the end of the scrollbar after a short delay to ensure DOM update
@@ -71,7 +71,7 @@ const VersionToolbar: React.FC = () => {
 			const newVersion = await createVersion({
 				lix,
 				name: `${firstName}'s Version`,
-				commit_id: activeVersion.commit_id,
+				from: activeVersion,
 			});
 			await switchVersion({ lix, to: { id: newVersion.id } });
 		}
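
For readers skimming this diff, the pieces above compose roughly as follows. This is a minimal sketch outside the patch, not the SDK's documented usage: it assumes `openLix`, `createVersion`, and `selectVersionDiff` are exported from `@lix-js/sdk`, and that the active version can be looked up by joining the `active_version` and `version` views (that exact query does not appear in this diff).

```ts
import { openLix, createVersion, selectVersionDiff } from "@lix-js/sdk";

const lix = await openLix({});

// Look up the currently active version.
// Assumption: `active_version` and `version` are queryable views with these columns.
const activeVersion = await lix.db
	.selectFrom("active_version")
	.innerJoin("version", "version.id", "active_version.version_id")
	.selectAll("version")
	.executeTakeFirstOrThrow();

// Branch off the active version using the new `from` argument.
const source = await createVersion({ lix, from: activeVersion });

// ... make edits on `source` ...

// Diff `source` against the active version (no `target` given) and
// skip entities the source does not touch.
const diff = await selectVersionDiff({ lix, source })
	.where("diff.status", "!=", "unchanged")
	.execute();

for (const row of diff) {
	console.log(row.status, row.schema_key, row.entity_id);
}
```

The same query builder can be narrowed further with `file_id` or `schema_key` filters, as the `@example` blocks in the doc comment above show.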