diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index efce1a9391..5c4c084c32 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -2,9 +2,9 @@ name: test on: push: - branches: [ "main" ] + branches: ["main"] pull_request: - branches: [ "main" ] + branches: ["main"] jobs: test: @@ -15,19 +15,19 @@ jobs: node-version: [20.x, 22.x] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4 - - name: Use Node.js ${{ matrix.node-version }} - uses: actions/setup-node@v4 - with: - node-version: ${{ matrix.node-version }} + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} - - name: Build - run: | - rm package-lock.json - npm install -g npm@latest - npm install - npm run test + - name: Build + run: | + rm package-lock.json + npm install -g npm@latest + npm install + npm run test api-test: runs-on: ubuntu-latest @@ -44,8 +44,63 @@ jobs: uses: actions/setup-node@v4 with: node-version: ${{ matrix.node-version }} - + - name: API Test run: | - pip install -r ./tools/api-tester/ci/requirements.txt - ./tools/api-tester/ci/run.py + pip install -r ./tests/ci/requirements.txt + ./tests/ci/api-test.py + + playwright-test: + runs-on: ubuntu-latest + timeout-minutes: 10 + + strategy: + matrix: + node-version: [22.x] + + steps: + - uses: actions/checkout@v4 + + - name: Use Node.js ${{ matrix.node-version }} + uses: actions/setup-node@v4 + with: + node-version: ${{ matrix.node-version }} + + - name: Install Dependencies + run: npm install + working-directory: ./tests/playwright + + - name: Install Playwright Browsers + run: npx playwright install --with-deps + working-directory: ./tests/playwright + + - name: Playwright Test + run: | + pip install -r ./tests/ci/requirements.txt + ./tests/ci/playwright-test.py + + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: server-logs + path: | + /tmp/backend.log + /tmp/fs-tree-manager.log + retention-days: 3 + + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: config-files + path: | + ./volatile/config/config.json + ./src/fs_tree_manager/config.yaml + ./tests/client-config.yaml + retention-days: 3 + + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: playwright-report + path: tests/playwright/playwright-report/ + retention-days: 3 diff --git a/.gitignore b/.gitignore index 03b96d0586..38b12f0acd 100644 --- a/.gitignore +++ b/.gitignore @@ -45,10 +45,28 @@ jsconfig.json # the exact tree installed in the node_modules folder package-lock.json +# ====================================================================== +# fs tree manager +# ====================================================================== +src/fs_tree_manager/config.yaml + +# ====================================================================== +# playwright test (currently only test the client-replica) +# ====================================================================== +tests/client-config.yaml + +# ====================================================================== +# auto-generated js and js.map files +# ====================================================================== +src/puter-js/src/modules/FileSystem/replica/manager.js +src/puter-js/src/modules/FileSystem/replica/manager.js.map + +# ====================================================================== +# other +# ====================================================================== # AI STUFF AGENTS.md .roo - # source maps 
*.map \ No newline at end of file diff --git a/doc/RFCS/20250821_client_replica_file_system.md b/doc/RFCS/20250821_client_replica_file_system.md new file mode 100644 index 0000000000..054193b41f --- /dev/null +++ b/doc/RFCS/20250821_client_replica_file_system.md @@ -0,0 +1,403 @@ +- Feature Name: Client Replica Filesystem +- Status: In Progress +- Date: 2025-08-21 + +## Table of Contents + +- [Summary](#summary) +- [Motivation](#motivation) +- [Implementation](#implementation) + - [Data Structure](#data-structure) + - [Client-Replica Initialization](#client-replica-initialization) + - [Client-Replica Synchronization](#client-replica-synchronization) + - [File System Operations Upon Fetching](#file-system-operations-upon-fetching) + - [FS-Tree Manager](#fs-tree-manager) + - [FS Hooks](#fs-hooks) + - [Adaptation to the Existing Codebase](#adaptation-to-the-existing-codebase) + - [Anomaly - Stale Fetch Due to Local Update](#anomaly---stale-fetch-due-to-local-update) + - [Anomaly - Stale Fetch Due to Failed Event Notification](#anomaly---stale-fetch-due-to-failed-event-notification) + - [Anomaly - Overlapping FS Syncs](#anomaly---overlapping-fs-syncs) + - [Puter-JS Variables](#puter-js-variables) + - [Client-Replica Lifecycle](#client-replica-lifecycle) + - [Code Location](#code-location) +- [Scalability](#scalability) + - [First Stage - Single Instance](#first-stage---single-instance) + - [Second Stage - Partitioned FS-Tree Manager](#second-stage---partitioned-fs-tree-manager) +- [Fault Tolerance](#fault-tolerance) +- [Metrics](#metrics) + - [Change Propagation Time](#change-propagation-time) +- [Optimization in the Future](#optimization-in-the-future) +- [Failure Scenarios](#failure-scenarios) + - [FS-Tree Manager Failure](#fs-tree-manager-failure) + - [FS-Update Notification Failure](#fs-update-notification-failure) +- [Alternatives and Trade-offs](#alternatives-and-trade-offs) + - [Last-Updated Time for "Stale Replica Fetch"](#last-updated-time-for-stale-replica-fetch) + - [Alternative Storage Models](#alternative-storage-models) +- [TODO](#todo) + +## Summary + +**Client Replica Filesystem** is a mechanism that keeps a **full replica** of a user’s filesystem tree on the client and regularly syncs updates from the server. This feature allows: + +* Rapid file system operations for read-only APIs such as `stat`, `readdir`, and `search`. No network round trips are needed. +* Lower network I/O along with reduced database and CPU load on the server. + +## Motivation + +The **puter filesystem** is a critical component of Puter: it provides a POSIX-like filesystem interface to `puter-js` and powers the filesystem operations in the GUI web client. APIs provided by the filesystem include: + +- Read-only APIs: `stat`, `readdir`, `search`. +- Write APIs: `mkdir`, `write`, `copy`, `move`, `rename`, `delete`, etc. + +Currently, all of these operations are handled through the synchronous HTTP API and suffer from latency issues caused by network round trips and database index contention. For example, when a user opens a folder in the GUI web client, the request goes all the way to the database to find out what is inside the folder. There are 20 million filesystem entries in the database, and latency will keep increasing as the number of files grows. + +To tackle this issue, we propose maintaining a **full replica** of the filesystem rooted at the user’s home directory on the client (e.g., for user Tim, all filesystem nodes under `/Tim` are stored locally).
This allows users to perform read-only operations on the client replica without waiting for a server response. Updates to the filesystem will be fetched from the server periodically. + +![](assets/20251008_134412_puter-client_replica_overview.drawio.svg) + +Network protocol used: + +![](assets/20251027_174627_puter-client_replica_network.drawio.svg) + +## Implementation + +### Data Structure + +A **Merkle tree** is used to quickly compare two file system trees and synchronize them by sending only the differences. + +In our implementation, we use two key ideas: + +1. **Bidirectional Nodes** + Each node stores references to both its parent and children. + + * **Top-down traversal**: used for tree comparison and path lookup. + * **Bottom-up traversal**: used to recalculate hashes when a node is updated. +2. **Heap (Index by UUID)** + We maintain a heap-like structure (UUID → node map) to: + + * Enable fast node lookups by UUID. + * Prevent duplicate nodes in the tree. + +> A Merkle tree is a hash tree where leaves are hashes of the values of individual nodes. Parent nodes higher in the tree are hashes of their respective children. The principal advantage of Merkle tree is that each branch of the tree can be checked independently without requiring nodes to download the entire tree or the entire data set. Moreover, Merkle trees help in reducing the amount of data that needs to be transferred while checking for inconsistencies among replicas. For instance, if the hash values of the root of two trees are equal, then the values of the leaf nodes in the tree are equal and the nodes require no synchronization. If not, it implies that the values of some replicas are different. In such cases, the nodes may exchange the hash values of children and the process continues until it reaches the leaves of the trees, at which point the hosts can identify the nodes that are “out of sync”. + +### Client-Replica Initialization + +Both initialization and synchronization are done via websocket to save network traffic. + +The initial fetch is done via the websocket event `replica/fetch`. + +### Client-Replica Synchronization + +Since CRDTs are not used, synchronization between client and server is one-way — the client only fetches changes from the server. Each node in the tree includes a `hash` field that is the hash of **all its children's hashes + its own metadata**. So it is safe to say two trees are identical if and only if their root nodes have the same hash. + +Synchronization is done via the websocket event `replica/pull_diff`. + +The client starts a sync by sending a request to the server: + +```json +{ + "pull_request": [ + { + "uuid": "", + "merkle_hash": "" + } + ] +} +``` + +The server sends push requests when there are differences between the client and the server. This action is simple: it just sends the requested nodes and their children to the client. + +```json +{ + "push_request": [ + { + "uuid": "", + "merkle_hash": "", + "fs_entry": "...", + "children": [ + { + "uuid": "", + "merkle_hash": "", + "fs_entry": "..." + }, + { + "uuid": "", + "merkle_hash": "", + "fs_entry": "..." + }, + { + "uuid": "", + "merkle_hash": "", + "fs_entry": "..." + }, + { + "uuid": "", + "merkle_hash": "", + "fs_entry": "..." + } + ] + } + ] +} +``` + +The client performs the following actions in sequence: + +1. Update the fs_entry for the level-1 node. +2. Compare the children list with the client-replica. + 2.a For nodes with the same uuid and hash, skip. + 2.b For nodes with the same uuid and a different hash, update the fs_entry for the node.
Then add the node to the next pull request (as a level-1 node). + 2.c For nodes that are missing from the server response, remove them and their descendants from the local replica. + 2.d For nodes that are missing from the client-replica, add them to the local replica. Then add each such node to the next pull request (as a level-1 node). +3. Send the next pull request to the server if there are any nodes to update. +4. Stop when 1) there are no nodes to update or 2) the server response is empty. + +There are some details to consider: + +- Client Memory Usage: In the POC implementation, a tree consisting of 100K nodes takes around 10MB of browser memory. A hard limit of 20MB (i.e., 200K nodes) can be set on the server side to avoid taking too much memory. When a user has too many file nodes under their home directory, the server can send an error back to the client. +- Initialization Time: Given the data size mentioned above, initialization will finish within 1 second, but it is still worthwhile to run it in a background task to avoid blocking the UI thread. +- Permission: Permission checks should be enforced on both the client side and the server side. A user can only fetch the tree rooted at their own home directory. A simpler design is to remove the args from `puter.fs.fetch_tree` and make it "fetch all files for the current user". + +### File System Operations Upon Fetching + +To keep the system consistent, the local replica will work with all existing file system APIs except `read` and `write`. A simple implementation is to add a branch for the local replica: + +```js +const readdir = async function (...args) { + // ... (existing code) + + if (this.local_replica.available) { + return this.local_replica.readdir(options.path); + } + + // ... (existing code which fetches from server) +} +``` + +### FS-Tree Manager + +The FS-Tree Manager is a standalone service that manages the FS trees. + +- It only creates a new in-memory FS tree when a user requests it and memory usage is below the threshold. +- It periodically purges FS trees that haven't been synced for a while. +- It periodically purges FS trees that haven't been accessed for a while. + +### FS Hooks + +#### Hooks in Puter Backend + +NB: Putting hooks in WSPushService may cause duplicate events; remove them in the future.
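Each backend hook follows the same fire-and-forget pattern: run the original filesystem operation, then notify the FS-Tree Manager in a detached async block so that a manager outage can never fail or slow down the user's request. A condensed sketch of that pattern, mirroring the `hl_remove.js` change in this PR (the wrapper name below is illustrative; the real hook code sits inline in the operations listed next):

```js
// Condensed from the hl_remove.js hook in this PR. `runRemoveWithHook` is an
// illustrative wrapper name; in the actual code the hook lives inside HLRemove._run().
const { sendFSRemove } = require('../../routers/filesystem_api/fs_tree_manager/common');

async function runRemoveWithHook(values, ll_operation) {
    // Perform the normal low-level operation first.
    const result = await ll_operation.run(values);

    // Fire-and-forget notification to the FS-Tree Manager: not awaited, so a
    // failure only logs an error and never breaks the user's filesystem call.
    (async () => {
        try {
            const target = values.target;
            const uuid = target.entry.uuid || target.entry.uid;
            await sendFSRemove(values.user.id, uuid);
        } catch ( e ) {
            console.error(e);
        }
    })();

    return result;
}
```

The hooked operations on the backend side are: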
+ +- [X] mkdir (`fs.create.*` event) (code: `src/backend/src/services/WSPushService.js`) +- [X] new file + - code: `src/backend/src/filesystem/hl_operations/hl_mkdir.js` + - implementation: newFSEntry +- [X] write file + - code: `src/backend/src/filesystem/hl_operations/hl_write.js` + - implementation: newFSEntry +- [X] rename (code: `src/backend/src/routers/filesystem_api/rename.js`) +- [X] move (`fs.move.*` event) (code: `src/backend/src/services/WSPushService.js`) + - TODO: move dir (with children) does not work +- [X] delete file/dir (code: `src/backend/src/filesystem/hl_operations/hl_remove.js`) + +#### Hooks in Puter-JS + +- [X] mkdir + + - code: `src/puter-js/src/modules/FileSystem/operations/mkdir.js` + - implementation: newFSEntry +- [ ] new file +- [ ] write file +- [X] rename + + - code: `src/puter-js/src/modules/FileSystem/operations/rename.js` + - implementation: dedicated rename api (since the complete fsentry is not available) +- [X] move + + - code: `src/puter-js/src/modules/FileSystem/operations/move.js` + - implementation: removeFSEntry + newFSEntry +- [X] delete file/dir + + - code: `src/puter-js/src/modules/FileSystem/operations/deleteFSEntry.js` + - implementation: removeFSEntry + findNodeByPath (since only the path is available) +- [X] stat (code: `src/puter-js/src/modules/FileSystem/operations/stat.js`) +- [X] readdir (code: `src/puter-js/src/modules/FileSystem/operations/readdir.js`) +- [ ] search + +### Adaptation to the Existing Codebase + +#### FSEntry Parent + +As of now, there are 4 attributes in an fsentry that are related to the parent: + +- `parent_id` +- `parent_uid` +- `dirname` +- `dirpath` + +`parent_id`/`parent_uid` are defined as database columns ([link](https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/database/sqlite_setup/0001_create-tables.sql#L82-L83)) and there are some subtle differences: + +- `parent_id` may be an int id or a string uuid. +- `parent_uid` is a string uuid most of the time. + +`dirname`/`dirpath` are calculated as part of the business logic ([link](https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/filesystem/FSNodeContext.js#L829-L830)) and are often returned to the client. + +- `dirname` may be the last part of `path` or the whole `path`. +- `dirpath` is always the whole `path`. + +`parent_id`/`parent_uid`/`dirname`/`dirpath` are consistent with each other most of the time, but may be out of sync in some cases (e.g., the move operation). The receivers (i.e., puter-js, fs-tree-manager) may validate the consistency of these attributes, but **MUST** throw an error if they are inconsistent. Other approaches such as failing silently or falling back to one of the values are **PROHIBITED**, since the inconsistency would propagate during the sync process and be hard to diagnose. + +Fixes for such inconsistencies should be made inside the puter backend and marked as `client-replica patch`. + +FSEntry receivers should rely on the `parent_uid` field.
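To illustrate the rule above, here is a minimal sketch of the kind of guard a receiver could run on every incoming fsentry. `assertParentConsistency` and `resolveUuidToPath` are hypothetical names for this sketch, not functions that exist in the codebase:

```js
// Hypothetical guard (not part of this PR): receivers trust parent_uid, but
// refuse to ingest an entry whose parent-related attributes contradict each other.
function assertParentConsistency(fsEntry, resolveUuidToPath) {
    const { parent_uid, dirpath } = fsEntry;
    if ( parent_uid === undefined || dirpath === undefined ) return;

    // resolveUuidToPath looks the parent up in the already-ingested tree and
    // returns its full path, or null if the parent is not known yet.
    const parentPath = resolveUuidToPath(parent_uid);
    if ( parentPath !== null && parentPath !== dirpath ) {
        // PROHIBITED alternatives: ignoring the mismatch or falling back to one
        // of the values — the inconsistency would propagate through later syncs.
        throw new Error(`inconsistent fsentry parent: parent_uid=${parent_uid} ` +
            `resolves to "${parentPath}" but dirpath is "${dirpath}"`);
    }
}
```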
+ +#### FSEntry ID/UID/UUID/MYSQL_ID + +TODO + +- uuid may be missing from it +- id may be a string uuid +- uid is often seen in the fsentry; it is a string uuid most of the time + +#### User ID/UUID + +TODO + +- id is an int id most of the time +- id is more accessible than uuid (TODO: explain why) + +#### Heterogeneous FSEntry + +The FS-Tree Manager accepts FSEntries from 2 different sources: + +- database, fields: [link](https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/database/sqlite_setup/0001_create-tables.sql#L70) +- puter backend, which does some post-processing in `getSafeEntry` ([link](https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/filesystem/FSNodeContext.js#L771)), including but not limited to: + + - add dirname, dirpath + - add id, uid, remove uuid + - remove user_id + - remove bucket, bucket_region + - bool/int is_dir -> boolean + - int/other size -> int + +These differences pose 3 challenges for the FS-Tree Manager: + +1. It fetches the tree from the database and then pushes it to the client without post-processing, which leads to inconsistent FSEntries from puter-js's point of view. +2. It has to maintain FSEntries in 2 different formats, which is error-prone. +3. There is a high chance of inconsistency between the in-memory FS Tree and the database. + +To cope with these challenges, we propose the following workarounds: + +- Just return the raw FSEntry to the client for now. Adapt to the post-processing format in the future. +- Store both formats in the FS-Tree Manager for now. Add a normalizer in the input procedure in the future. +- Drop the in-memory FS Tree directly during “anti-entropy sync.” + +### Anomaly - Stale Fetch Due to Local Update + +A stale fetch can happen immediately after a local update: + +1. At time `t`, an FS update happens and the local replica is updated. +2. At time `t + 1`, a sync happens and the client fetches the stale replica from the server. +3. At time `t + 2`, the FS update event reaches the FS-Tree Manager and the in-memory FS Tree is updated. + +The nature of this anomaly is that the FS update has to be reflected in the client-replica as soon as possible so that the read APIs can see it, while there may be a delay before the FS update reaches the FS-Tree Manager. + +The naive solution is to stop periodic syncs for 3 seconds after any local update. + +A better solution is to introduce `last_updated_time` on all replicas, but it brings other pitfalls such as clock skew and extra complexity. + +TODO: add a diagram so it is easier to understand. + +### Anomaly - Stale Fetch Due to Failed Event Notification + +TODO + +### Anomaly - Overlapping FS Syncs + +TODO + +### Puter-JS Variables + +- `puter.fs.replica.available` - whether the client-replica is available +- `puter.fs.replica.last_local_update` - the timestamp of the last local update +- `puter.fs.replica.setDebug(true/false)` - toggles the debug widget and logs; may be merged with `puter.debugMode` in the future +- `puter.fs.replica.fs_tree` - the in-memory FS Tree, should only be used by internal code +- `puter.fs.replica.local_read` - count of local read operations performed by puter-js, used for debugging +- `puter.fs.replica.remote_read` - count of remote read operations performed by puter-js, used for debugging + +### Client-Replica Lifecycle + +1. Fetch the replica from the server on `puter.setAuthToken`, establishing a websocket connection if it is missing. +2. On websocket connected, start the pull diff process. +3.
On websocket disconnected, stop the pull diff process. + +**Race Condition**: + +- `fetch replica` and `pull diff` are protected by a lock. + +### Code Location + +- `src/puter-js/src/modules/FileSystem/replica` - puter-js client +- `src/backend/src/routers/filesystem_api/fs_tree_manager` - puter backend +- `src/fs_tree_manager` - fs-tree-manager service, including the golang server and protobuf definitions +- `doc/RFCS/20250821_client_replica_file_system.md` - this document, which currently includes all information about the client-replica file system + +## Scalability + +### First Stage - Single Instance + +The first stage is to have a single instance of the FS-Tree Manager. We will use the following strategies to avoid out-of-memory (OOM) issues: + +- On server initialization, don't cache any FS tree. +- Only build an FS tree when a request comes in. +- Evict an FS tree from memory when it has not been used for 10 minutes. Use `last_access_time` for the eviction logic. +- Set a hard limit of 4GB for the FS-Tree Manager and refuse to create new FS trees when memory usage reaches the limit. + +![](assets/20251008_134849_puter-client_replica_deployment1.drawio.svg) + +### Second Stage - Partitioned FS-Tree Manager + +Use consistent hashing (by userid) to partition the FS-Tree Manager. + +TODO: Add more details on how to add/remove instances. + +TODO: We may need a GUI control panel for partition management. + +## Fault Tolerance + +TODO: + +scenario 1: FS-Tree Manager is unavailable on all APIs. + +scenario 2: FS-Tree Manager is only unavailable on fetch/sync APIs. + +scenario 3: FS-Tree Manager is only unavailable on fs update APIs. + +## Metrics + +### Change Propagation Time + +The time it takes for a change made on one client (such as creating, renaming, or deleting a file or folder) to appear and become visible on another client. + +The **Change Propagation Time** under the original synchronization model is negligible. With the new model, the **Average Change Propagation Time** will be 6 seconds, and the **Maximum Change Propagation Time** will be 15 seconds when internal services are functioning normally. + +## Optimization in the Future + +- In the initial implementation, the FS-Tree Manager can only serve a request if it holds the entire FS tree in memory. We can optimize this by holding only the root node in memory and fetching the children on demand. +- Replace some parts of the HTTP API with websocket to reduce round trips and latency. + +## Failure Scenarios + +### FS-Tree Manager Failure + +### FS-Update Notification Failure + +## Alternatives and Trade-offs + +### Last-Updated Time for "Stale Replica Fetch" + +### Alternative Storage Models + +## TODO + +- [ ] puter-js readdir: support paths with `~` (e.g., `~/Desktop`) diff --git a/doc/RFCS/assets/20251008_134412_puter-client_replica_overview.drawio.svg b/doc/RFCS/assets/20251008_134412_puter-client_replica_overview.drawio.svg new file mode 100644 index 0000000000..b9dd667ad5 --- /dev/null +++ b/doc/RFCS/assets/20251008_134412_puter-client_replica_overview.drawio.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text content: overview diagram labels — puter-js, puter backend, fs-tree manager, database (fsentries table); fs read (stat, readdir) and fs write (rmdir, rename, mv, ...) flows; fs tree for /Tim, fs tree for /Alice, client-replica for /Tim; fetch update (every 5s), auto switch, auth, background initialize (for users in the whitelist), anti-entropy sync (once every hour), notify the fs update, initialize; call types: read call (block), write call (non-block), non-block call, non-block cronjob. Legend: arrows always point to the data receiver; "block" means puter-js APIs will wait for it, "non-block" means they will not.]
\ No newline at end of file diff --git a/doc/RFCS/assets/20251008_134726_puter-client_replica_network.drawio.svg b/doc/RFCS/assets/20251008_134726_puter-client_replica_network.drawio.svg new file mode 100644 index 0000000000..6054162ae0 --- /dev/null +++ b/doc/RFCS/assets/20251008_134726_puter-client_replica_network.drawio.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text content: earlier network diagram labels — same components, flows, and legend as the overview diagram, plus transport notes: use websocket to avoid DDOS; use grpc, since protobuf is used for uniform data structures; direct db read.]
\ No newline at end of file diff --git a/doc/RFCS/assets/20251008_134849_puter-client_replica_deployment1.drawio.svg b/doc/RFCS/assets/20251008_134849_puter-client_replica_deployment1.drawio.svg new file mode 100644 index 0000000000..f53937b8d3 --- /dev/null +++ b/doc/RFCS/assets/20251008_134849_puter-client_replica_deployment1.drawio.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text content: single-instance deployment diagram labels — database (fsentries table); fs-tree manager holding fs trees for /Tim and /Alice; two puter backend instances, each serving puter-js via fs read (stat, readdir) and fs write (rmdir, rename, mv, ...).]
\ No newline at end of file diff --git a/doc/RFCS/assets/20251027_174627_puter-client_replica_network.drawio.svg b/doc/RFCS/assets/20251027_174627_puter-client_replica_network.drawio.svg new file mode 100644 index 0000000000..61e1b73468 --- /dev/null +++ b/doc/RFCS/assets/20251027_174627_puter-client_replica_network.drawio.svg @@ -0,0 +1,4 @@ + + + +
[drawio SVG text content: network protocol diagram labels — same components, flows, and legend as the overview diagram, with transport labels: websocket, grpc, direct db read.]
\ No newline at end of file diff --git a/eslint.config.js b/eslint.config.js index 2ee7cabb5d..a3fc70b872 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -10,6 +10,7 @@ export default defineConfig([ // TypeScript support block { files: ['**/*.ts'], + ignores: ['**/*.test.ts', '**/*.spec.ts', '**/test/**', '**/tests/**'], languageOptions: { parser: tseslintParser, parserOptions: { @@ -29,6 +30,26 @@ export default defineConfig([ '@typescript-eslint/consistent-type-definitions': ['error', 'interface'], }, }, + // TypeScript support for test files (without project requirement) + { + files: ['**/*.test.ts', '**/*.spec.ts', '**/test/**/*.ts', '**/tests/**/*.ts'], + languageOptions: { + parser: tseslintParser, + parserOptions: { + ecmaVersion: 'latest', + sourceType: 'module', + }, + }, + plugins: { + '@typescript-eslint': tseslintPlugin, + }, + rules: { + '@typescript-eslint/no-explicit-any': 'warn', + '@typescript-eslint/no-unused-vars': ['error', { argsIgnorePattern: '^_' }], + '@typescript-eslint/ban-ts-comment': 'warn', + '@typescript-eslint/consistent-type-definitions': ['error', 'interface'], + }, + }, { plugins: { js, diff --git a/package-lock.json b/package-lock.json index 54cf393767..3a831e0fd4 100644 --- a/package-lock.json +++ b/package-lock.json @@ -18,11 +18,14 @@ "@aws-sdk/client-secrets-manager": "^3.879.0", "@aws-sdk/client-sns": "^3.907.0", "@google/genai": "^1.19.0", + "@grpc/grpc-js": "^1.14.0", "@heyputer/putility": "^1.0.2", "@paralleldrive/cuid2": "^2.2.2", "@stylistic/eslint-plugin-js": "^4.4.1", "dedent": "^1.5.3", "express-xml-bodyparser": "^0.4.1", + "google-protobuf": "^4.0.0", + "grpc-tools": "^1.13.0", "ioredis": "^5.6.0", "javascript-time-ago": "^2.5.11", "json-colorizer": "^3.0.1", @@ -31,11 +34,13 @@ "rollup": "^4.52.4", "simple-git": "^3.25.0", "string-template": "^1.0.0", - "uuid": "^9.0.1" + "uuid": "^9.0.1", + "xxhash-wasm": "^1.1.0" }, "devDependencies": { "@eslint/js": "^9.35.0", "@stylistic/eslint-plugin": "^5.3.1", + "@types/ws": "^8.18.1", "@typescript-eslint/eslint-plugin": "^8.46.1", "@typescript-eslint/parser": "^8.46.1", "chalk": "^4.1.0", @@ -50,6 +55,7 @@ "license-check-and-add": "^4.0.5", "mocha": "^10.6.0", "nodemon": "^3.1.0", + "ts-node": "^10.9.2", "typescript": "^5.4.5", "uglify-js": "^3.17.4", "vite-plugin-static-copy": "^3.1.3", @@ -1162,6 +1168,30 @@ "node": ">=0.1.90" } }, + "node_modules/@cspotcode/source-map-support": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", + "integrity": "sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/trace-mapping": "0.3.9" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@cspotcode/source-map-support/node_modules/@jridgewell/trace-mapping": { + "version": "0.3.9", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.9.tgz", + "integrity": "sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.0.3", + "@jridgewell/sourcemap-codec": "^1.4.10" + } + }, "node_modules/@dabh/diagnostics": { "version": "2.0.8", "resolved": "https://registry.npmjs.org/@dabh/diagnostics/-/diagnostics-2.0.8.tgz", @@ -6791,6 +6821,34 @@ "node": ">=10.13.0" } }, + "node_modules/@tsconfig/node10": { + "version": "1.0.11", + "resolved": 
"https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", + "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node12": { + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/@tsconfig/node12/-/node12-1.0.11.tgz", + "integrity": "sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node14": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@tsconfig/node14/-/node14-1.0.3.tgz", + "integrity": "sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==", + "dev": true, + "license": "MIT" + }, + "node_modules/@tsconfig/node16": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz", + "integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/accepts": { "version": "1.3.7", "resolved": "https://registry.npmjs.org/@types/accepts/-/accepts-1.3.7.tgz", @@ -7131,10 +7189,11 @@ } }, "node_modules/@types/node": { - "version": "24.7.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-24.7.1.tgz", - "integrity": "sha512-CmyhGZanP88uuC5GpWU9q+fI61j2SkhO3UGMUdfYRE6Bcy0ccyzn1Rqj9YAB/ZY4kOXmNf0ocah5GtphmLMP6Q==", + "version": "24.8.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.8.0.tgz", + "integrity": "sha512-5x08bUtU8hfboMTrJ7mEO4CpepS9yBwAqcL52y86SWNmbPX8LVbNs3EP4cNrIZgdjk2NAlP2ahNihozpoZIxSg==", "license": "MIT", + "peer": true, "dependencies": { "undici-types": "~7.14.0" } @@ -7277,6 +7336,16 @@ "integrity": "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==", "license": "MIT" }, + "node_modules/@types/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@typescript-eslint/eslint-plugin": { "version": "8.46.1", "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.46.1.tgz", @@ -7956,6 +8025,19 @@ "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, + "node_modules/acorn-walk": { + "version": "8.3.4", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", + "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "acorn": "^8.11.0" + }, + "engines": { + "node": ">=0.4.0" + } + }, "node_modules/agent-base": { "version": "7.1.4", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", @@ -8158,6 +8240,13 @@ "node": ">=10" } }, + "node_modules/arg": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", + "integrity": "sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==", + "dev": true, + "license": "MIT" + }, "node_modules/argle": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/argle/-/argle-1.1.2.tgz", @@ -9660,6 +9749,13 @@ "node": ">=10.0.0" } }, + "node_modules/create-require": { + "version": "1.1.1", + "resolved": 
"https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", + "integrity": "sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==", + "dev": true, + "license": "MIT" + }, "node_modules/cross-fetch": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.2.0.tgz", @@ -11835,6 +11931,12 @@ "node": ">=14" } }, + "node_modules/google-protobuf": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/google-protobuf/-/google-protobuf-4.0.0.tgz", + "integrity": "sha512-b8wmenhUMf2WNL+xIJ/slvD/hEE6V3nRnG86O2bzkBrMweM9gnqZE1dfXlDjibY3aXJXDNbAHepevYyQ7qWKsQ==", + "license": "(BSD-3-Clause AND Apache-2.0)" + }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", @@ -11891,6 +11993,19 @@ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", "license": "MIT" }, + "node_modules/grpc-tools": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/grpc-tools/-/grpc-tools-1.13.0.tgz", + "integrity": "sha512-7CbkJ1yWPfX0nHjbYG58BQThNhbICXBZynzCUxCb3LzX5X9B3hQbRY2STiRgIEiLILlK9fgl0z0QVGwPCdXf5g==", + "hasInstallScript": true, + "dependencies": { + "@mapbox/node-pre-gyp": "^1.0.5" + }, + "bin": { + "grpc_tools_node_protoc": "bin/protoc.js", + "grpc_tools_node_protoc_plugin": "bin/protoc_plugin.js" + } + }, "node_modules/gtoken": { "version": "7.1.0", "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz", @@ -13663,6 +13778,13 @@ "semver": "bin/semver.js" } }, + "node_modules/make-error": { + "version": "1.3.6", + "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", + "integrity": "sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==", + "dev": true, + "license": "ISC" + }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -17707,6 +17829,91 @@ "typescript": ">=4.8.4" } }, + "node_modules/ts-loader": { + "version": "9.5.4", + "resolved": "https://registry.npmjs.org/ts-loader/-/ts-loader-9.5.4.tgz", + "integrity": "sha512-nCz0rEwunlTZiy6rXFByQU1kVVpCIgUpc/psFiKVrUwrizdnIbRFu8w7bxhUF0X613DYwT4XzrZHpVyMe758hQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "enhanced-resolve": "^5.0.0", + "micromatch": "^4.0.0", + "semver": "^7.3.4", + "source-map": "^0.7.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "typescript": "*", + "webpack": "^5.0.0" + } + }, + "node_modules/ts-loader/node_modules/source-map": { + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz", + "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">= 12" + } + }, + "node_modules/ts-node": { + "version": "10.9.2", + "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz", + "integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@cspotcode/source-map-support": "^0.8.0", + "@tsconfig/node10": "^1.0.7", + "@tsconfig/node12": "^1.0.7", + "@tsconfig/node14": "^1.0.0", + "@tsconfig/node16": "^1.0.2", + "acorn": "^8.4.1", + "acorn-walk": "^8.1.1", + "arg": "^4.1.0", + "create-require": "^1.1.0", + "diff": 
"^4.0.1", + "make-error": "^1.1.1", + "v8-compile-cache-lib": "^3.0.1", + "yn": "3.1.1" + }, + "bin": { + "ts-node": "dist/bin.js", + "ts-node-cwd": "dist/bin-cwd.js", + "ts-node-esm": "dist/bin-esm.js", + "ts-node-script": "dist/bin-script.js", + "ts-node-transpile-only": "dist/bin-transpile.js", + "ts-script": "dist/bin-script-deprecated.js" + }, + "peerDependencies": { + "@swc/core": ">=1.2.50", + "@swc/wasm": ">=1.2.50", + "@types/node": "*", + "typescript": ">=2.7" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "@swc/wasm": { + "optional": true + } + } + }, + "node_modules/ts-node/node_modules/diff": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", + "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, "node_modules/tslib": { "version": "2.8.1", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", @@ -17993,6 +18200,13 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/v8-compile-cache-lib": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz", + "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==", + "dev": true, + "license": "MIT" + }, "node_modules/validator": { "version": "13.15.15", "resolved": "https://registry.npmjs.org/validator/-/validator-13.15.15.tgz", @@ -18859,6 +19073,12 @@ "node": ">=0.4" } }, + "node_modules/xxhash-wasm": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/xxhash-wasm/-/xxhash-wasm-1.1.0.tgz", + "integrity": "sha512-147y/6YNh+tlp6nd/2pWq38i9h6mz/EuQ6njIrmW8D1BS5nCqs0P6DG+m6zTGnNz5I+uhZ0SHxBs9BsPrwcKDA==", + "license": "MIT" + }, "node_modules/y18n": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/y18n/-/y18n-4.0.3.tgz", @@ -19087,6 +19307,16 @@ "decamelize": "^1.2.0" } }, + "node_modules/yn": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", + "integrity": "sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/yocto-queue": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", @@ -20082,8 +20312,11 @@ "@heyputer/putility": "^1.0.3" }, "devDependencies": { + "@types/node": "^24.8.0", "concurrently": "^8.2.2", "http-server": "^14.1.1", + "ts-loader": "^9.5.4", + "typescript": "^5.9.3", "webpack-cli": "^5.1.4" } }, diff --git a/package.json b/package.json index 27af4afabc..98520c66ed 100644 --- a/package.json +++ b/package.json @@ -13,6 +13,7 @@ "devDependencies": { "@eslint/js": "^9.35.0", "@stylistic/eslint-plugin": "^5.3.1", + "@types/ws": "^8.18.1", "@typescript-eslint/eslint-plugin": "^8.46.1", "@typescript-eslint/parser": "^8.46.1", "chalk": "^4.1.0", @@ -27,6 +28,7 @@ "license-check-and-add": "^4.0.5", "mocha": "^10.6.0", "nodemon": "^3.1.0", + "ts-node": "^10.9.2", "typescript": "^5.4.5", "uglify-js": "^3.17.4", "vite-plugin-static-copy": "^3.1.3", @@ -62,11 +64,14 @@ "@aws-sdk/client-secrets-manager": "^3.879.0", "@aws-sdk/client-sns": "^3.907.0", "@google/genai": "^1.19.0", + "@grpc/grpc-js": "^1.14.0", "@heyputer/putility": "^1.0.2", "@paralleldrive/cuid2": "^2.2.2", "@stylistic/eslint-plugin-js": "^4.4.1", "dedent": "^1.5.3", "express-xml-bodyparser": 
"^0.4.1", + "google-protobuf": "^4.0.0", + "grpc-tools": "^1.13.0", "ioredis": "^5.6.0", "javascript-time-ago": "^2.5.11", "json-colorizer": "^3.0.1", @@ -75,7 +80,8 @@ "rollup": "^4.52.4", "simple-git": "^3.25.0", "string-template": "^1.0.0", - "uuid": "^9.0.1" + "uuid": "^9.0.1", + "xxhash-wasm": "^1.1.0" }, "optionalDependencies": { "sharp": "^0.34.4", diff --git a/src/backend/src/filesystem/hl_operations/hl_copy.js b/src/backend/src/filesystem/hl_operations/hl_copy.js index 000ba5f3c3..182f40f069 100644 --- a/src/backend/src/filesystem/hl_operations/hl_copy.js +++ b/src/backend/src/filesystem/hl_operations/hl_copy.js @@ -216,6 +216,7 @@ class HLCopy extends HLFilesystemOperation { await this.copied.awaitStableEntry(); const response = await this.copied.getSafeEntry({ thumbnail: true }); + return { copied : response, overwritten diff --git a/src/backend/src/filesystem/hl_operations/hl_mkdir.js b/src/backend/src/filesystem/hl_operations/hl_mkdir.js index 94339db7a3..d0aae566e0 100644 --- a/src/backend/src/filesystem/hl_operations/hl_mkdir.js +++ b/src/backend/src/filesystem/hl_operations/hl_mkdir.js @@ -32,6 +32,7 @@ const { is_valid_path } = require('../validation'); const { HLRemove } = require('./hl_remove'); const { LLMkdir } = require('../ll_operations/ll_mkdir'); + class MkTree extends HLFilesystemOperation { static DESCRIPTION = ` High-level operation for making directory trees @@ -373,7 +374,10 @@ class HLMkdir extends HLFilesystemOperation { }); await this.created.awaitStableEntry(); - return await this.created.getSafeEntry(); + const response = await this.created.getSafeEntry(); + + + return response; } const ll_mkdir = new LLMkdir(); @@ -397,6 +401,7 @@ class HLMkdir extends HLFilesystemOperation { } response.requested_path = values.path; + return response; } diff --git a/src/backend/src/filesystem/hl_operations/hl_mklink.js b/src/backend/src/filesystem/hl_operations/hl_mklink.js index 1e053d93e9..08faaa85c5 100644 --- a/src/backend/src/filesystem/hl_operations/hl_mklink.js +++ b/src/backend/src/filesystem/hl_operations/hl_mklink.js @@ -69,7 +69,9 @@ class HLMkLink extends HLFilesystemOperation { }); await created.awaitStableEntry(); - return await created.getSafeEntry(); + const response = await created.getSafeEntry(); + + return response; } } diff --git a/src/backend/src/filesystem/hl_operations/hl_mkshortcut.js b/src/backend/src/filesystem/hl_operations/hl_mkshortcut.js index b2d7775c17..22d68a7816 100644 --- a/src/backend/src/filesystem/hl_operations/hl_mkshortcut.js +++ b/src/backend/src/filesystem/hl_operations/hl_mkshortcut.js @@ -98,7 +98,9 @@ class HLMkShortcut extends HLFilesystemOperation { }); await created.awaitStableEntry(); - return await created.getSafeEntry(); + const response = await created.getSafeEntry(); + + return response; } } diff --git a/src/backend/src/filesystem/hl_operations/hl_remove.js b/src/backend/src/filesystem/hl_operations/hl_remove.js index 9e76c193ea..52fea853f7 100644 --- a/src/backend/src/filesystem/hl_operations/hl_remove.js +++ b/src/backend/src/filesystem/hl_operations/hl_remove.js @@ -16,12 +16,13 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ -const APIError = require("../../api/APIError"); -const { chkperm } = require("../../helpers"); -const { TYPE_DIRECTORY } = require("../FSNodeContext"); -const { LLRmDir } = require("../ll_operations/ll_rmdir"); -const { LLRmNode } = require("../ll_operations/ll_rmnode"); -const { HLFilesystemOperation } = require("./definitions"); +const APIError = require('../../api/APIError'); +const { chkperm } = require('../../helpers'); +const { TYPE_DIRECTORY } = require('../FSNodeContext'); +const { LLRmDir } = require('../ll_operations/ll_rmdir'); +const { LLRmNode } = require('../ll_operations/ll_rmnode'); +const { HLFilesystemOperation } = require('./definitions'); +const { sendFSRemove } = require('../../routers/filesystem_api/fs_tree_manager/common'); class HLRemove extends HLFilesystemOperation { static PARAMETERS = { @@ -29,9 +30,9 @@ class HLRemove extends HLFilesystemOperation { user: {}, recursive: {}, descendants_only: {}, - } + }; - async _run () { + async _run() { const { target, user } = this.values; if ( ! await target.exists() ) { @@ -44,11 +45,42 @@ class HLRemove extends HLFilesystemOperation { if ( await target.get('type') === TYPE_DIRECTORY ) { const ll_rmdir = new LLRmDir(); - return await ll_rmdir.run(this.values); + const result = await ll_rmdir.run(this.values); + + // ================== client-replica hook start ================== + // "remove" hook + (async () => { + try { + const target = this.values.target; + const uuid = target.entry.uuid || target.entry.uid; + await sendFSRemove(user.id, uuid); + } catch( e ) { + console.error(e); + } + })(); + // ================== client-replica hook end ==================== + + return result; } const ll_rmnode = new LLRmNode(); - return await ll_rmnode.run(this.values); + + const result = await ll_rmnode.run(this.values); + + // ================== client-replica hook start ================== + // "remove" hook + (async () => { + try { + const target = this.values.target; + const uuid = target.entry.uuid || target.entry.uid; + await sendFSRemove(user.id, uuid); + } catch( e ) { + console.error(e); + } + })(); + // ================== client-replica hook end ==================== + + return result; } } diff --git a/src/backend/src/filesystem/hl_operations/hl_write.js b/src/backend/src/filesystem/hl_operations/hl_write.js index b89eabe8ae..2c23a7947b 100644 --- a/src/backend/src/filesystem/hl_operations/hl_write.js +++ b/src/backend/src/filesystem/hl_operations/hl_write.js @@ -33,6 +33,7 @@ const { HLFilesystemOperation } = require("./definitions"); const { MkTree } = require("./hl_mkdir"); const { Actor } = require("../../services/auth/Actor"); const { LLCWrite, LLOWrite } = require("../ll_operations/ll_write"); +const { sendFSNew } = require("../../routers/filesystem_api/fs_tree_manager/common"); class WriteCommonFeature { install_in_instance (instance) { @@ -430,6 +431,22 @@ class HLWrite extends HLFilesystemOperation { const response = await this.written.getSafeEntry({ thumbnail: true }); this.checkpoint('after get safe entry'); + // ================== client-replica hook start ================== + // "write" hook + (async () => { + try { + const user_id = values?.user?.id; + if ( ! 
user_id ) { + console.error('user_id is missing'); + return; + } + await sendFSNew(user_id, response); + } catch( e ) { + console.error(e); + } + })(); + // ================== client-replica hook end ==================== + return response; } } diff --git a/src/backend/src/modules/puterfs/lib/PuterFSProvider.js b/src/backend/src/modules/puterfs/lib/PuterFSProvider.js index 40712ddcea..b1c3e4ed7b 100644 --- a/src/backend/src/modules/puterfs/lib/PuterFSProvider.js +++ b/src/backend/src/modules/puterfs/lib/PuterFSProvider.js @@ -229,6 +229,21 @@ class PuterFSProvider extends putility.AdvancedBase { const new_path = _path.join(await new_parent.get('path'), new_name); const svc_fsEntry = services.get('fsEntryService'); + + // ================== client-replica patch start ================== + // patch: the parent_uid should be updated after move operation + { + const new_parent_uid = await new_parent.get('uid'); + node.entry.parent_uid = new_parent_uid; + + const db = services.get('database').get(DB_WRITE, 'filesystem'); + await db.write( + 'UPDATE fsentries SET parent_uid = ? WHERE uuid = ?', + [new_parent_uid, node.uid] + ); + } + // ================== client-replica patch end ==================== + const op_update = await svc_fsEntry.update(node.uid, { ...( await node.get('parent_uid') !== await new_parent.get('uid') diff --git a/src/backend/src/modules/web/WebServerService.js b/src/backend/src/modules/web/WebServerService.js index bd7ea879a0..eda6c1464d 100644 --- a/src/backend/src/modules/web/WebServerService.js +++ b/src/backend/src/modules/web/WebServerService.js @@ -276,13 +276,43 @@ class WebServerService extends BaseService { } }); + console.log('[xiaochen-debug] socketio.use done'); + const context = Context.get(); socketio.on('connection', (socket) => { + console.log('[xiaochen-debug] socketio.on connection handler done'); + socket.on('disconnect', () => { }); socket.on('trash.is_empty', (msg) => { socket.broadcast.to(socket.user.id).emit('trash.is_empty', msg); }); + + // ====================================================================== + // client-replica related handlers + // ====================================================================== + if (config.services?.['client-replica']?.enabled) { + const replicaFetchHandler = require('../../routers/filesystem_api/fs_tree_manager/fetch_replica'); + if (replicaFetchHandler.event && replicaFetchHandler.handler) { + socket.on(replicaFetchHandler.event, (data) => { + replicaFetchHandler.handler(socket, data); + }); + console.log('[xiaochen-debug] event registered: replica/fetch'); + } + + const replicaPullDiffHandler = require('../../routers/filesystem_api/fs_tree_manager/pull_diff'); + if (replicaPullDiffHandler.event && replicaPullDiffHandler.handler) { + socket.on(replicaPullDiffHandler.event, (data) => { + replicaPullDiffHandler.handler(socket, data); + }); + console.log('[xiaochen-debug] event registered: replica/pull_diff'); + } + } + + // ====================================================================== + // other handlers + // ====================================================================== + const svc_event = this.services.get('event'); svc_event.emit('web.socket.connected', { socket, diff --git a/src/backend/src/modules/web/lib/eggspress.js b/src/backend/src/modules/web/lib/eggspress.js index 49ae0571e2..904b97f9d2 100644 --- a/src/backend/src/modules/web/lib/eggspress.js +++ b/src/backend/src/modules/web/lib/eggspress.js @@ -187,9 +187,6 @@ module.exports = function eggspress (route, settings, handler) { 
return next(); } } - if ( config.env === 'dev' && process.env.DEBUG ) { - console.log(`request url: ${req.url}, body: ${JSON.stringify(req.body)}`); - } try { const expected_ctx = res.locals.ctx; const received_ctx = Context.get(undefined, { allow_fallback: true }); diff --git a/src/backend/src/routers/filesystem_api/fs_tree_manager/common.js b/src/backend/src/routers/filesystem_api/fs_tree_manager/common.js new file mode 100644 index 0000000000..279d3c2796 --- /dev/null +++ b/src/backend/src/routers/filesystem_api/fs_tree_manager/common.js @@ -0,0 +1,289 @@ +'use strict'; + +const grpc = require('@grpc/grpc-js'); +const path = require('path'); + +// gRPC generated code +const genDir = path.join(__dirname, '../../../../../fs_tree_manager/js'); +const { + FSTreeManagerClient, +} = require(path.join(genDir, 'fs_tree_manager_grpc_pb.js')); +const { + FSEntry, + NewFSEntryRequest, + RemoveFSEntryRequest, + PurgeReplicaRequest, + PullRequest, + PullRequestItem, + FetchReplicaRequest, +} = require(path.join(genDir, 'fs_tree_manager_pb.js')); + +// protobuf built-in types +const { Struct } = require('google-protobuf/google/protobuf/struct_pb.js'); + +const config = require('../../../config'); + +// Singleton client instance +let clientInstance = null; + +/** + * Get the gRPC client for the FS Tree Manager service + * Returns null if the client-replica service is not configured or disabled + * Creates and caches the client instance on first call + * @returns {FSTreeManagerClient|null} The gRPC client or null if not available + */ +function getClient() { + // Return cached instance if available + if ( clientInstance !== null ) { + return clientInstance; + } + + const clientReplicaConfig = config.services?.['client-replica']; + + // Return null if config is missing or disabled + if ( !clientReplicaConfig || !clientReplicaConfig.enabled ) { + clientInstance = null; + return null; + } + + const fsTreeManagerUrl = clientReplicaConfig.fs_tree_manager_url; + + // Return null if URL is not configured + if ( !fsTreeManagerUrl ) { + clientInstance = null; + return null; + } + + // Create and cache gRPC client + clientInstance = new FSTreeManagerClient(fsTreeManagerUrl, grpc.credentials.createInsecure(), { + // Reconnect backoff (defaults can be slow: ~20s→120s) + // + // ref: + // - https://grpc.github.io/grpc/core/group__grpc__arg__keys.html + // - https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md + 'grpc.initial_reconnect_backoff_ms': 500, + 'grpc.min_reconnect_backoff_ms': 500, + 'grpc.max_reconnect_backoff_ms': 5000, + + // // Keepalive so dead TCPs are detected quickly + // 'grpc.keepalive_time_ms': 15000, // send PING every 15s + // 'grpc.keepalive_timeout_ms': 5000, // wait 5s for PING ack + // 'grpc.keepalive_permit_without_calls': 1, // allow pings when idle + + // // (Optional) be polite about PING cadence + // 'grpc.http2.min_time_between_pings_ms': 10000, + }); + + return clientInstance; +} + +/** + * Sends a new filesystem entry to the gRPC service + * @param {number} userId - The user ID for the request + * @param {Object} metadata - The metadata for the FSEntry + * @returns {Promise} - Resolves when the entry is sent successfully + * @throws {Error} - If the gRPC call fails + */ +async function sendFSNew(userId, metadata) { + return new Promise((resolve, reject) => { + if ( !userId ) { + reject(new Error('User ID is required')); + return; + } + if ( !metadata ) { + reject(new Error('Metadata is required')); + return; + } + + const client = getClient(); + if ( !client ) { + // 
Client-replica service is not available, silently resolve + resolve(); + return; + } + + const fsEntry = buildFsEntry(metadata); + const request = new NewFSEntryRequest(); + request.setUserId(userId); + request.setFsEntry(fsEntry); + + client.newFSEntry(request, (err, _response) => { + if ( err ) { + reject(new Error(`[xiaochen-error] Failed to send fs new entry: ${err.message}`)); + return; + } + // console.log(`[xiaochen-log] sendFSNew: ${userId}, ${metadata.path}`); + resolve(); + }); + }); +} + +/** + * Sends a remove filesystem entry to the gRPC service + * @param {number} userId - The user ID for the request + * @param {string} uuid - The UUID of the FSEntry to remove + * @returns {Promise} - Resolves when the entry is sent successfully + * @throws {Error} - If the gRPC call fails + */ +async function sendFSRemove(userId, uuid) { + return new Promise((resolve, reject) => { + if ( !userId ) { + reject(new Error('User ID is required')); + return; + } + if ( !uuid ) { + reject(new Error('UUID is required')); + return; + } + + const client = getClient(); + if ( !client ) { + // Client-replica service is not available, silently resolve + resolve(); + return; + } + + const request = new RemoveFSEntryRequest(); + request.setUserId(userId); + request.setUuid(uuid); + + client.removeFSEntry(request, (err, _response) => { + if ( err ) { + reject(new Error(`[xiaochen-error] Failed to send fs remove entry: ${err.message}`)); + return; + } + // console.log(`[xiaochen-log] sendFSRemove: ${userId}, ${uuid}`); + resolve(); + }); + }); +} + +async function sendFSPurge(userId) { + return new Promise((resolve, reject) => { + if ( !userId ) { + reject(new Error('User ID is required')); + return; + } + + const client = getClient(); + if ( !client ) { + // Client-replica service is not available, silently resolve + resolve(); + return; + } + + const request = new PurgeReplicaRequest(); + request.setUserId(userId); + + client.purgeReplica(request, (err, _response) => { + if ( err ) { + reject(new Error(`[xiaochen-error] Failed to send fs purge replica: ${err.message}`)); + return; + } + resolve(); + }); + }); +}; + +/** + * Recursively sanitize values so they can be accepted by google.protobuf.Struct. + * - undefined -> dropped + * - Date -> ISO string + * - BigInt -> string + * - Buffer/Uint8Array -> base64 string + * - Map -> plain object + * - Set -> array + * - Other non-JSON types -> string fallback + * + * NB: This function MUST mimic the behavior of safe-stable-stringify to ensure consistency. + * + * Notes on undefined: + * - safe-stable-stringify.stringify has the same behavior on undefined as JSON.stringify (https://github.com/BridgeAR/safe-stable-stringify/blob/bafd93def367f38c4f5ebd598fde7970f331ca9c/test.js#L513) + * - undefined in object is dropped + * - undefined in array is converted to null + * - undefined in map/set is dropped + * - Another solution is to use safe-stable-stringify.stringify + parse, it's safer and slower. 
+ */ +function sanitizeForStruct(value) { + if ( value === undefined ) { + return null; + } + if ( value === null ) return null; + + const t = typeof value; + if ( t === 'string' || t === 'number' || t === 'boolean' ) return value; + + if ( Array.isArray(value) ) { + return value.map(sanitizeForStruct); + } + + if ( value instanceof Date ) return value.toISOString(); + + if ( typeof Buffer !== 'undefined' && Buffer.isBuffer(value) ) { + return value.toString('base64'); + } + if ( value instanceof Uint8Array ) { + return Buffer.from(value).toString('base64'); + } + + if ( value instanceof Map ) { + // TODO: Mimic the behavior of safe-stable-stringify on "undefined" values. + return Object.fromEntries(Array.from(value.entries()).map(([k, v]) => [k, sanitizeForStruct(v)])); + } + if ( value instanceof Set ) { + // TODO: Mimic the behavior of safe-stable-stringify on "undefined" values. + return Array.from(value).map(sanitizeForStruct); + } + + if ( value && value.constructor === Object ) { + const out = {}; + for ( const [k, v] of Object.entries(value) ) { + // Mimic the behavior of safe-stable-stringify. + if ( v === undefined ) { + continue; + } + out[k] = sanitizeForStruct(v); + } + return out; + } + + if ( t === 'bigint' ) return value.toString(); + + if ( typeof value.toJSON === 'function' ) { + return sanitizeForStruct(value.toJSON()); + } + + return String(value); +} + +/** + * Build an FSEntry message from a plain JS metadata object. + * @param {Object} metadataObj - The raw metadata object. + * @returns {FSEntry} + */ +function buildFsEntry(metadataObj) { + const sanitized = sanitizeForStruct(metadataObj); + const struct = Struct.fromJavaScript(sanitized); + const fsEntry = new FSEntry(); + fsEntry.setMetadata(struct); + return fsEntry; +} + +module.exports = { + // gRPC client function and protobuf classes + getClient, + FSEntry, + NewFSEntryRequest, + RemoveFSEntryRequest, + PullRequest, + PullRequestItem, + FetchReplicaRequest, + Struct, + + // Helper functions + sendFSNew, + sendFSRemove, + sendFSPurge, + buildFsEntry, + sanitizeForStruct, +}; diff --git a/src/backend/src/routers/filesystem_api/fs_tree_manager/fetch_replica.js b/src/backend/src/routers/filesystem_api/fs_tree_manager/fetch_replica.js new file mode 100644 index 0000000000..d78912834d --- /dev/null +++ b/src/backend/src/routers/filesystem_api/fs_tree_manager/fetch_replica.js @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2024-present Puter Technologies Inc. + * + * This file is part of Puter. + * + * Puter is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +'use strict'; + +const { Context } = require('../../../util/context'); + +// -----------------------------------------------------------------------// +// WebSocket handler for replica/fetch +// -----------------------------------------------------------------------// +module.exports = { + event: 'replica/fetch', + handler: async (socket, _data) => { + console.log('[xiaochen-debug] fetch_replica.handler, socket.user.id:', socket.user.id); + + // const svc_permission = Context.get('services').get('permission'); + // const can_access = await svc_permission.check('endpoint:replica/fetch'); + // if ( ! can_access ) { + // return socket.emit('replica/fetch/error', { + // success: false, + // error: { message: 'permission denied' }, + // }); + // } + + // Import gRPC client and protobuf classes from common + const { + getClient, + FetchReplicaRequest, + } = require('./common'); + + const client = getClient(); + if ( !client ) { + // Client-replica service is not available + return socket.emit('replica/fetch/error', { + success: false, + error: { message: 'client-replica service is not available' }, + }); + } + + // Build the request message + const requestMsg = new FetchReplicaRequest(); + requestMsg.setUserId(socket.user.id); + + client.fetchReplica(requestMsg, (err, resp) => { + if ( err ) { + console.error(`FetchReplica error: ${err.message}`); + return socket.emit('replica/fetch/error', { + success: false, + error: { message: 'failed to fetch replica', details: err.message }, + }); + } + + // Convert protobuf response to plain JavaScript + // The response is directly a MerkleTree, not wrapped in another object + + // Get the nodes map and root UUID + const nodesMap = resp.getNodesMap(); + const rootUuid = resp.getRootUuid(); + + // Convert nodes map to plain JavaScript object + const nodes = {}; + nodesMap.forEach((node, nodeUuid) => { + // Convert the map-based children_uuids to a JavaScript object + const childrenUuidsMap = node.getChildrenUuidsMap(); + const childrenUuids = {}; + childrenUuidsMap.forEach((value, key) => { + childrenUuids[key] = value; + }); + + nodes[nodeUuid] = { + uuid: node.getUuid(), + merkle_hash: node.getMerkleHash(), + children_uuids: childrenUuids, + parent_uuid: node.getParentUuid(), + fs_entry: node.getFsEntry() ? node.getFsEntry().getMetadata().toJavaScript() : {}, + }; + }); + + socket.emit('replica/fetch/success', { + success: true, + data: { + root_uuid: rootUuid, + nodes: nodes, + }, + }); + }); + }, +}; diff --git a/src/backend/src/routers/filesystem_api/fs_tree_manager/pull_diff.js b/src/backend/src/routers/filesystem_api/fs_tree_manager/pull_diff.js new file mode 100644 index 0000000000..c2a40a71c9 --- /dev/null +++ b/src/backend/src/routers/filesystem_api/fs_tree_manager/pull_diff.js @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2024-present Puter Technologies Inc. + * + * This file is part of Puter. + * + * Puter is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +'use strict'; + +// -----------------------------------------------------------------------// +// WebSocket handler for replica/pull_diff +// -----------------------------------------------------------------------// +module.exports = { + event: 'replica/pull_diff', + handler: async (socket, data) => { + // Import gRPC client and protobuf classes from common + const { + getClient, + PullRequest, + PullRequestItem, + } = require('./common'); + + const client = getClient(); + if (!client) { + // Client-replica service is not available + return socket.emit('replica/pull_diff/error', { + success: false, + error: { message: 'client-replica service is not available' }, + }); + } + + try { + // Build the PullRequest message + const requestMsg = new PullRequest(); + + // Set the user_name at the top level + requestMsg.setUserId(socket.user.id); + + // Add each pull request item + if ( data.pull_request && Array.isArray(data.pull_request) ) { + data.pull_request.forEach(item => { + const pullRequestItem = new PullRequestItem(); + pullRequestItem.setUuid(item.uuid); + pullRequestItem.setMerkleHash(item.merkle_hash); + requestMsg.addPullRequest(pullRequestItem); + }); + } + + client.pullDiff(requestMsg, (err, resp) => { + if ( err ) { + console.error('PullDiff error:', err); + // TODO (xiaochen): what should we do when pull diff fails? + return socket.emit('replica/pull_diff/error', { + success: false, + error: { message: 'Failed to pull diff', details: err.message }, + }); + } + + const pushRequestItems = resp.getPushRequestList(); + + if ( pushRequestItems.length === 0 ) { + return; + } + + // Convert protobuf response to plain JavaScript + const pushRequest = { + push_request: pushRequestItems.map(item => ({ + uuid: item.getUuid(), + merkle_hash: item.getMerkleHash(), + fs_entry: item.getFsEntry() ? item.getFsEntry().getMetadata().toJavaScript() : {}, + children: item.getChildrenList().map(child => ({ + uuid: child.getUuid(), + merkle_hash: child.getMerkleHash(), + fs_entry: child.getFsEntry() ? child.getFsEntry().getMetadata().toJavaScript() : {}, + children: [], // Note: this is a simplified structure, real implementation might need recursive handling + })), + })), + }; + + socket.emit('replica/pull_diff/success', { + success: true, + data: pushRequest, + }); + }); + } catch( error ) { + console.error('Error in pull_diff handler:', error); + socket.emit('replica/pull_diff/error', { + success: false, + error: { message: 'Internal error in pull_diff handler', details: error.message }, + }); + } + }, +}; diff --git a/src/backend/src/routers/filesystem_api/rename.js b/src/backend/src/routers/filesystem_api/rename.js index 11623222a5..0da4ca1fb2 100644 --- a/src/backend/src/routers/filesystem_api/rename.js +++ b/src/backend/src/routers/filesystem_api/rename.js @@ -16,12 +16,13 @@ * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see . 
*/ -"use strict" +'use strict'; const eggspress = require('../../api/eggspress.js'); const APIError = require('../../api/APIError.js'); const { Context } = require('../../util/context.js'); const FSNodeParam = require('../../api/filesystem/FSNodeParam.js'); const { DB_WRITE } = require('../../services/database/consts.js'); +const { sendFSRemove, sendFSNew, sendFSPurge } = require('./fs_tree_manager/common'); // -----------------------------------------------------------------------// // POST /rename @@ -43,7 +44,7 @@ module.exports = eggspress('/rename', { key: 'new_name', }); } - if (typeof req.body.new_name !== 'string') { + if ( typeof req.body.new_name !== 'string' ) { throw APIError.create('field_invalid', null, { key: 'new_name', expected: 'string', @@ -54,17 +55,17 @@ module.exports = eggspress('/rename', { // modules const db = req.services.get('database').get(DB_WRITE, 'filesystem'); const mime = require('mime-types'); - const {get_app, validate_fsentry_name, id2path} = require('../../helpers.js'); + const { get_app, validate_fsentry_name, id2path } = require('../../helpers.js'); const _path = require('path'); // new_name validation - try{ - validate_fsentry_name(req.body.new_name) - }catch(e){ + try { + validate_fsentry_name(req.body.new_name); + } catch( e ){ return res.status(400).send({ - error:{ - message: e.message - } + error: { + message: e.message, + }, }); } @@ -88,43 +89,41 @@ module.exports = eggspress('/rename', { let fsentry = subject.entry; // immutable - if(fsentry.immutable){ + if ( fsentry.immutable ){ return res.status(400).send({ - error:{ - message: 'Immutable: cannot rename.' - } - }) + error: { + message: 'Immutable: cannot rename.', + }, + }); } let res1; // parent is root - if(fsentry.parent_uid === null){ - try{ - res1 = await db.read( - `SELECT uuid FROM fsentries WHERE parent_uid IS NULL AND name = ? AND id != ? LIMIT 1`, - [ - //name - req.body.new_name, - await subject.get('mysql-id'), - ]); - }catch(e){ - console.log(e) + if ( fsentry.parent_uid === null ){ + try { + res1 = await db.read('SELECT uuid FROM fsentries WHERE parent_uid IS NULL AND name = ? AND id != ? LIMIT 1', + [ + //name + req.body.new_name, + await subject.get('mysql-id'), + ]); + } catch( e ){ + console.log(e); } } // parent is regular dir - else{ - res1 = await db.read( - `SELECT uuid FROM fsentries WHERE parent_uid = ? AND name = ? AND id != ? LIMIT 1`, - [ - //parent_uid - fsentry.parent_uid, - //name - req.body.new_name, - await subject.get('mysql-id'), - ]); + else { + res1 = await db.read('SELECT uuid FROM fsentries WHERE parent_uid = ? AND name = ? AND id != ? LIMIT 1', + [ + //parent_uid + fsentry.parent_uid, + //name + req.body.new_name, + await subject.get('mysql-id'), + ]); } - if(res1[0]){ + if ( res1[0] ){ throw APIError.create('item_with_same_name_exists', null, { entry_name: req.body.new_name, }); @@ -134,18 +133,16 @@ module.exports = eggspress('/rename', { const new_path = _path.join(_path.dirname(old_path), req.body.new_name); // update `name` - await db.write( - `UPDATE fsentries SET name = ?, path = ? WHERE id = ?`, - [req.body.new_name, new_path, await subject.get('mysql-id')] - ) + await db.write('UPDATE fsentries SET name = ?, path = ? 
WHERE id = ?', + [req.body.new_name, new_path, await subject.get('mysql-id')]); const filesystem = req.services.get('filesystem'); await filesystem.update_child_paths(old_path, new_path, req.user.id); // associated_app let associated_app; - if(fsentry.associated_app_id){ - const app = await get_app({id: fsentry.associated_app_id}) + if ( fsentry.associated_app_id ){ + const app = await get_app({ id: fsentry.associated_app_id }); // remove some privileged information delete app.id; delete app.approved_for_listing; @@ -154,12 +151,12 @@ module.exports = eggspress('/rename', { delete app.owner_user_id; // add to array associated_app = app; - }else{ + } else { associated_app = {}; } // send the fsentry of the new object created - const contentType = mime.contentType(req.body.new_name) + const contentType = mime.contentType(req.body.new_name); const return_obj = { uid: req.body.uid, name: req.body.new_name, @@ -174,21 +171,49 @@ module.exports = eggspress('/rename', { // send realtime success msg to client const svc_socketio = req.services.get('socketio'); svc_socketio.send({ room: req.user.id }, 'item.renamed', return_obj); - - (async () => { try { - const svc_event = req.services.get('event'); - await svc_event.emit('fs.rename', { - uid: fsentry.uuid, - new_name: req.body.new_name, - }) - } catch (e) { - const log = req.services.get('log-service').create('rename-endpoint'); - const errors = req.services.get('error-service').create(log); - errors.report('emit.rename', { - alarm: true, - source: e, - }); - }})(); + + (async () => { + try { + const svc_event = req.services.get('event'); + await svc_event.emit('fs.rename', { + uid: fsentry.uuid, + new_name: req.body.new_name, + }); + } catch( e ) { + const log = req.services.get('log-service').create('rename-endpoint'); + const errors = req.services.get('error-service').create(log); + errors.report('emit.rename', { + alarm: true, + source: e, + }); + } + })(); + + // ================== client-replica hook start ================== + // "rename" hook + (async () => { + try { + if ( fsentry.is_dir ) { + // Don't know who to mutate the fs-tree properly, just purge the replica. + await sendFSPurge(fsentry.user_id); + } else { + // NB: user_id only exists in raw fsentry + const user_id = fsentry.user_id; + const uuid = fsentry.uuid; + await sendFSRemove(user_id, uuid); + + const new_fsentry = await subject.getSafeEntry(); + // for unknown reasons, the name and path are incorrect in the new_fsentry + new_fsentry.name = return_obj.name; + new_fsentry.path = return_obj.path; + + await sendFSNew(user_id, new_fsentry); + } + } catch( e ) { + console.error('client-replica failure: ', e); + } + })(); + // ================== client-replica hook end ==================== return res.send(return_obj); }); diff --git a/src/backend/src/services/WSPushService.js b/src/backend/src/services/WSPushService.js index 95dffa6f0d..0042da27e6 100644 --- a/src/backend/src/services/WSPushService.js +++ b/src/backend/src/services/WSPushService.js @@ -19,12 +19,13 @@ */ const BaseService = require("./BaseService"); const { Context } = require("../util/context"); +const { sendFSNew, sendFSRemove, sendFSPurge } = require('../routers/filesystem_api/fs_tree_manager/common'); class WSPushService extends BaseService { static LOG_DEBUG = true; /** * Initializes the WSPushService by setting up event listeners for various file system operations. - * + * * @param {Object} options - The configuration options for the service. 
* @param {Object} options.services - An object containing service dependencies. */ @@ -36,16 +37,15 @@ class WSPushService extends BaseService { this.svc_event.on('fs.move.*', this._on_fs_move.bind(this)); this.svc_event.on('fs.pending.*', this._on_fs_pending.bind(this)); this.svc_event.on('fs.storage.upload-progress', - this._on_upload_progress.bind(this)); + this._on_upload_progress.bind(this)); this.svc_event.on('fs.storage.progress.*', - this._on_upload_progress.bind(this)); + this._on_upload_progress.bind(this)); this.svc_event.on('puter-exec.submission.done', - this._on_submission_done.bind(this)); + this._on_submission_done.bind(this)); this.svc_event.on('outer.gui.*', - this._on_outer_gui.bind(this)); + this._on_outer_gui.bind(this)); } - async _on_fs_create(key, data) { const { node, context } = data; @@ -78,21 +78,27 @@ class WSPushService extends BaseService { const ts = Date.now(); await this._update_user_ts(user_id_list, ts, metadata); // Pass metadata + + // ================== client-replica hook start ================== + // "create" hook + for ( const user_id of user_id_list ) { + await sendFSNew(user_id, response); + } + // ================== client-replica hook end ==================== } - /** * Handles file system update events. - * + * * @param {string} key - The event key. * @param {Object} data - The event data containing node and context information. * @returns {Promise} A promise that resolves when the update has been processed. - * + * * @description * This method is triggered when a file or directory is updated. It retrieves * metadata from the context, fetches the updated node's entry, determines the * relevant user IDs, and emits an event to notify the GUI of the update. - * + * * @note * - The method uses a set for user IDs to prepare for future multi-user dispatch. * - If no specific user ID is provided in the metadata, it falls back to the node's user ID. @@ -133,9 +139,9 @@ class WSPushService extends BaseService { /** * Handles file system move events by emitting appropriate GUI update events. - * + * * This method is triggered when a file or directory is moved within the file system. - * It collects necessary metadata, updates the response with the old path, and + * It collects necessary metadata, updates the response with the old path, and * broadcasts the event to update the GUI for the affected users. * * @param {string} key - The event key triggering this method. @@ -178,17 +184,42 @@ class WSPushService extends BaseService { const ts = Date.now(); await this._update_user_ts(user_id_list, ts, metadata); // Pass metadata - } + + // ================== client-replica hook start ================== + // "move" hook + // + // TODO: move this hook to a stable place. + (async () => { + try { + // NB: UUID comes from uuid/uid, need to handle both. + const uuid = response.uuid || response.uid; + for ( const user_id of user_id_list ) { + // NB: type of response is unstable, need to handle both. + const is_dir = response.entry?.is_dir || response.is_dir; + + if ( is_dir ) { + await sendFSPurge(user_id); + } else { + await sendFSRemove(user_id, uuid); + await sendFSNew(user_id, response); + } + } + } catch( e ) { + console.error('client-replica failure: ', e); + } + })(); + // ================== client-replica hook end ==================== + } /** * Handles the 'fs.pending' event, preparing and emitting data for items that are pending processing. - * + * * @param {string} key - The event key, typically starting with 'fs.pending.'. 
* @param {Object} data - An object containing the fsentry and context of the pending file system operation. * @param {Object} data.fsentry - The file system entry that is pending. * @param {Object} data.context - The operation context providing additional metadata. * @fires svc_event#outer.gui.item.pending - Emitted with user ID list and entry details. - * + * * @returns {Promise} Emits an event to update the GUI about the pending item. */ async _on_fs_pending(key, data) { @@ -226,13 +257,13 @@ class WSPushService extends BaseService { /** * Emits an upload or download progress event to the relevant socket. - * + * * @param {string} key - The event key that triggered this method. * @param {Object} data - Contains upload_tracker, context, and meta information. * @param {Object} data.upload_tracker - Tracker for the upload/download progress. * @param {Object} data.context - Context of the operation. * @param {Object} data.meta - Additional metadata for the event. - * + * * It emits a progress event to the socket if it exists, otherwise, it does nothing. */ async _on_upload_progress(key, data) { @@ -275,7 +306,7 @@ class WSPushService extends BaseService { loaded: upload_tracker.progress_, loaded_diff: delta, }); - }) + }); } async _on_submission_done(key, data) { @@ -299,11 +330,11 @@ class WSPushService extends BaseService { /** * Handles the 'outer.gui.*' event to emit GUI-related updates to specific users. - * + * * @param {string} key - The event key with 'outer.gui.' prefix removed. * @param {Object} data - Contains user_id_list and response to emit. * @param {Object} meta - Additional metadata for the event. - * + * * @note This method iterates over each user ID provided in the event data, * checks if the user's socket room exists and has clients, then emits * the event to the appropriate room. @@ -361,5 +392,5 @@ class WSPushService extends BaseService { } module.exports = { - WSPushService + WSPushService, }; diff --git a/src/backend/src/services/abuse-prevention/IdentificationService.js b/src/backend/src/services/abuse-prevention/IdentificationService.js index 4d5ce00014..c90ba5ab54 100644 --- a/src/backend/src/services/abuse-prevention/IdentificationService.js +++ b/src/backend/src/services/abuse-prevention/IdentificationService.js @@ -146,6 +146,12 @@ class RequesterIdentificationExpressMiddleware extends AdvancedBase { const x = Context.get(); const requester = Requester.from_request(req); + + // console.log(` req.url: ${req.url}, req.headers: ${JSON.stringify(req.headers)}`); + // if (req.url.includes('replica')) { + // console.log(` req.url: ${req.url}, req.headers: ${JSON.stringify(req.headers)}`); + // } + const is_bot = this.modules.isbot(requester.ua); requester.is_bot = is_bot; diff --git a/src/fs_tree_manager/Dockerfile b/src/fs_tree_manager/Dockerfile new file mode 100644 index 0000000000..b3d1f00378 --- /dev/null +++ b/src/fs_tree_manager/Dockerfile @@ -0,0 +1,9 @@ +FROM golang:1.25 + +WORKDIR / + +COPY . . + +RUN go build -o fs_tree_manager ./server.go + +CMD ["./fs_tree_manager"] \ No newline at end of file diff --git a/src/fs_tree_manager/docker-compose.yml b/src/fs_tree_manager/docker-compose.yml new file mode 100644 index 0000000000..85e804e465 --- /dev/null +++ b/src/fs_tree_manager/docker-compose.yml @@ -0,0 +1,13 @@ +services: + fs_tree_manager: + image: fs_tree_manager:latest + volumes: + - ./config.yaml:/config.yaml:ro + extra_hosts: + # Resolve host.docker.internal to the host's gateway IP, works on + # linux/macos/windows. 
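+      #
+      # For example (hypothetical values), a config.yaml mounted into this
+      # container could reach a MySQL server running on the host with:
+      #
+      #   mysql:
+      #     db_host: "host.docker.internal"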
+ - "host.docker.internal:host-gateway" + restart: on-failure + mem_limit: 4g + ports: + - "50052:50052" \ No newline at end of file diff --git a/src/fs_tree_manager/example-config.yaml b/src/fs_tree_manager/example-config.yaml new file mode 100644 index 0000000000..dc12dad564 --- /dev/null +++ b/src/fs_tree_manager/example-config.yaml @@ -0,0 +1,13 @@ +database: + driver: "sqlite3" + sqlite3: + path: "/var/puter/puter-database.sqlite" + mysql: + db_host: "127.0.0.1" + db_port: 8889 + db_user: "root" + db_password: "my-secret-pw" + db_database: "bayshore" + +server: + port: 50052 diff --git a/src/fs_tree_manager/go.mod b/src/fs_tree_manager/go.mod new file mode 100644 index 0000000000..42f985ade5 --- /dev/null +++ b/src/fs_tree_manager/go.mod @@ -0,0 +1,23 @@ +module github.com/puter/fs_tree_manager + +go 1.25.1 + +require ( + github.com/cespare/xxhash/v2 v2.3.0 + github.com/go-sql-driver/mysql v1.9.3 + github.com/mattn/go-sqlite3 v1.14.19 + github.com/spf13/cobra v1.10.1 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.9 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + filippo.io/edwards25519 v1.1.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/spf13/pflag v1.0.9 // indirect + golang.org/x/net v0.41.0 // indirect + golang.org/x/sys v0.33.0 // indirect + golang.org/x/text v0.30.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect +) diff --git a/src/fs_tree_manager/go.sum b/src/fs_tree_manager/go.sum new file mode 100644 index 0000000000..2f8dc603e0 --- /dev/null +++ b/src/fs_tree_manager/go.sum @@ -0,0 +1,58 @@ +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= +github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI= +github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= +golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= +golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/fs_tree_manager/go/fs_tree_manager.pb.go b/src/fs_tree_manager/go/fs_tree_manager.pb.go new file mode 100644 index 0000000000..c6cc78e17f --- /dev/null +++ b/src/fs_tree_manager/go/fs_tree_manager.pb.go @@ -0,0 +1,775 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.8 +// protoc v3.21.12 +// source: fs_tree_manager.proto + +package fs_tree_manager + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FetchReplicaRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FetchReplicaRequest) Reset() { + *x = FetchReplicaRequest{} + mi := &file_fs_tree_manager_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FetchReplicaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FetchReplicaRequest) ProtoMessage() {} + +func (x *FetchReplicaRequest) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FetchReplicaRequest.ProtoReflect.Descriptor instead. +func (*FetchReplicaRequest) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{0} +} + +func (x *FetchReplicaRequest) GetUserId() int64 { + if x != nil { + return x.UserId + } + return 0 +} + +type NewFSEntryRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + FsEntry *FSEntry `protobuf:"bytes,2,opt,name=fs_entry,json=fsEntry,proto3" json:"fs_entry,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NewFSEntryRequest) Reset() { + *x = NewFSEntryRequest{} + mi := &file_fs_tree_manager_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NewFSEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewFSEntryRequest) ProtoMessage() {} + +func (x *NewFSEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NewFSEntryRequest.ProtoReflect.Descriptor instead. +func (*NewFSEntryRequest) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{1} +} + +func (x *NewFSEntryRequest) GetUserId() int64 { + if x != nil { + return x.UserId + } + return 0 +} + +func (x *NewFSEntryRequest) GetFsEntry() *FSEntry { + if x != nil { + return x.FsEntry + } + return nil +} + +type RemoveFSEntryRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + // Use UUID instead of FSEntry since: + // 1. 
UUID is enough to identify a node + // 2. FSEntry is inaccessable in many cases + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemoveFSEntryRequest) Reset() { + *x = RemoveFSEntryRequest{} + mi := &file_fs_tree_manager_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemoveFSEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveFSEntryRequest) ProtoMessage() {} + +func (x *RemoveFSEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveFSEntryRequest.ProtoReflect.Descriptor instead. +func (*RemoveFSEntryRequest) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{2} +} + +func (x *RemoveFSEntryRequest) GetUserId() int64 { + if x != nil { + return x.UserId + } + return 0 +} + +func (x *RemoveFSEntryRequest) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +type PurgeReplicaRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PurgeReplicaRequest) Reset() { + *x = PurgeReplicaRequest{} + mi := &file_fs_tree_manager_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PurgeReplicaRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PurgeReplicaRequest) ProtoMessage() {} + +func (x *PurgeReplicaRequest) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PurgeReplicaRequest.ProtoReflect.Descriptor instead. +func (*PurgeReplicaRequest) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{3} +} + +func (x *PurgeReplicaRequest) GetUserId() int64 { + if x != nil { + return x.UserId + } + return 0 +} + +type MerkleNode struct { + state protoimpl.MessageState `protogen:"open.v1"` + // We use the stable uuid from fs_entry so pointers to it stay valid when this + // node is updated. + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + // We use string instead of uint64 for several reasons: + // 1. JS is error prone when handling uint64/bigint and it's time-consuming to + // troubleshoot. + // 2. It's easier to come up with a consistent order on string type. + MerkleHash string `protobuf:"bytes,2,opt,name=merkle_hash,json=merkleHash,proto3" json:"merkle_hash,omitempty"` + // Use map to avoid duplicate children uuids. The value doesn't matter, it's + // there just because protobuf doesn't have built-in set type. 
+ ChildrenUuids map[string]bool `protobuf:"bytes,3,rep,name=children_uuids,json=childrenUuids,proto3" json:"children_uuids,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + ParentUuid string `protobuf:"bytes,4,opt,name=parent_uuid,json=parentUuid,proto3" json:"parent_uuid,omitempty"` + FsEntry *FSEntry `protobuf:"bytes,5,opt,name=fs_entry,json=fsEntry,proto3" json:"fs_entry,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MerkleNode) Reset() { + *x = MerkleNode{} + mi := &file_fs_tree_manager_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MerkleNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MerkleNode) ProtoMessage() {} + +func (x *MerkleNode) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MerkleNode.ProtoReflect.Descriptor instead. +func (*MerkleNode) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{4} +} + +func (x *MerkleNode) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *MerkleNode) GetMerkleHash() string { + if x != nil { + return x.MerkleHash + } + return "" +} + +func (x *MerkleNode) GetChildrenUuids() map[string]bool { + if x != nil { + return x.ChildrenUuids + } + return nil +} + +func (x *MerkleNode) GetParentUuid() string { + if x != nil { + return x.ParentUuid + } + return "" +} + +func (x *MerkleNode) GetFsEntry() *FSEntry { + if x != nil { + return x.FsEntry + } + return nil +} + +// The motivation of the heap design instead of embedded trees is elaborated in +// the RFC (doc/RFCS/20250821_client_replica_file_system.md) +type MerkleTree struct { + state protoimpl.MessageState `protogen:"open.v1"` + RootUuid string `protobuf:"bytes,1,opt,name=root_uuid,json=rootUuid,proto3" json:"root_uuid,omitempty"` + // uuid -> node + Nodes map[string]*MerkleNode `protobuf:"bytes,2,rep,name=nodes,proto3" json:"nodes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MerkleTree) Reset() { + *x = MerkleTree{} + mi := &file_fs_tree_manager_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MerkleTree) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MerkleTree) ProtoMessage() {} + +func (x *MerkleTree) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MerkleTree.ProtoReflect.Descriptor instead. +func (*MerkleTree) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{5} +} + +func (x *MerkleTree) GetRootUuid() string { + if x != nil { + return x.RootUuid + } + return "" +} + +func (x *MerkleTree) GetNodes() map[string]*MerkleNode { + if x != nil { + return x.Nodes + } + return nil +} + +type FSEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + // TODO (xiaochen): Make it a static type step by step. 
+ // + // A static type is more robust and less error-prone. For instance, a + // FSEntry has uuid field in database but uid is desired in the puter-js + // and GUI client. We can guarantee the presence of desired fields by + // using a static type. + Metadata *structpb.Struct `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FSEntry) Reset() { + *x = FSEntry{} + mi := &file_fs_tree_manager_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FSEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FSEntry) ProtoMessage() {} + +func (x *FSEntry) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FSEntry.ProtoReflect.Descriptor instead. +func (*FSEntry) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{6} +} + +func (x *FSEntry) GetMetadata() *structpb.Struct { + if x != nil { + return x.Metadata + } + return nil +} + +type PullRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + PullRequest []*PullRequestItem `protobuf:"bytes,2,rep,name=pull_request,json=pullRequest,proto3" json:"pull_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PullRequest) Reset() { + *x = PullRequest{} + mi := &file_fs_tree_manager_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullRequest) ProtoMessage() {} + +func (x *PullRequest) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullRequest.ProtoReflect.Descriptor instead. 
+func (*PullRequest) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{7} +} + +func (x *PullRequest) GetUserId() int64 { + if x != nil { + return x.UserId + } + return 0 +} + +func (x *PullRequest) GetPullRequest() []*PullRequestItem { + if x != nil { + return x.PullRequest + } + return nil +} + +type PullRequestItem struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + MerkleHash string `protobuf:"bytes,2,opt,name=merkle_hash,json=merkleHash,proto3" json:"merkle_hash,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PullRequestItem) Reset() { + *x = PullRequestItem{} + mi := &file_fs_tree_manager_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PullRequestItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullRequestItem) ProtoMessage() {} + +func (x *PullRequestItem) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullRequestItem.ProtoReflect.Descriptor instead. +func (*PullRequestItem) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{8} +} + +func (x *PullRequestItem) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *PullRequestItem) GetMerkleHash() string { + if x != nil { + return x.MerkleHash + } + return "" +} + +type PushRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + PushRequest []*PushRequestItem `protobuf:"bytes,2,rep,name=push_request,json=pushRequest,proto3" json:"push_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PushRequest) Reset() { + *x = PushRequest{} + mi := &file_fs_tree_manager_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PushRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushRequest) ProtoMessage() {} + +func (x *PushRequest) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushRequest.ProtoReflect.Descriptor instead. 
+func (*PushRequest) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{9} +} + +func (x *PushRequest) GetUserId() int64 { + if x != nil { + return x.UserId + } + return 0 +} + +func (x *PushRequest) GetPushRequest() []*PushRequestItem { + if x != nil { + return x.PushRequest + } + return nil +} + +type PushRequestItem struct { + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + MerkleHash string `protobuf:"bytes,2,opt,name=merkle_hash,json=merkleHash,proto3" json:"merkle_hash,omitempty"` + FsEntry *FSEntry `protobuf:"bytes,3,opt,name=fs_entry,json=fsEntry,proto3" json:"fs_entry,omitempty"` + Children []*PushRequestItem `protobuf:"bytes,4,rep,name=children,proto3" json:"children,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PushRequestItem) Reset() { + *x = PushRequestItem{} + mi := &file_fs_tree_manager_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PushRequestItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushRequestItem) ProtoMessage() {} + +func (x *PushRequestItem) ProtoReflect() protoreflect.Message { + mi := &file_fs_tree_manager_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushRequestItem.ProtoReflect.Descriptor instead. +func (*PushRequestItem) Descriptor() ([]byte, []int) { + return file_fs_tree_manager_proto_rawDescGZIP(), []int{10} +} + +func (x *PushRequestItem) GetUuid() string { + if x != nil { + return x.Uuid + } + return "" +} + +func (x *PushRequestItem) GetMerkleHash() string { + if x != nil { + return x.MerkleHash + } + return "" +} + +func (x *PushRequestItem) GetFsEntry() *FSEntry { + if x != nil { + return x.FsEntry + } + return nil +} + +func (x *PushRequestItem) GetChildren() []*PushRequestItem { + if x != nil { + return x.Children + } + return nil +} + +var File_fs_tree_manager_proto protoreflect.FileDescriptor + +const file_fs_tree_manager_proto_rawDesc = "" + + "\n" + + "\x15fs_tree_manager.proto\x12\x0ffs_tree_manager\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1bgoogle/protobuf/empty.proto\".\n" + + "\x13FetchReplicaRequest\x12\x17\n" + + "\auser_id\x18\x01 \x01(\x03R\x06userId\"a\n" + + "\x11NewFSEntryRequest\x12\x17\n" + + "\auser_id\x18\x01 \x01(\x03R\x06userId\x123\n" + + "\bfs_entry\x18\x02 \x01(\v2\x18.fs_tree_manager.FSEntryR\afsEntry\"C\n" + + "\x14RemoveFSEntryRequest\x12\x17\n" + + "\auser_id\x18\x01 \x01(\x03R\x06userId\x12\x12\n" + + "\x04uuid\x18\x02 \x01(\tR\x04uuid\".\n" + + "\x13PurgeReplicaRequest\x12\x17\n" + + "\auser_id\x18\x01 \x01(\x03R\x06userId\"\xb0\x02\n" + + "\n" + + "MerkleNode\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12\x1f\n" + + "\vmerkle_hash\x18\x02 \x01(\tR\n" + + "merkleHash\x12U\n" + + "\x0echildren_uuids\x18\x03 \x03(\v2..fs_tree_manager.MerkleNode.ChildrenUuidsEntryR\rchildrenUuids\x12\x1f\n" + + "\vparent_uuid\x18\x04 \x01(\tR\n" + + "parentUuid\x123\n" + + "\bfs_entry\x18\x05 \x01(\v2\x18.fs_tree_manager.FSEntryR\afsEntry\x1a@\n" + + "\x12ChildrenUuidsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\bR\x05value:\x028\x01\"\xbe\x01\n" + + "\n" + + "MerkleTree\x12\x1b\n" + + "\troot_uuid\x18\x01 \x01(\tR\brootUuid\x12<\n" + + 
"\x05nodes\x18\x02 \x03(\v2&.fs_tree_manager.MerkleTree.NodesEntryR\x05nodes\x1aU\n" + + "\n" + + "NodesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x121\n" + + "\x05value\x18\x02 \x01(\v2\x1b.fs_tree_manager.MerkleNodeR\x05value:\x028\x01\">\n" + + "\aFSEntry\x123\n" + + "\bmetadata\x18\x01 \x01(\v2\x17.google.protobuf.StructR\bmetadata\"k\n" + + "\vPullRequest\x12\x17\n" + + "\auser_id\x18\x01 \x01(\x03R\x06userId\x12C\n" + + "\fpull_request\x18\x02 \x03(\v2 .fs_tree_manager.PullRequestItemR\vpullRequest\"F\n" + + "\x0fPullRequestItem\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12\x1f\n" + + "\vmerkle_hash\x18\x02 \x01(\tR\n" + + "merkleHash\"k\n" + + "\vPushRequest\x12\x17\n" + + "\auser_id\x18\x01 \x01(\x03R\x06userId\x12C\n" + + "\fpush_request\x18\x02 \x03(\v2 .fs_tree_manager.PushRequestItemR\vpushRequest\"\xb9\x01\n" + + "\x0fPushRequestItem\x12\x12\n" + + "\x04uuid\x18\x01 \x01(\tR\x04uuid\x12\x1f\n" + + "\vmerkle_hash\x18\x02 \x01(\tR\n" + + "merkleHash\x123\n" + + "\bfs_entry\x18\x03 \x01(\v2\x18.fs_tree_manager.FSEntryR\afsEntry\x12<\n" + + "\bchildren\x18\x04 \x03(\v2 .fs_tree_manager.PushRequestItemR\bchildren2\x92\x03\n" + + "\rFSTreeManager\x12Q\n" + + "\fFetchReplica\x12$.fs_tree_manager.FetchReplicaRequest\x1a\x1b.fs_tree_manager.MerkleTree\x12F\n" + + "\bPullDiff\x12\x1c.fs_tree_manager.PullRequest\x1a\x1c.fs_tree_manager.PushRequest\x12H\n" + + "\n" + + "NewFSEntry\x12\".fs_tree_manager.NewFSEntryRequest\x1a\x16.google.protobuf.Empty\x12N\n" + + "\rRemoveFSEntry\x12%.fs_tree_manager.RemoveFSEntryRequest\x1a\x16.google.protobuf.Empty\x12L\n" + + "\fPurgeReplica\x12$.fs_tree_manager.PurgeReplicaRequest\x1a\x16.google.protobuf.EmptyB\"Z github.com/puter/fs_tree_managerb\x06proto3" + +var ( + file_fs_tree_manager_proto_rawDescOnce sync.Once + file_fs_tree_manager_proto_rawDescData []byte +) + +func file_fs_tree_manager_proto_rawDescGZIP() []byte { + file_fs_tree_manager_proto_rawDescOnce.Do(func() { + file_fs_tree_manager_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_fs_tree_manager_proto_rawDesc), len(file_fs_tree_manager_proto_rawDesc))) + }) + return file_fs_tree_manager_proto_rawDescData +} + +var file_fs_tree_manager_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_fs_tree_manager_proto_goTypes = []any{ + (*FetchReplicaRequest)(nil), // 0: fs_tree_manager.FetchReplicaRequest + (*NewFSEntryRequest)(nil), // 1: fs_tree_manager.NewFSEntryRequest + (*RemoveFSEntryRequest)(nil), // 2: fs_tree_manager.RemoveFSEntryRequest + (*PurgeReplicaRequest)(nil), // 3: fs_tree_manager.PurgeReplicaRequest + (*MerkleNode)(nil), // 4: fs_tree_manager.MerkleNode + (*MerkleTree)(nil), // 5: fs_tree_manager.MerkleTree + (*FSEntry)(nil), // 6: fs_tree_manager.FSEntry + (*PullRequest)(nil), // 7: fs_tree_manager.PullRequest + (*PullRequestItem)(nil), // 8: fs_tree_manager.PullRequestItem + (*PushRequest)(nil), // 9: fs_tree_manager.PushRequest + (*PushRequestItem)(nil), // 10: fs_tree_manager.PushRequestItem + nil, // 11: fs_tree_manager.MerkleNode.ChildrenUuidsEntry + nil, // 12: fs_tree_manager.MerkleTree.NodesEntry + (*structpb.Struct)(nil), // 13: google.protobuf.Struct + (*emptypb.Empty)(nil), // 14: google.protobuf.Empty +} +var file_fs_tree_manager_proto_depIdxs = []int32{ + 6, // 0: fs_tree_manager.NewFSEntryRequest.fs_entry:type_name -> fs_tree_manager.FSEntry + 11, // 1: fs_tree_manager.MerkleNode.children_uuids:type_name -> fs_tree_manager.MerkleNode.ChildrenUuidsEntry + 6, // 2: 
fs_tree_manager.MerkleNode.fs_entry:type_name -> fs_tree_manager.FSEntry + 12, // 3: fs_tree_manager.MerkleTree.nodes:type_name -> fs_tree_manager.MerkleTree.NodesEntry + 13, // 4: fs_tree_manager.FSEntry.metadata:type_name -> google.protobuf.Struct + 8, // 5: fs_tree_manager.PullRequest.pull_request:type_name -> fs_tree_manager.PullRequestItem + 10, // 6: fs_tree_manager.PushRequest.push_request:type_name -> fs_tree_manager.PushRequestItem + 6, // 7: fs_tree_manager.PushRequestItem.fs_entry:type_name -> fs_tree_manager.FSEntry + 10, // 8: fs_tree_manager.PushRequestItem.children:type_name -> fs_tree_manager.PushRequestItem + 4, // 9: fs_tree_manager.MerkleTree.NodesEntry.value:type_name -> fs_tree_manager.MerkleNode + 0, // 10: fs_tree_manager.FSTreeManager.FetchReplica:input_type -> fs_tree_manager.FetchReplicaRequest + 7, // 11: fs_tree_manager.FSTreeManager.PullDiff:input_type -> fs_tree_manager.PullRequest + 1, // 12: fs_tree_manager.FSTreeManager.NewFSEntry:input_type -> fs_tree_manager.NewFSEntryRequest + 2, // 13: fs_tree_manager.FSTreeManager.RemoveFSEntry:input_type -> fs_tree_manager.RemoveFSEntryRequest + 3, // 14: fs_tree_manager.FSTreeManager.PurgeReplica:input_type -> fs_tree_manager.PurgeReplicaRequest + 5, // 15: fs_tree_manager.FSTreeManager.FetchReplica:output_type -> fs_tree_manager.MerkleTree + 9, // 16: fs_tree_manager.FSTreeManager.PullDiff:output_type -> fs_tree_manager.PushRequest + 14, // 17: fs_tree_manager.FSTreeManager.NewFSEntry:output_type -> google.protobuf.Empty + 14, // 18: fs_tree_manager.FSTreeManager.RemoveFSEntry:output_type -> google.protobuf.Empty + 14, // 19: fs_tree_manager.FSTreeManager.PurgeReplica:output_type -> google.protobuf.Empty + 15, // [15:20] is the sub-list for method output_type + 10, // [10:15] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_fs_tree_manager_proto_init() } +func file_fs_tree_manager_proto_init() { + if File_fs_tree_manager_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_fs_tree_manager_proto_rawDesc), len(file_fs_tree_manager_proto_rawDesc)), + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_fs_tree_manager_proto_goTypes, + DependencyIndexes: file_fs_tree_manager_proto_depIdxs, + MessageInfos: file_fs_tree_manager_proto_msgTypes, + }.Build() + File_fs_tree_manager_proto = out.File + file_fs_tree_manager_proto_goTypes = nil + file_fs_tree_manager_proto_depIdxs = nil +} diff --git a/src/fs_tree_manager/go/fs_tree_manager_grpc.pb.go b/src/fs_tree_manager/go/fs_tree_manager_grpc.pb.go new file mode 100644 index 0000000000..3fad3b7cc3 --- /dev/null +++ b/src/fs_tree_manager/go/fs_tree_manager_grpc.pb.go @@ -0,0 +1,314 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.21.12 +// source: fs_tree_manager.proto + +package fs_tree_manager + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + FSTreeManager_FetchReplica_FullMethodName = "/fs_tree_manager.FSTreeManager/FetchReplica" + FSTreeManager_PullDiff_FullMethodName = "/fs_tree_manager.FSTreeManager/PullDiff" + FSTreeManager_NewFSEntry_FullMethodName = "/fs_tree_manager.FSTreeManager/NewFSEntry" + FSTreeManager_RemoveFSEntry_FullMethodName = "/fs_tree_manager.FSTreeManager/RemoveFSEntry" + FSTreeManager_PurgeReplica_FullMethodName = "/fs_tree_manager.FSTreeManager/PurgeReplica" +) + +// FSTreeManagerClient is the client API for FSTreeManager service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// For all RPC requests, user identifier is always needed since replicas are +// stored separately for each user. +// +// We use user_id instead of user_name/user_uuid since it's more accessible: +// - fsentry include user_id but not user_name/user_uuid +// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/database/sqlite_setup/0001_create-tables.sql#L81) +// - user_id is included in the fs events listener where user_name/user_uuid are +// not available +// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/WSPushService.js#L165-L166) +// +// We provide simple {New/Remove}FSEntry APIs as a straightforward way to +// accommodate the wide variety of file system operations. These APIs should +// always results in an coherent MerkleTree. +type FSTreeManagerClient interface { + FetchReplica(ctx context.Context, in *FetchReplicaRequest, opts ...grpc.CallOption) (*MerkleTree, error) + PullDiff(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PushRequest, error) + // Insert a new FSEntry into the tree, update its parent's children list as + // well. + NewFSEntry(ctx context.Context, in *NewFSEntryRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Remove an FSEntry (and all its descendants) from the tree, update its + // parent's children list as well. + RemoveFSEntry(ctx context.Context, in *RemoveFSEntryRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // For any fs operations that cannot be handled by New/Remove APIs, just purge + // the replica. + PurgeReplica(ctx context.Context, in *PurgeReplicaRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type fSTreeManagerClient struct { + cc grpc.ClientConnInterface +} + +func NewFSTreeManagerClient(cc grpc.ClientConnInterface) FSTreeManagerClient { + return &fSTreeManagerClient{cc} +} + +func (c *fSTreeManagerClient) FetchReplica(ctx context.Context, in *FetchReplicaRequest, opts ...grpc.CallOption) (*MerkleTree, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(MerkleTree) + err := c.cc.Invoke(ctx, FSTreeManager_FetchReplica_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *fSTreeManagerClient) PullDiff(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PushRequest, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(PushRequest) + err := c.cc.Invoke(ctx, FSTreeManager_PullDiff_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *fSTreeManagerClient) NewFSEntry(ctx context.Context, in *NewFSEntryRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, FSTreeManager_NewFSEntry_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *fSTreeManagerClient) RemoveFSEntry(ctx context.Context, in *RemoveFSEntryRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, FSTreeManager_RemoveFSEntry_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *fSTreeManagerClient) PurgeReplica(ctx context.Context, in *PurgeReplicaRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, FSTreeManager_PurgeReplica_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FSTreeManagerServer is the server API for FSTreeManager service. +// All implementations must embed UnimplementedFSTreeManagerServer +// for forward compatibility. +// +// For all RPC requests, user identifier is always needed since replicas are +// stored separately for each user. +// +// We use user_id instead of user_name/user_uuid since it's more accessible: +// - fsentry include user_id but not user_name/user_uuid +// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/database/sqlite_setup/0001_create-tables.sql#L81) +// - user_id is included in the fs events listener where user_name/user_uuid are +// not available +// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/WSPushService.js#L165-L166) +// +// We provide simple {New/Remove}FSEntry APIs as a straightforward way to +// accommodate the wide variety of file system operations. These APIs should +// always results in an coherent MerkleTree. +type FSTreeManagerServer interface { + FetchReplica(context.Context, *FetchReplicaRequest) (*MerkleTree, error) + PullDiff(context.Context, *PullRequest) (*PushRequest, error) + // Insert a new FSEntry into the tree, update its parent's children list as + // well. + NewFSEntry(context.Context, *NewFSEntryRequest) (*emptypb.Empty, error) + // Remove an FSEntry (and all its descendants) from the tree, update its + // parent's children list as well. + RemoveFSEntry(context.Context, *RemoveFSEntryRequest) (*emptypb.Empty, error) + // For any fs operations that cannot be handled by New/Remove APIs, just purge + // the replica. + PurgeReplica(context.Context, *PurgeReplicaRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedFSTreeManagerServer() +} + +// UnimplementedFSTreeManagerServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedFSTreeManagerServer struct{}
+
+func (UnimplementedFSTreeManagerServer) FetchReplica(context.Context, *FetchReplicaRequest) (*MerkleTree, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method FetchReplica not implemented")
+}
+func (UnimplementedFSTreeManagerServer) PullDiff(context.Context, *PullRequest) (*PushRequest, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method PullDiff not implemented")
+}
+func (UnimplementedFSTreeManagerServer) NewFSEntry(context.Context, *NewFSEntryRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method NewFSEntry not implemented")
+}
+func (UnimplementedFSTreeManagerServer) RemoveFSEntry(context.Context, *RemoveFSEntryRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RemoveFSEntry not implemented")
+}
+func (UnimplementedFSTreeManagerServer) PurgeReplica(context.Context, *PurgeReplicaRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method PurgeReplica not implemented")
+}
+func (UnimplementedFSTreeManagerServer) mustEmbedUnimplementedFSTreeManagerServer() {}
+func (UnimplementedFSTreeManagerServer) testEmbeddedByValue()                       {}
+
+// UnsafeFSTreeManagerServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to FSTreeManagerServer will
+// result in compilation errors.
+type UnsafeFSTreeManagerServer interface {
+	mustEmbedUnimplementedFSTreeManagerServer()
+}
+
+func RegisterFSTreeManagerServer(s grpc.ServiceRegistrar, srv FSTreeManagerServer) {
+	// If the following call panics, it indicates UnimplementedFSTreeManagerServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&FSTreeManager_ServiceDesc, srv) +} + +func _FSTreeManager_FetchReplica_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FetchReplicaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FSTreeManagerServer).FetchReplica(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FSTreeManager_FetchReplica_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FSTreeManagerServer).FetchReplica(ctx, req.(*FetchReplicaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FSTreeManager_PullDiff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PullRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FSTreeManagerServer).PullDiff(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FSTreeManager_PullDiff_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FSTreeManagerServer).PullDiff(ctx, req.(*PullRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FSTreeManager_NewFSEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NewFSEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FSTreeManagerServer).NewFSEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FSTreeManager_NewFSEntry_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FSTreeManagerServer).NewFSEntry(ctx, req.(*NewFSEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FSTreeManager_RemoveFSEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveFSEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FSTreeManagerServer).RemoveFSEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FSTreeManager_RemoveFSEntry_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FSTreeManagerServer).RemoveFSEntry(ctx, req.(*RemoveFSEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _FSTreeManager_PurgeReplica_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PurgeReplicaRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FSTreeManagerServer).PurgeReplica(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: FSTreeManager_PurgeReplica_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FSTreeManagerServer).PurgeReplica(ctx, req.(*PurgeReplicaRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// FSTreeManager_ServiceDesc is the grpc.ServiceDesc for FSTreeManager service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var FSTreeManager_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "fs_tree_manager.FSTreeManager", + HandlerType: (*FSTreeManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FetchReplica", + Handler: _FSTreeManager_FetchReplica_Handler, + }, + { + MethodName: "PullDiff", + Handler: _FSTreeManager_PullDiff_Handler, + }, + { + MethodName: "NewFSEntry", + Handler: _FSTreeManager_NewFSEntry_Handler, + }, + { + MethodName: "RemoveFSEntry", + Handler: _FSTreeManager_RemoveFSEntry_Handler, + }, + { + MethodName: "PurgeReplica", + Handler: _FSTreeManager_PurgeReplica_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "fs_tree_manager.proto", +} diff --git a/src/fs_tree_manager/js/fs_tree_manager_grpc_pb.js b/src/fs_tree_manager/js/fs_tree_manager_grpc_pb.js new file mode 100644 index 0000000000..6d09f180bb --- /dev/null +++ b/src/fs_tree_manager/js/fs_tree_manager_grpc_pb.js @@ -0,0 +1,175 @@ +// GENERATED CODE -- DO NOT EDIT! + +'use strict'; +var grpc = require('@grpc/grpc-js'); +var fs_tree_manager_pb = require('./fs_tree_manager_pb.js'); +var google_protobuf_struct_pb = require('google-protobuf/google/protobuf/struct_pb.js'); +var google_protobuf_empty_pb = require('google-protobuf/google/protobuf/empty_pb.js'); + +function serialize_fs_tree_manager_FetchReplicaRequest(arg) { + if (!(arg instanceof fs_tree_manager_pb.FetchReplicaRequest)) { + throw new Error('Expected argument of type fs_tree_manager.FetchReplicaRequest'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_fs_tree_manager_FetchReplicaRequest(buffer_arg) { + return fs_tree_manager_pb.FetchReplicaRequest.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_fs_tree_manager_MerkleTree(arg) { + if (!(arg instanceof fs_tree_manager_pb.MerkleTree)) { + throw new Error('Expected argument of type fs_tree_manager.MerkleTree'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_fs_tree_manager_MerkleTree(buffer_arg) { + return fs_tree_manager_pb.MerkleTree.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_fs_tree_manager_NewFSEntryRequest(arg) { + if (!(arg instanceof fs_tree_manager_pb.NewFSEntryRequest)) { + throw new Error('Expected argument of type fs_tree_manager.NewFSEntryRequest'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_fs_tree_manager_NewFSEntryRequest(buffer_arg) { + return fs_tree_manager_pb.NewFSEntryRequest.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_fs_tree_manager_PullRequest(arg) { + if (!(arg instanceof fs_tree_manager_pb.PullRequest)) { + throw new Error('Expected argument of type fs_tree_manager.PullRequest'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_fs_tree_manager_PullRequest(buffer_arg) { + return fs_tree_manager_pb.PullRequest.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_fs_tree_manager_PurgeReplicaRequest(arg) { + if (!(arg instanceof fs_tree_manager_pb.PurgeReplicaRequest)) { + throw new Error('Expected argument of type fs_tree_manager.PurgeReplicaRequest'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_fs_tree_manager_PurgeReplicaRequest(buffer_arg) { + return fs_tree_manager_pb.PurgeReplicaRequest.deserializeBinary(new Uint8Array(buffer_arg)); +} + +function serialize_fs_tree_manager_PushRequest(arg) { + if (!(arg 
instanceof fs_tree_manager_pb.PushRequest)) {
+    throw new Error('Expected argument of type fs_tree_manager.PushRequest');
+  }
+  return Buffer.from(arg.serializeBinary());
+}
+
+function deserialize_fs_tree_manager_PushRequest(buffer_arg) {
+  return fs_tree_manager_pb.PushRequest.deserializeBinary(new Uint8Array(buffer_arg));
+}
+
+function serialize_fs_tree_manager_RemoveFSEntryRequest(arg) {
+  if (!(arg instanceof fs_tree_manager_pb.RemoveFSEntryRequest)) {
+    throw new Error('Expected argument of type fs_tree_manager.RemoveFSEntryRequest');
+  }
+  return Buffer.from(arg.serializeBinary());
+}
+
+function deserialize_fs_tree_manager_RemoveFSEntryRequest(buffer_arg) {
+  return fs_tree_manager_pb.RemoveFSEntryRequest.deserializeBinary(new Uint8Array(buffer_arg));
+}
+
+function serialize_google_protobuf_Empty(arg) {
+  if (!(arg instanceof google_protobuf_empty_pb.Empty)) {
+    throw new Error('Expected argument of type google.protobuf.Empty');
+  }
+  return Buffer.from(arg.serializeBinary());
+}
+
+function deserialize_google_protobuf_Empty(buffer_arg) {
+  return google_protobuf_empty_pb.Empty.deserializeBinary(new Uint8Array(buffer_arg));
+}
+
+
+// For all RPC requests, a user identifier is always needed since replicas are
+// stored separately for each user.
+//
+// We use user_id instead of user_name/user_uuid since it's more accessible:
+// - fsentry includes user_id but not user_name/user_uuid
+// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/database/sqlite_setup/0001_create-tables.sql#L81)
+// - user_id is included in the fs events listener where user_name/user_uuid are
+// not available
+// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/WSPushService.js#L165-L166)
+//
+// We provide simple {New/Remove}FSEntry APIs as a straightforward way to
+// accommodate the wide variety of file system operations. These APIs should
+// always result in a coherent MerkleTree.
+var FSTreeManagerService = exports.FSTreeManagerService = {
+  fetchReplica: {
+    path: '/fs_tree_manager.FSTreeManager/FetchReplica',
+    requestStream: false,
+    responseStream: false,
+    requestType: fs_tree_manager_pb.FetchReplicaRequest,
+    responseType: fs_tree_manager_pb.MerkleTree,
+    requestSerialize: serialize_fs_tree_manager_FetchReplicaRequest,
+    requestDeserialize: deserialize_fs_tree_manager_FetchReplicaRequest,
+    responseSerialize: serialize_fs_tree_manager_MerkleTree,
+    responseDeserialize: deserialize_fs_tree_manager_MerkleTree,
+  },
+  pullDiff: {
+    path: '/fs_tree_manager.FSTreeManager/PullDiff',
+    requestStream: false,
+    responseStream: false,
+    requestType: fs_tree_manager_pb.PullRequest,
+    responseType: fs_tree_manager_pb.PushRequest,
+    requestSerialize: serialize_fs_tree_manager_PullRequest,
+    requestDeserialize: deserialize_fs_tree_manager_PullRequest,
+    responseSerialize: serialize_fs_tree_manager_PushRequest,
+    responseDeserialize: deserialize_fs_tree_manager_PushRequest,
+  },
+  // Insert a new FSEntry into the tree, update its parent's children list as
+// well.
+newFSEntry: { + path: '/fs_tree_manager.FSTreeManager/NewFSEntry', + requestStream: false, + responseStream: false, + requestType: fs_tree_manager_pb.NewFSEntryRequest, + responseType: google_protobuf_empty_pb.Empty, + requestSerialize: serialize_fs_tree_manager_NewFSEntryRequest, + requestDeserialize: deserialize_fs_tree_manager_NewFSEntryRequest, + responseSerialize: serialize_google_protobuf_Empty, + responseDeserialize: deserialize_google_protobuf_Empty, + }, + // Remove an FSEntry (and all its descendants) from the tree, update its +// parent's children list as well. +removeFSEntry: { + path: '/fs_tree_manager.FSTreeManager/RemoveFSEntry', + requestStream: false, + responseStream: false, + requestType: fs_tree_manager_pb.RemoveFSEntryRequest, + responseType: google_protobuf_empty_pb.Empty, + requestSerialize: serialize_fs_tree_manager_RemoveFSEntryRequest, + requestDeserialize: deserialize_fs_tree_manager_RemoveFSEntryRequest, + responseSerialize: serialize_google_protobuf_Empty, + responseDeserialize: deserialize_google_protobuf_Empty, + }, + // For any fs operations that cannot be handled by New/Remove APIs, just purge +// the replica. +purgeReplica: { + path: '/fs_tree_manager.FSTreeManager/PurgeReplica', + requestStream: false, + responseStream: false, + requestType: fs_tree_manager_pb.PurgeReplicaRequest, + responseType: google_protobuf_empty_pb.Empty, + requestSerialize: serialize_fs_tree_manager_PurgeReplicaRequest, + requestDeserialize: deserialize_fs_tree_manager_PurgeReplicaRequest, + responseSerialize: serialize_google_protobuf_Empty, + responseDeserialize: deserialize_google_protobuf_Empty, + }, +}; + +exports.FSTreeManagerClient = grpc.makeGenericClientConstructor(FSTreeManagerService, 'FSTreeManager'); diff --git a/src/fs_tree_manager/js/fs_tree_manager_pb.js b/src/fs_tree_manager/js/fs_tree_manager_pb.js new file mode 100644 index 0000000000..54421ad222 --- /dev/null +++ b/src/fs_tree_manager/js/fs_tree_manager_pb.js @@ -0,0 +1,2270 @@ +// source: fs_tree_manager.proto +/** + * @fileoverview + * @enhanceable + * @suppress {missingRequire} reports error on implicit type usages. + * @suppress {messageConventions} JS Compiler reports an error if a variable or + * field starts with 'MSG_' and isn't a translatable message. + * @public + */ +// GENERATED CODE -- DO NOT EDIT! 
+/* eslint-disable */ +// @ts-nocheck + +var jspb = require('google-protobuf'); +var goog = jspb; +var global = (function() { + if (this) { return this; } + if (typeof window !== 'undefined') { return window; } + if (typeof global !== 'undefined') { return global; } + if (typeof self !== 'undefined') { return self; } + return Function('return this')(); +}.call(null)); + +var google_protobuf_struct_pb = require('google-protobuf/google/protobuf/struct_pb.js'); +goog.object.extend(proto, google_protobuf_struct_pb); +var google_protobuf_empty_pb = require('google-protobuf/google/protobuf/empty_pb.js'); +goog.object.extend(proto, google_protobuf_empty_pb); +goog.exportSymbol('proto.fs_tree_manager.FSEntry', null, global); +goog.exportSymbol('proto.fs_tree_manager.FetchReplicaRequest', null, global); +goog.exportSymbol('proto.fs_tree_manager.MerkleNode', null, global); +goog.exportSymbol('proto.fs_tree_manager.MerkleTree', null, global); +goog.exportSymbol('proto.fs_tree_manager.NewFSEntryRequest', null, global); +goog.exportSymbol('proto.fs_tree_manager.PullRequest', null, global); +goog.exportSymbol('proto.fs_tree_manager.PullRequestItem', null, global); +goog.exportSymbol('proto.fs_tree_manager.PurgeReplicaRequest', null, global); +goog.exportSymbol('proto.fs_tree_manager.PushRequest', null, global); +goog.exportSymbol('proto.fs_tree_manager.PushRequestItem', null, global); +goog.exportSymbol('proto.fs_tree_manager.RemoveFSEntryRequest', null, global); +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.FetchReplicaRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.FetchReplicaRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.FetchReplicaRequest.displayName = 'proto.fs_tree_manager.FetchReplicaRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.NewFSEntryRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.NewFSEntryRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.NewFSEntryRequest.displayName = 'proto.fs_tree_manager.NewFSEntryRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. 
+ * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.RemoveFSEntryRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.RemoveFSEntryRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.RemoveFSEntryRequest.displayName = 'proto.fs_tree_manager.RemoveFSEntryRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.PurgeReplicaRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.PurgeReplicaRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.PurgeReplicaRequest.displayName = 'proto.fs_tree_manager.PurgeReplicaRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.MerkleNode = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.MerkleNode, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.MerkleNode.displayName = 'proto.fs_tree_manager.MerkleNode'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.MerkleTree = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.MerkleTree, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.MerkleTree.displayName = 'proto.fs_tree_manager.MerkleTree'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.FSEntry = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.FSEntry, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.FSEntry.displayName = 'proto.fs_tree_manager.FSEntry'; +} +/** + * Generated by JsPbCodeGenerator. 
+ * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.PullRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, proto.fs_tree_manager.PullRequest.repeatedFields_, null); +}; +goog.inherits(proto.fs_tree_manager.PullRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.PullRequest.displayName = 'proto.fs_tree_manager.PullRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.PullRequestItem = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.fs_tree_manager.PullRequestItem, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.PullRequestItem.displayName = 'proto.fs_tree_manager.PullRequestItem'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.PushRequest = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, proto.fs_tree_manager.PushRequest.repeatedFields_, null); +}; +goog.inherits(proto.fs_tree_manager.PushRequest, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.PushRequest.displayName = 'proto.fs_tree_manager.PushRequest'; +} +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.fs_tree_manager.PushRequestItem = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, proto.fs_tree_manager.PushRequestItem.repeatedFields_, null); +}; +goog.inherits(proto.fs_tree_manager.PushRequestItem, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.fs_tree_manager.PushRequestItem.displayName = 'proto.fs_tree_manager.PushRequestItem'; +} + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. 
+ * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.FetchReplicaRequest.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.FetchReplicaRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.FetchReplicaRequest} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.FetchReplicaRequest.toObject = function(includeInstance, msg) { + var f, obj = { + userId: jspb.Message.getFieldWithDefault(msg, 1, 0) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.FetchReplicaRequest} + */ +proto.fs_tree_manager.FetchReplicaRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.FetchReplicaRequest; + return proto.fs_tree_manager.FetchReplicaRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.FetchReplicaRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.FetchReplicaRequest} + */ +proto.fs_tree_manager.FetchReplicaRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt64()); + msg.setUserId(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.FetchReplicaRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.FetchReplicaRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.fs_tree_manager.FetchReplicaRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.FetchReplicaRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUserId(); + if (f !== 0) { + writer.writeInt64( + 1, + f + ); + } +}; + + +/** + * optional int64 user_id = 1; + * @return {number} + */ +proto.fs_tree_manager.FetchReplicaRequest.prototype.getUserId = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.fs_tree_manager.FetchReplicaRequest} returns this + */ +proto.fs_tree_manager.FetchReplicaRequest.prototype.setUserId = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.NewFSEntryRequest.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.NewFSEntryRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.NewFSEntryRequest} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.NewFSEntryRequest.toObject = function(includeInstance, msg) { + var f, obj = { + userId: jspb.Message.getFieldWithDefault(msg, 1, 0), + fsEntry: (f = msg.getFsEntry()) && proto.fs_tree_manager.FSEntry.toObject(includeInstance, f) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.NewFSEntryRequest} + */ +proto.fs_tree_manager.NewFSEntryRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.NewFSEntryRequest; + return proto.fs_tree_manager.NewFSEntryRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.NewFSEntryRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
+ * @return {!proto.fs_tree_manager.NewFSEntryRequest} + */ +proto.fs_tree_manager.NewFSEntryRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt64()); + msg.setUserId(value); + break; + case 2: + var value = new proto.fs_tree_manager.FSEntry; + reader.readMessage(value,proto.fs_tree_manager.FSEntry.deserializeBinaryFromReader); + msg.setFsEntry(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.NewFSEntryRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.NewFSEntryRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.fs_tree_manager.NewFSEntryRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.NewFSEntryRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUserId(); + if (f !== 0) { + writer.writeInt64( + 1, + f + ); + } + f = message.getFsEntry(); + if (f != null) { + writer.writeMessage( + 2, + f, + proto.fs_tree_manager.FSEntry.serializeBinaryToWriter + ); + } +}; + + +/** + * optional int64 user_id = 1; + * @return {number} + */ +proto.fs_tree_manager.NewFSEntryRequest.prototype.getUserId = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.fs_tree_manager.NewFSEntryRequest} returns this + */ +proto.fs_tree_manager.NewFSEntryRequest.prototype.setUserId = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + +/** + * optional FSEntry fs_entry = 2; + * @return {?proto.fs_tree_manager.FSEntry} + */ +proto.fs_tree_manager.NewFSEntryRequest.prototype.getFsEntry = function() { + return /** @type{?proto.fs_tree_manager.FSEntry} */ ( + jspb.Message.getWrapperField(this, proto.fs_tree_manager.FSEntry, 2)); +}; + + +/** + * @param {?proto.fs_tree_manager.FSEntry|undefined} value + * @return {!proto.fs_tree_manager.NewFSEntryRequest} returns this +*/ +proto.fs_tree_manager.NewFSEntryRequest.prototype.setFsEntry = function(value) { + return jspb.Message.setWrapperField(this, 2, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.fs_tree_manager.NewFSEntryRequest} returns this + */ +proto.fs_tree_manager.NewFSEntryRequest.prototype.clearFsEntry = function() { + return this.setFsEntry(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.fs_tree_manager.NewFSEntryRequest.prototype.hasFsEntry = function() { + return jspb.Message.getField(this, 2) != null; +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. 
+ * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.RemoveFSEntryRequest.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.RemoveFSEntryRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.RemoveFSEntryRequest} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.RemoveFSEntryRequest.toObject = function(includeInstance, msg) { + var f, obj = { + userId: jspb.Message.getFieldWithDefault(msg, 1, 0), + uuid: jspb.Message.getFieldWithDefault(msg, 2, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.RemoveFSEntryRequest} + */ +proto.fs_tree_manager.RemoveFSEntryRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.RemoveFSEntryRequest; + return proto.fs_tree_manager.RemoveFSEntryRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.RemoveFSEntryRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.RemoveFSEntryRequest} + */ +proto.fs_tree_manager.RemoveFSEntryRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt64()); + msg.setUserId(value); + break; + case 2: + var value = /** @type {string} */ (reader.readString()); + msg.setUuid(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.RemoveFSEntryRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.RemoveFSEntryRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.fs_tree_manager.RemoveFSEntryRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.RemoveFSEntryRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUserId(); + if (f !== 0) { + writer.writeInt64( + 1, + f + ); + } + f = message.getUuid(); + if (f.length > 0) { + writer.writeString( + 2, + f + ); + } +}; + + +/** + * optional int64 user_id = 1; + * @return {number} + */ +proto.fs_tree_manager.RemoveFSEntryRequest.prototype.getUserId = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.fs_tree_manager.RemoveFSEntryRequest} returns this + */ +proto.fs_tree_manager.RemoveFSEntryRequest.prototype.setUserId = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + +/** + * optional string uuid = 2; + * @return {string} + */ +proto.fs_tree_manager.RemoveFSEntryRequest.prototype.getUuid = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.RemoveFSEntryRequest} returns this + */ +proto.fs_tree_manager.RemoveFSEntryRequest.prototype.setUuid = function(value) { + return jspb.Message.setProto3StringField(this, 2, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.PurgeReplicaRequest.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.PurgeReplicaRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.PurgeReplicaRequest} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PurgeReplicaRequest.toObject = function(includeInstance, msg) { + var f, obj = { + userId: jspb.Message.getFieldWithDefault(msg, 1, 0) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.PurgeReplicaRequest} + */ +proto.fs_tree_manager.PurgeReplicaRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.PurgeReplicaRequest; + return proto.fs_tree_manager.PurgeReplicaRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. 
+ * @param {!proto.fs_tree_manager.PurgeReplicaRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.PurgeReplicaRequest} + */ +proto.fs_tree_manager.PurgeReplicaRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt64()); + msg.setUserId(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.PurgeReplicaRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.PurgeReplicaRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.fs_tree_manager.PurgeReplicaRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PurgeReplicaRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUserId(); + if (f !== 0) { + writer.writeInt64( + 1, + f + ); + } +}; + + +/** + * optional int64 user_id = 1; + * @return {number} + */ +proto.fs_tree_manager.PurgeReplicaRequest.prototype.getUserId = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.fs_tree_manager.PurgeReplicaRequest} returns this + */ +proto.fs_tree_manager.PurgeReplicaRequest.prototype.setUserId = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.MerkleNode.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.MerkleNode.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.MerkleNode} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.MerkleNode.toObject = function(includeInstance, msg) { + var f, obj = { + uuid: jspb.Message.getFieldWithDefault(msg, 1, ""), + merkleHash: jspb.Message.getFieldWithDefault(msg, 2, ""), + childrenUuidsMap: (f = msg.getChildrenUuidsMap()) ? 
f.toObject(includeInstance, undefined) : [], + parentUuid: jspb.Message.getFieldWithDefault(msg, 4, ""), + fsEntry: (f = msg.getFsEntry()) && proto.fs_tree_manager.FSEntry.toObject(includeInstance, f) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.MerkleNode} + */ +proto.fs_tree_manager.MerkleNode.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.MerkleNode; + return proto.fs_tree_manager.MerkleNode.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.MerkleNode} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.MerkleNode} + */ +proto.fs_tree_manager.MerkleNode.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setUuid(value); + break; + case 2: + var value = /** @type {string} */ (reader.readString()); + msg.setMerkleHash(value); + break; + case 3: + var value = msg.getChildrenUuidsMap(); + reader.readMessage(value, function(message, reader) { + jspb.Map.deserializeBinary(message, reader, jspb.BinaryReader.prototype.readString, jspb.BinaryReader.prototype.readBool, null, "", false); + }); + break; + case 4: + var value = /** @type {string} */ (reader.readString()); + msg.setParentUuid(value); + break; + case 5: + var value = new proto.fs_tree_manager.FSEntry; + reader.readMessage(value,proto.fs_tree_manager.FSEntry.deserializeBinaryFromReader); + msg.setFsEntry(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.MerkleNode.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.MerkleNode.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.fs_tree_manager.MerkleNode} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.MerkleNode.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUuid(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getMerkleHash(); + if (f.length > 0) { + writer.writeString( + 2, + f + ); + } + f = message.getChildrenUuidsMap(true); + if (f && f.getLength() > 0) { + f.serializeBinary(3, writer, jspb.BinaryWriter.prototype.writeString, jspb.BinaryWriter.prototype.writeBool); + } + f = message.getParentUuid(); + if (f.length > 0) { + writer.writeString( + 4, + f + ); + } + f = message.getFsEntry(); + if (f != null) { + writer.writeMessage( + 5, + f, + proto.fs_tree_manager.FSEntry.serializeBinaryToWriter + ); + } +}; + + +/** + * optional string uuid = 1; + * @return {string} + */ +proto.fs_tree_manager.MerkleNode.prototype.getUuid = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.MerkleNode} returns this + */ +proto.fs_tree_manager.MerkleNode.prototype.setUuid = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * optional string merkle_hash = 2; + * @return {string} + */ +proto.fs_tree_manager.MerkleNode.prototype.getMerkleHash = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.MerkleNode} returns this + */ +proto.fs_tree_manager.MerkleNode.prototype.setMerkleHash = function(value) { + return jspb.Message.setProto3StringField(this, 2, value); +}; + + +/** + * map children_uuids = 3; + * @param {boolean=} opt_noLazyCreate Do not create the map if + * empty, instead returning `undefined` + * @return {!jspb.Map} + */ +proto.fs_tree_manager.MerkleNode.prototype.getChildrenUuidsMap = function(opt_noLazyCreate) { + return /** @type {!jspb.Map} */ ( + jspb.Message.getMapField(this, 3, opt_noLazyCreate, + null)); +}; + + +/** + * Clears values from the map. The map will be non-null. + * @return {!proto.fs_tree_manager.MerkleNode} returns this + */ +proto.fs_tree_manager.MerkleNode.prototype.clearChildrenUuidsMap = function() { + this.getChildrenUuidsMap().clear(); + return this;}; + + +/** + * optional string parent_uuid = 4; + * @return {string} + */ +proto.fs_tree_manager.MerkleNode.prototype.getParentUuid = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 4, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.MerkleNode} returns this + */ +proto.fs_tree_manager.MerkleNode.prototype.setParentUuid = function(value) { + return jspb.Message.setProto3StringField(this, 4, value); +}; + + +/** + * optional FSEntry fs_entry = 5; + * @return {?proto.fs_tree_manager.FSEntry} + */ +proto.fs_tree_manager.MerkleNode.prototype.getFsEntry = function() { + return /** @type{?proto.fs_tree_manager.FSEntry} */ ( + jspb.Message.getWrapperField(this, proto.fs_tree_manager.FSEntry, 5)); +}; + + +/** + * @param {?proto.fs_tree_manager.FSEntry|undefined} value + * @return {!proto.fs_tree_manager.MerkleNode} returns this +*/ +proto.fs_tree_manager.MerkleNode.prototype.setFsEntry = function(value) { + return jspb.Message.setWrapperField(this, 5, value); +}; + + +/** + * Clears the message field making it undefined. 
+ * @return {!proto.fs_tree_manager.MerkleNode} returns this + */ +proto.fs_tree_manager.MerkleNode.prototype.clearFsEntry = function() { + return this.setFsEntry(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.fs_tree_manager.MerkleNode.prototype.hasFsEntry = function() { + return jspb.Message.getField(this, 5) != null; +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.MerkleTree.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.MerkleTree.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.MerkleTree} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.MerkleTree.toObject = function(includeInstance, msg) { + var f, obj = { + rootUuid: jspb.Message.getFieldWithDefault(msg, 1, ""), + nodesMap: (f = msg.getNodesMap()) ? f.toObject(includeInstance, proto.fs_tree_manager.MerkleNode.toObject) : [] + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.MerkleTree} + */ +proto.fs_tree_manager.MerkleTree.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.MerkleTree; + return proto.fs_tree_manager.MerkleTree.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.MerkleTree} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.MerkleTree} + */ +proto.fs_tree_manager.MerkleTree.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setRootUuid(value); + break; + case 2: + var value = msg.getNodesMap(); + reader.readMessage(value, function(message, reader) { + jspb.Map.deserializeBinary(message, reader, jspb.BinaryReader.prototype.readString, jspb.BinaryReader.prototype.readMessage, proto.fs_tree_manager.MerkleNode.deserializeBinaryFromReader, "", new proto.fs_tree_manager.MerkleNode()); + }); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). 
+ * @return {!Uint8Array} + */ +proto.fs_tree_manager.MerkleTree.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.MerkleTree.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.fs_tree_manager.MerkleTree} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.MerkleTree.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getRootUuid(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getNodesMap(true); + if (f && f.getLength() > 0) { + f.serializeBinary(2, writer, jspb.BinaryWriter.prototype.writeString, jspb.BinaryWriter.prototype.writeMessage, proto.fs_tree_manager.MerkleNode.serializeBinaryToWriter); + } +}; + + +/** + * optional string root_uuid = 1; + * @return {string} + */ +proto.fs_tree_manager.MerkleTree.prototype.getRootUuid = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.MerkleTree} returns this + */ +proto.fs_tree_manager.MerkleTree.prototype.setRootUuid = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * map nodes = 2; + * @param {boolean=} opt_noLazyCreate Do not create the map if + * empty, instead returning `undefined` + * @return {!jspb.Map} + */ +proto.fs_tree_manager.MerkleTree.prototype.getNodesMap = function(opt_noLazyCreate) { + return /** @type {!jspb.Map} */ ( + jspb.Message.getMapField(this, 2, opt_noLazyCreate, + proto.fs_tree_manager.MerkleNode)); +}; + + +/** + * Clears values from the map. The map will be non-null. + * @return {!proto.fs_tree_manager.MerkleTree} returns this + */ +proto.fs_tree_manager.MerkleTree.prototype.clearNodesMap = function() { + this.getNodesMap().clear(); + return this;}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.FSEntry.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.FSEntry.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.FSEntry} msg The msg instance to transform. 
+ * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.FSEntry.toObject = function(includeInstance, msg) { + var f, obj = { + metadata: (f = msg.getMetadata()) && google_protobuf_struct_pb.Struct.toObject(includeInstance, f) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.FSEntry} + */ +proto.fs_tree_manager.FSEntry.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.FSEntry; + return proto.fs_tree_manager.FSEntry.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.FSEntry} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.FSEntry} + */ +proto.fs_tree_manager.FSEntry.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = new google_protobuf_struct_pb.Struct; + reader.readMessage(value,google_protobuf_struct_pb.Struct.deserializeBinaryFromReader); + msg.setMetadata(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.FSEntry.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.FSEntry.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.fs_tree_manager.FSEntry} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.FSEntry.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getMetadata(); + if (f != null) { + writer.writeMessage( + 1, + f, + google_protobuf_struct_pb.Struct.serializeBinaryToWriter + ); + } +}; + + +/** + * optional google.protobuf.Struct metadata = 1; + * @return {?proto.google.protobuf.Struct} + */ +proto.fs_tree_manager.FSEntry.prototype.getMetadata = function() { + return /** @type{?proto.google.protobuf.Struct} */ ( + jspb.Message.getWrapperField(this, google_protobuf_struct_pb.Struct, 1)); +}; + + +/** + * @param {?proto.google.protobuf.Struct|undefined} value + * @return {!proto.fs_tree_manager.FSEntry} returns this +*/ +proto.fs_tree_manager.FSEntry.prototype.setMetadata = function(value) { + return jspb.Message.setWrapperField(this, 1, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.fs_tree_manager.FSEntry} returns this + */ +proto.fs_tree_manager.FSEntry.prototype.clearMetadata = function() { + return this.setMetadata(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.fs_tree_manager.FSEntry.prototype.hasMetadata = function() { + return jspb.Message.getField(this, 1) != null; +}; + + + +/** + * List of repeated fields within this message type. 
+ * @private {!Array} + * @const + */ +proto.fs_tree_manager.PullRequest.repeatedFields_ = [2]; + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.PullRequest.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.PullRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.PullRequest} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PullRequest.toObject = function(includeInstance, msg) { + var f, obj = { + userId: jspb.Message.getFieldWithDefault(msg, 1, 0), + pullRequestList: jspb.Message.toObjectList(msg.getPullRequestList(), + proto.fs_tree_manager.PullRequestItem.toObject, includeInstance) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.PullRequest} + */ +proto.fs_tree_manager.PullRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.PullRequest; + return proto.fs_tree_manager.PullRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.PullRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.PullRequest} + */ +proto.fs_tree_manager.PullRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt64()); + msg.setUserId(value); + break; + case 2: + var value = new proto.fs_tree_manager.PullRequestItem; + reader.readMessage(value,proto.fs_tree_manager.PullRequestItem.deserializeBinaryFromReader); + msg.addPullRequest(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.PullRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.PullRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.fs_tree_manager.PullRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PullRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUserId(); + if (f !== 0) { + writer.writeInt64( + 1, + f + ); + } + f = message.getPullRequestList(); + if (f.length > 0) { + writer.writeRepeatedMessage( + 2, + f, + proto.fs_tree_manager.PullRequestItem.serializeBinaryToWriter + ); + } +}; + + +/** + * optional int64 user_id = 1; + * @return {number} + */ +proto.fs_tree_manager.PullRequest.prototype.getUserId = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.fs_tree_manager.PullRequest} returns this + */ +proto.fs_tree_manager.PullRequest.prototype.setUserId = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + +/** + * repeated PullRequestItem pull_request = 2; + * @return {!Array} + */ +proto.fs_tree_manager.PullRequest.prototype.getPullRequestList = function() { + return /** @type{!Array} */ ( + jspb.Message.getRepeatedWrapperField(this, proto.fs_tree_manager.PullRequestItem, 2)); +}; + + +/** + * @param {!Array} value + * @return {!proto.fs_tree_manager.PullRequest} returns this +*/ +proto.fs_tree_manager.PullRequest.prototype.setPullRequestList = function(value) { + return jspb.Message.setRepeatedWrapperField(this, 2, value); +}; + + +/** + * @param {!proto.fs_tree_manager.PullRequestItem=} opt_value + * @param {number=} opt_index + * @return {!proto.fs_tree_manager.PullRequestItem} + */ +proto.fs_tree_manager.PullRequest.prototype.addPullRequest = function(opt_value, opt_index) { + return jspb.Message.addToRepeatedWrapperField(this, 2, opt_value, proto.fs_tree_manager.PullRequestItem, opt_index); +}; + + +/** + * Clears the list making it empty but non-null. + * @return {!proto.fs_tree_manager.PullRequest} returns this + */ +proto.fs_tree_manager.PullRequest.prototype.clearPullRequestList = function() { + return this.setPullRequestList([]); +}; + + + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.PullRequestItem.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.PullRequestItem.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.PullRequestItem} msg The msg instance to transform. 
+ * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PullRequestItem.toObject = function(includeInstance, msg) { + var f, obj = { + uuid: jspb.Message.getFieldWithDefault(msg, 1, ""), + merkleHash: jspb.Message.getFieldWithDefault(msg, 2, "") + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.PullRequestItem} + */ +proto.fs_tree_manager.PullRequestItem.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.PullRequestItem; + return proto.fs_tree_manager.PullRequestItem.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.PullRequestItem} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.PullRequestItem} + */ +proto.fs_tree_manager.PullRequestItem.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setUuid(value); + break; + case 2: + var value = /** @type {string} */ (reader.readString()); + msg.setMerkleHash(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.PullRequestItem.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.PullRequestItem.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. 
+ * @param {!proto.fs_tree_manager.PullRequestItem} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PullRequestItem.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUuid(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getMerkleHash(); + if (f.length > 0) { + writer.writeString( + 2, + f + ); + } +}; + + +/** + * optional string uuid = 1; + * @return {string} + */ +proto.fs_tree_manager.PullRequestItem.prototype.getUuid = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.PullRequestItem} returns this + */ +proto.fs_tree_manager.PullRequestItem.prototype.setUuid = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * optional string merkle_hash = 2; + * @return {string} + */ +proto.fs_tree_manager.PullRequestItem.prototype.getMerkleHash = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.PullRequestItem} returns this + */ +proto.fs_tree_manager.PullRequestItem.prototype.setMerkleHash = function(value) { + return jspb.Message.setProto3StringField(this, 2, value); +}; + + + +/** + * List of repeated fields within this message type. + * @private {!Array} + * @const + */ +proto.fs_tree_manager.PushRequest.repeatedFields_ = [2]; + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.PushRequest.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.PushRequest.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.PushRequest} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PushRequest.toObject = function(includeInstance, msg) { + var f, obj = { + userId: jspb.Message.getFieldWithDefault(msg, 1, 0), + pushRequestList: jspb.Message.toObjectList(msg.getPushRequestList(), + proto.fs_tree_manager.PushRequestItem.toObject, includeInstance) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. 
+ * @return {!proto.fs_tree_manager.PushRequest} + */ +proto.fs_tree_manager.PushRequest.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.PushRequest; + return proto.fs_tree_manager.PushRequest.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.PushRequest} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.fs_tree_manager.PushRequest} + */ +proto.fs_tree_manager.PushRequest.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {number} */ (reader.readInt64()); + msg.setUserId(value); + break; + case 2: + var value = new proto.fs_tree_manager.PushRequestItem; + reader.readMessage(value,proto.fs_tree_manager.PushRequestItem.deserializeBinaryFromReader); + msg.addPushRequest(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.PushRequest.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.PushRequest.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.fs_tree_manager.PushRequest} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PushRequest.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUserId(); + if (f !== 0) { + writer.writeInt64( + 1, + f + ); + } + f = message.getPushRequestList(); + if (f.length > 0) { + writer.writeRepeatedMessage( + 2, + f, + proto.fs_tree_manager.PushRequestItem.serializeBinaryToWriter + ); + } +}; + + +/** + * optional int64 user_id = 1; + * @return {number} + */ +proto.fs_tree_manager.PushRequest.prototype.getUserId = function() { + return /** @type {number} */ (jspb.Message.getFieldWithDefault(this, 1, 0)); +}; + + +/** + * @param {number} value + * @return {!proto.fs_tree_manager.PushRequest} returns this + */ +proto.fs_tree_manager.PushRequest.prototype.setUserId = function(value) { + return jspb.Message.setProto3IntField(this, 1, value); +}; + + +/** + * repeated PushRequestItem push_request = 2; + * @return {!Array} + */ +proto.fs_tree_manager.PushRequest.prototype.getPushRequestList = function() { + return /** @type{!Array} */ ( + jspb.Message.getRepeatedWrapperField(this, proto.fs_tree_manager.PushRequestItem, 2)); +}; + + +/** + * @param {!Array} value + * @return {!proto.fs_tree_manager.PushRequest} returns this +*/ +proto.fs_tree_manager.PushRequest.prototype.setPushRequestList = function(value) { + return jspb.Message.setRepeatedWrapperField(this, 2, value); +}; + + +/** + * @param {!proto.fs_tree_manager.PushRequestItem=} opt_value + * @param {number=} opt_index + * @return {!proto.fs_tree_manager.PushRequestItem} + */ +proto.fs_tree_manager.PushRequest.prototype.addPushRequest = function(opt_value, opt_index) { + return jspb.Message.addToRepeatedWrapperField(this, 2, opt_value, 
proto.fs_tree_manager.PushRequestItem, opt_index); +}; + + +/** + * Clears the list making it empty but non-null. + * @return {!proto.fs_tree_manager.PushRequest} returns this + */ +proto.fs_tree_manager.PushRequest.prototype.clearPushRequestList = function() { + return this.setPushRequestList([]); +}; + + + +/** + * List of repeated fields within this message type. + * @private {!Array} + * @const + */ +proto.fs_tree_manager.PushRequestItem.repeatedFields_ = [4]; + + + +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.fs_tree_manager.PushRequestItem.prototype.toObject = function(opt_includeInstance) { + return proto.fs_tree_manager.PushRequestItem.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.fs_tree_manager.PushRequestItem} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PushRequestItem.toObject = function(includeInstance, msg) { + var f, obj = { + uuid: jspb.Message.getFieldWithDefault(msg, 1, ""), + merkleHash: jspb.Message.getFieldWithDefault(msg, 2, ""), + fsEntry: (f = msg.getFsEntry()) && proto.fs_tree_manager.FSEntry.toObject(includeInstance, f), + childrenList: jspb.Message.toObjectList(msg.getChildrenList(), + proto.fs_tree_manager.PushRequestItem.toObject, includeInstance) + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.fs_tree_manager.PushRequestItem} + */ +proto.fs_tree_manager.PushRequestItem.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.fs_tree_manager.PushRequestItem; + return proto.fs_tree_manager.PushRequestItem.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.fs_tree_manager.PushRequestItem} msg The message object to deserialize into. + * @param {!jspb.BinaryReader} reader The BinaryReader to use. 
+ * @return {!proto.fs_tree_manager.PushRequestItem} + */ +proto.fs_tree_manager.PushRequestItem.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = /** @type {string} */ (reader.readString()); + msg.setUuid(value); + break; + case 2: + var value = /** @type {string} */ (reader.readString()); + msg.setMerkleHash(value); + break; + case 3: + var value = new proto.fs_tree_manager.FSEntry; + reader.readMessage(value,proto.fs_tree_manager.FSEntry.deserializeBinaryFromReader); + msg.setFsEntry(value); + break; + case 4: + var value = new proto.fs_tree_manager.PushRequestItem; + reader.readMessage(value,proto.fs_tree_manager.PushRequestItem.deserializeBinaryFromReader); + msg.addChildren(value); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.fs_tree_manager.PushRequestItem.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.fs_tree_manager.PushRequestItem.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.fs_tree_manager.PushRequestItem} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.fs_tree_manager.PushRequestItem.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getUuid(); + if (f.length > 0) { + writer.writeString( + 1, + f + ); + } + f = message.getMerkleHash(); + if (f.length > 0) { + writer.writeString( + 2, + f + ); + } + f = message.getFsEntry(); + if (f != null) { + writer.writeMessage( + 3, + f, + proto.fs_tree_manager.FSEntry.serializeBinaryToWriter + ); + } + f = message.getChildrenList(); + if (f.length > 0) { + writer.writeRepeatedMessage( + 4, + f, + proto.fs_tree_manager.PushRequestItem.serializeBinaryToWriter + ); + } +}; + + +/** + * optional string uuid = 1; + * @return {string} + */ +proto.fs_tree_manager.PushRequestItem.prototype.getUuid = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 1, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.PushRequestItem} returns this + */ +proto.fs_tree_manager.PushRequestItem.prototype.setUuid = function(value) { + return jspb.Message.setProto3StringField(this, 1, value); +}; + + +/** + * optional string merkle_hash = 2; + * @return {string} + */ +proto.fs_tree_manager.PushRequestItem.prototype.getMerkleHash = function() { + return /** @type {string} */ (jspb.Message.getFieldWithDefault(this, 2, "")); +}; + + +/** + * @param {string} value + * @return {!proto.fs_tree_manager.PushRequestItem} returns this + */ +proto.fs_tree_manager.PushRequestItem.prototype.setMerkleHash = function(value) { + return jspb.Message.setProto3StringField(this, 2, value); +}; + + +/** + * optional FSEntry fs_entry = 3; + * @return {?proto.fs_tree_manager.FSEntry} + */ +proto.fs_tree_manager.PushRequestItem.prototype.getFsEntry = function() { + return /** @type{?proto.fs_tree_manager.FSEntry} */ ( + jspb.Message.getWrapperField(this, proto.fs_tree_manager.FSEntry, 3)); +}; + + +/** + * @param {?proto.fs_tree_manager.FSEntry|undefined} value + * @return 
{!proto.fs_tree_manager.PushRequestItem} returns this +*/ +proto.fs_tree_manager.PushRequestItem.prototype.setFsEntry = function(value) { + return jspb.Message.setWrapperField(this, 3, value); +}; + + +/** + * Clears the message field making it undefined. + * @return {!proto.fs_tree_manager.PushRequestItem} returns this + */ +proto.fs_tree_manager.PushRequestItem.prototype.clearFsEntry = function() { + return this.setFsEntry(undefined); +}; + + +/** + * Returns whether this field is set. + * @return {boolean} + */ +proto.fs_tree_manager.PushRequestItem.prototype.hasFsEntry = function() { + return jspb.Message.getField(this, 3) != null; +}; + + +/** + * repeated PushRequestItem children = 4; + * @return {!Array} + */ +proto.fs_tree_manager.PushRequestItem.prototype.getChildrenList = function() { + return /** @type{!Array} */ ( + jspb.Message.getRepeatedWrapperField(this, proto.fs_tree_manager.PushRequestItem, 4)); +}; + + +/** + * @param {!Array} value + * @return {!proto.fs_tree_manager.PushRequestItem} returns this +*/ +proto.fs_tree_manager.PushRequestItem.prototype.setChildrenList = function(value) { + return jspb.Message.setRepeatedWrapperField(this, 4, value); +}; + + +/** + * @param {!proto.fs_tree_manager.PushRequestItem=} opt_value + * @param {number=} opt_index + * @return {!proto.fs_tree_manager.PushRequestItem} + */ +proto.fs_tree_manager.PushRequestItem.prototype.addChildren = function(opt_value, opt_index) { + return jspb.Message.addToRepeatedWrapperField(this, 4, opt_value, proto.fs_tree_manager.PushRequestItem, opt_index); +}; + + +/** + * Clears the list making it empty but non-null. + * @return {!proto.fs_tree_manager.PushRequestItem} returns this + */ +proto.fs_tree_manager.PushRequestItem.prototype.clearChildrenList = function() { + return this.setChildrenList([]); +}; + + +goog.object.extend(exports, proto.fs_tree_manager); diff --git a/src/fs_tree_manager/js/package.json b/src/fs_tree_manager/js/package.json new file mode 100644 index 0000000000..a3c15a7a63 --- /dev/null +++ b/src/fs_tree_manager/js/package.json @@ -0,0 +1 @@ +{ "type": "commonjs" } diff --git a/src/fs_tree_manager/merkle/debug.go b/src/fs_tree_manager/merkle/debug.go new file mode 100644 index 0000000000..10252d603b --- /dev/null +++ b/src/fs_tree_manager/merkle/debug.go @@ -0,0 +1,198 @@ +package merkle + +import ( + "fmt" + "log" + "sort" + "strings" + + pb "github.com/puter/fs_tree_manager/go" +) + +var ( + // ignoreDirs contains directories to ignore when printing the tree + ignoreDirs = []string{ + "/admin/api_test", + "/admin/Trash", + } +) + +// IntegrityCheck validates the integrity of all trees in the provided map +func IntegrityCheck(globalTrees map[int64]*Tree) { + for userID, wrappedTree := range globalTrees { + tree := wrappedTree.GetTree() + + root, exists := tree.Nodes[tree.RootUuid] + if !exists { + log.Panicf("[user %d] root uuid not found: %s", userID, tree.RootUuid) + } + rootPath := root.FsEntry.Metadata.AsMap()["path"].(string) + + for UUID, node := range tree.Nodes { + // check: uuid is consistent + if UUID != node.Uuid { + log.Panicf("[user %d] uuid is inconsistent: %s != %s", userID, UUID, node.Uuid) + } + + // check with parent + if node.Uuid != tree.RootUuid { + // check: all node should have a parent + if node.ParentUuid == "" { + log.Panicf("[user %d] parent uuid is empty: %s", userID, node.Uuid) + } + + // check: parent uuid is valid + parent, exists := tree.Nodes[node.ParentUuid] + if !exists { + log.Panicf("[user %d] parent uuid not found: %s", userID, node.ParentUuid) + 
}
+
+ // check: parent has self as a child
+ if !parent.ChildrenUuids[node.Uuid] {
+ log.Panicf("[user %d] parent does not have node as a child: %s", userID, node.Uuid)
+ }
+
+ // check: root path is a prefix of the parent path
+ parentPath := parent.FsEntry.Metadata.AsMap()["path"].(string)
+ if !strings.HasPrefix(parentPath, rootPath) {
+ log.Panicf("[user %d] parent path is not under the root path: %s", userID, parentPath)
+ }
+ }
+
+ // check with children
+ for childUUID := range node.ChildrenUuids {
+ // check: child uuid is valid
+ if _, exists := tree.Nodes[childUUID]; !exists {
+ PrintTree(tree)
+ log.Panicf("[user %d] child uuid not found: %s", userID, childUUID)
+ }
+ }
+ }
+ }
+}
+
+// PrintTree prints the tree in a human-readable format, from the root to the leaves
+func PrintTree(tree *pb.MerkleTree) {
+ if tree == nil || tree.RootUuid == "" {
+ fmt.Println("(empty tree)")
+ return
+ }
+
+ rootNode, exists := tree.Nodes[tree.RootUuid]
+ if !exists {
+ fmt.Printf("(root node not found: %s)\n", tree.RootUuid)
+ return
+ }
+
+ // Print tree header
+ fmt.Printf("Merkle Tree (Root: %s)\n", tree.RootUuid)
+ fmt.Println("├── " + getNodeDisplay(rootNode))
+
+ // Print children recursively
+ printNodeChildren(tree, rootNode, "│ ")
+}
+
+// printNodeChildren recursively prints children of a node
+func printNodeChildren(tree *pb.MerkleTree, node *pb.MerkleNode, prefix string) {
+ children := node.ChildrenUuids
+ if len(children) == 0 {
+ return
+ }
+
+ // Sort children by path for consistent display
+ sortedChildren := make([]string, 0, len(children))
+ for childUUID := range children {
+ sortedChildren = append(sortedChildren, childUUID)
+ }
+
+ // Sort by path for better readability
+ sort.Slice(sortedChildren, func(i, j int) bool {
+ childI, existsI := tree.Nodes[sortedChildren[i]]
+ childJ, existsJ := tree.Nodes[sortedChildren[j]]
+ if !existsI || !existsJ {
+ return sortedChildren[i] < sortedChildren[j]
+ }
+
+ pathI := getPath(childI)
+ pathJ := getPath(childJ)
+ return pathI < pathJ
+ })
+
+ for i, childUUID := range sortedChildren {
+ childNode, exists := tree.Nodes[childUUID]
+ if !exists {
+ fmt.Printf("%s├── [MISSING NODE: %s]\n", prefix, childUUID)
+ continue
+ }
+
+ // Check if this child should be ignored
+ childPath := getPath(childNode)
+ shouldIgnore := false
+ for _, ignoreDir := range ignoreDirs {
+ if childPath == ignoreDir {
+ shouldIgnore = true
+ break
+ }
+ }
+
+ if shouldIgnore {
+ continue
+ }
+
+ isLast := i == len(sortedChildren)-1
+ var currentPrefix, nextPrefix string
+
+ if isLast {
+ currentPrefix = "└── "
+ nextPrefix = " "
+ } else {
+ currentPrefix = "├── "
+ nextPrefix = "│ "
+ }
+
+ fmt.Printf("%s%s%s\n", prefix, currentPrefix, getNodeDisplay(childNode))
+
+ // Recursively print children
+ printNodeChildren(tree, childNode, prefix+nextPrefix)
+ }
+}
+
+// getNodeDisplay returns a formatted string for displaying a node
+func getNodeDisplay(node *pb.MerkleNode) string {
+ path := getPath(node)
+ name := getName(node)
+
+ // Truncate UUID to first 8 characters for readability
+ shortUUID := node.Uuid
+ if len(shortUUID) > 8 {
+ shortUUID = shortUUID[:8]
+ }
+
+ return fmt.Sprintf("%s [%s] (uuid: %s)", path, name, shortUUID)
+}
+
+// getPath extracts the path from node metadata
+func getPath(node *pb.MerkleNode) string {
+ if node.FsEntry == nil || node.FsEntry.Metadata == nil {
+ return "[no path]"
+ }
+
+ metadata := node.FsEntry.Metadata.AsMap()
+ if path, ok := metadata["path"].(string); ok {
+ return path
+ }
+ return "[no path]"
+}
+
+// getName extracts the name from node metadata
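+// It returns "[no name]" when the fs entry, its metadata, or the name field is missing.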
+func getName(node *pb.MerkleNode) string { + if node.FsEntry == nil || node.FsEntry.Metadata == nil { + return "[no name]" + } + + metadata := node.FsEntry.Metadata.AsMap() + if name, ok := metadata["name"].(string); ok { + return name + } + return "[no name]" +} diff --git a/src/fs_tree_manager/merkle/tree.go b/src/fs_tree_manager/merkle/tree.go new file mode 100644 index 0000000000..df6af18aa5 --- /dev/null +++ b/src/fs_tree_manager/merkle/tree.go @@ -0,0 +1,169 @@ +package merkle + +import ( + "encoding/json" + "fmt" + "sort" + "sync" + "time" + + "github.com/cespare/xxhash/v2" + pb "github.com/puter/fs_tree_manager/go" +) + +// Tree represents a Merkle tree with thread-safe access +type Tree struct { + tree *pb.MerkleTree + lock sync.RWMutex + + // Last time the tree was synced from database + LastSynced time.Time + + // Last time the tree was read (by FetchReplica/PullDiff) + LastRead time.Time +} + +// NewTree creates a new MerkleTree instance +func NewTree(tree *pb.MerkleTree) *Tree { + return &Tree{ + tree: tree, + LastSynced: time.Now(), + } +} + +// GetTree returns the underlying MerkleTree +func (t *Tree) GetTree() *pb.MerkleTree { + return t.tree +} + +// RLock acquires a read lock +func (t *Tree) RLock() { + t.lock.RLock() +} + +// RUnlock releases a read lock +func (t *Tree) RUnlock() { + t.lock.RUnlock() +} + +// Lock acquires a write lock +func (t *Tree) Lock() { + t.lock.Lock() +} + +// Unlock releases a write lock +func (t *Tree) Unlock() { + t.lock.Unlock() +} + +// CalculateHash calculates the MerkleHash for a node based on its attributes and children hashes +func CalculateHash(node *pb.MerkleNode, childrenHashes []string) string { + hasher := xxhash.New() + + if node.FsEntry.Metadata != nil { + metadataBytes, err := json.Marshal(node.FsEntry.Metadata.AsMap()) + if err == nil { + hasher.Write(metadataBytes) + } + } + + sort.Strings(childrenHashes) + + for _, childHash := range childrenHashes { + hasher.WriteString(childHash) + } + + hash := hasher.Sum64() + hashStr := fmt.Sprintf("%d", hash) + return hashStr +} + +// CalculateTreeHashes calculates MerkleHash for all nodes in the tree using a bottom-up approach +func CalculateTreeHashes(tree *pb.MerkleTree) { + // Track which nodes have been processed + processed := make(map[string]bool) + + // First pass: calculate hashes for leaf nodes (nodes with no children) + for _, node := range tree.Nodes { + if len(node.ChildrenUuids) == 0 { + node.MerkleHash = CalculateHash(node, []string{}) + processed[node.Uuid] = true + } + } + + // Continue processing until all nodes are done + for { + progressMade := false + + // Process nodes whose children are all processed + for _, node := range tree.Nodes { + if processed[node.Uuid] { + continue + } + + // Check if all children have been processed + allChildrenReady := true + childrenHashes := make([]string, 0, len(node.ChildrenUuids)) + + for childID := range node.ChildrenUuids { + if child, exists := tree.Nodes[childID]; exists { + if !processed[childID] { + allChildrenReady = false + break + } + if child.MerkleHash != "" { + childrenHashes = append(childrenHashes, child.MerkleHash) + } + } + } + + // If all children are ready, calculate this node's hash + if allChildrenReady { + node.MerkleHash = CalculateHash(node, childrenHashes) + processed[node.Uuid] = true + progressMade = true + } + } + + // If no progress was made, we're done + if !progressMade { + break + } + } +} + +// RecalculateAncestorHashes recalculates Merkle hashes for all ancestors of a given node +func 
RecalculateAncestorHashes(tree *pb.MerkleTree, nodeID string) {
+ currentNodeID := nodeID
+
+ for currentNodeID != "" {
+ currentNode, exists := tree.Nodes[currentNodeID]
+ if !exists {
+ break
+ }
+
+ childrenHashes := make([]string, 0, len(currentNode.ChildrenUuids))
+ for childID := range currentNode.ChildrenUuids {
+ if child, exists := tree.Nodes[childID]; exists && child.MerkleHash != "" {
+ childrenHashes = append(childrenHashes, child.MerkleHash)
+ }
+ }
+
+ currentNode.MerkleHash = CalculateHash(currentNode, childrenHashes)
+
+ currentNodeID = currentNode.ParentUuid
+ }
+}
+
+// GetAllDescendants collects all descendant UUIDs of a given node
+func GetAllDescendants(nodeUUID string, nodes map[string]*pb.MerkleNode, descendants map[string]bool) {
+ node, exists := nodes[nodeUUID]
+ if !exists {
+ return
+ }
+
+ for childUUID := range node.ChildrenUuids {
+ descendants[childUUID] = true
+ GetAllDescendants(childUUID, nodes, descendants)
+ }
+}
diff --git a/src/fs_tree_manager/proto/fs_tree_manager.proto b/src/fs_tree_manager/proto/fs_tree_manager.proto
new file mode 100644
index 0000000000..1ec5b173fa
--- /dev/null
+++ b/src/fs_tree_manager/proto/fs_tree_manager.proto
@@ -0,0 +1,120 @@
+syntax = "proto3";
+
+import "google/protobuf/struct.proto";
+import "google/protobuf/empty.proto";
+
+package fs_tree_manager;
+
+option go_package = "github.com/puter/fs_tree_manager";
+
+// For all RPC requests, a user identifier is always needed since replicas are
+// stored separately for each user.
+//
+// We use user_id instead of user_name/user_uuid since it's more accessible:
+// - fsentry includes user_id but not user_name/user_uuid
+// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/database/sqlite_setup/0001_create-tables.sql#L81)
+// - user_id is included in the fs events listener where user_name/user_uuid are
+// not available
+// (https://github.com/HeyPuter/puter/blob/847b3a07a4ec59e724063f460a4c26cb62b04d42/src/backend/src/services/WSPushService.js#L165-L166)
+//
+// We provide simple {New/Remove}FSEntry APIs as a straightforward way to
+// accommodate the wide variety of file system operations. These APIs should
+// always result in a coherent MerkleTree.
+service FSTreeManager {
+ rpc FetchReplica(FetchReplicaRequest) returns (MerkleTree);
+ rpc PullDiff(PullRequest) returns (PushRequest);
+
+ // Insert a new FSEntry into the tree, update its parent's children list as
+ // well.
+ rpc NewFSEntry(NewFSEntryRequest) returns (google.protobuf.Empty);
+
+ // Remove an FSEntry (and all its descendants) from the tree, update its
+ // parent's children list as well.
+ rpc RemoveFSEntry(RemoveFSEntryRequest) returns (google.protobuf.Empty);
+
+ // For any fs operation that cannot be handled by the New/Remove APIs, just purge
+ // the replica.
+ //
+ // The final goal is to remove this RPC once we can always mutate the
+ // fs-tree properly.
+ rpc PurgeReplica(PurgeReplicaRequest) returns (google.protobuf.Empty);
+}
+
+message FetchReplicaRequest { int64 user_id = 1; }
+
+message NewFSEntryRequest {
+ int64 user_id = 1;
+ FSEntry fs_entry = 2;
+}
+
+message RemoveFSEntryRequest {
+ int64 user_id = 1;
+
+ // Use UUID instead of FSEntry since:
+ // 1. UUID is enough to identify a node
+ // 2. FSEntry is inaccessible in many cases
+ string uuid = 2;
+}
+
+message PurgeReplicaRequest { int64 user_id = 1; }
+
+message MerkleNode {
+ // We use the stable uuid from fs_entry so pointers to it stay valid when this
+ // node is updated.
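+ // This uuid is also the key of this node in the MerkleTree.nodes map.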
+ string uuid = 1; + + // We use string instead of uint64 for several reasons: + // 1. JS is error prone when handling uint64/bigint and it's time-consuming to + // troubleshoot. + // 2. It's easier to come up with a consistent order on string type. + string merkle_hash = 2; + + // Use map to avoid duplicate children uuids. The value doesn't matter, it's + // there just because protobuf doesn't have built-in set type. + map children_uuids = 3; + + string parent_uuid = 4; + + FSEntry fs_entry = 5; +} + +// The motivation of the heap design instead of embedded trees is elaborated in +// the RFC (doc/RFCS/20250821_client_replica_file_system.md) +message MerkleTree { + string root_uuid = 1; + + // uuid -> node + map nodes = 2; +} + +message FSEntry { + // TODO (xiaochen): Make it a static type step by step. + // + // A static type is more robust and less error-prone. For instance, a + // FSEntry has uuid field in database but uid is desired in the puter-js + // and GUI client. We can guarantee the presence of desired fields by + // using a static type. + google.protobuf.Struct metadata = 1; +} + +message PullRequest { + int64 user_id = 1; + repeated PullRequestItem pull_request = 2; +} + +message PullRequestItem { + string uuid = 1; + string merkle_hash = 2; +} + +message PushRequest { + int64 user_id = 1; + repeated PushRequestItem push_request = 2; +} + +message PushRequestItem { + string uuid = 1; + string merkle_hash = 2; + FSEntry fs_entry = 3; + repeated PushRequestItem children = 4; +} diff --git a/src/fs_tree_manager/server.go b/src/fs_tree_manager/server.go new file mode 100644 index 0000000000..fccc64de13 --- /dev/null +++ b/src/fs_tree_manager/server.go @@ -0,0 +1,710 @@ +package main + +import ( + "context" + "database/sql" + "fmt" + "log" + "math/rand" + "net" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + _ "github.com/go-sql-driver/mysql" + _ "github.com/mattn/go-sqlite3" + pb "github.com/puter/fs_tree_manager/go" + "github.com/puter/fs_tree_manager/merkle" + "github.com/spf13/cobra" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/structpb" + "gopkg.in/yaml.v3" +) + +type ( + server struct { + pb.UnimplementedFSTreeManagerServer + db *sql.DB + } + + // Config represents the application configuration + Config struct { + Database struct { + Driver string `yaml:"driver"` + SQLite3 struct { + Path string `yaml:"path"` + } `yaml:"sqlite3"` + MySQL struct { + Host string `yaml:"db_host"` + Port int `yaml:"db_port"` + User string `yaml:"db_user"` + Password string `yaml:"db_password"` + Database string `yaml:"db_database"` + } `yaml:"mysql"` + } `yaml:"database"` + Server struct { + Port int `yaml:"port"` + } `yaml:"server"` + } +) + +var ( + // key: user_id, value: user's full replica FS tree + globalTrees map[int64]*merkle.Tree + + // This is only used to protect the integrity of the globalTrees map. Each + // tree's integrity is not its responsibility. + globalTreesLock sync.RWMutex + + // Memory threshold in bytes (2GB) + memoryThresholdBytes int64 = 2 * 1024 * 1024 * 1024 + + // Make FS-Tree Manager unstable and laggy. 
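+ // When enabled, the RPC handlers may sleep, return errors, or panic (see mayCrash); intended only for fault-injection testing.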
+ chaos = false + + debug = false +) + +// checkMemoryUsage checks if the current memory usage exceeds the threshold +func checkMemoryUsage() error { + var m runtime.MemStats + runtime.ReadMemStats(&m) + + if m.Alloc > uint64(memoryThresholdBytes) { + return fmt.Errorf("memory usage (%d bytes) exceeds threshold (%d bytes)", m.Alloc, memoryThresholdBytes) + } + + return nil +} + +// Get a readable tree, initialize the tree from database if it doesn't exist. +func getReadableTree(s *server, userID int64) (*merkle.Tree, error) { + globalTreesLock.RLock() + lockedTree, exists := globalTrees[userID] + globalTreesLock.RUnlock() + + if exists { + lockedTree.RLock() + lockedTree.LastRead = time.Now() + return lockedTree, nil + } + + if err := checkMemoryUsage(); err != nil { + return nil, err + } + + tree, err := s.buildUserFSTree(userID) + if err != nil { + return nil, err + } + + lockedTree = merkle.NewTree(tree) + globalTreesLock.Lock() + globalTrees[userID] = lockedTree + globalTreesLock.Unlock() + + lockedTree.RLock() + return lockedTree, nil +} + +// Get a read-write tree. +func getWritableTree(userID int64) (*merkle.Tree, error) { + globalTreesLock.RLock() + lockedTree, exists := globalTrees[userID] + globalTreesLock.RUnlock() + + if exists { + lockedTree.Lock() + return lockedTree, nil + } + + return nil, fmt.Errorf("tree for user %d does not exist in memory", userID) +} + +// FetchReplica implements the FSTreeManager service +func (s *server) FetchReplica(ctx context.Context, req *pb.FetchReplicaRequest) (*pb.MerkleTree, error) { + if chaos { + time.Sleep(20 * time.Second) + } + + readableTree, err := getReadableTree(s, req.UserId) + if err != nil { + return nil, err + } + defer readableTree.RUnlock() + + return readableTree.GetTree(), nil +} + +func (s *server) PullDiff(ctx context.Context, req *pb.PullRequest) (*pb.PushRequest, error) { + if chaos { + if err := mayCrash(); err != nil { + return nil, err + } + } + + lockedTree, err := getReadableTree(s, req.UserId) + if err != nil { + return nil, fmt.Errorf("[user %d] no cached tree found: %v", req.UserId, err) + } + defer lockedTree.RUnlock() + + tree := lockedTree.GetTree() + response := &pb.PushRequest{ + UserId: req.UserId, + PushRequest: []*pb.PushRequestItem{}, + } + + for _, pullRequestItem := range req.PullRequest { + node, exists := tree.Nodes[pullRequestItem.Uuid] + if !exists { + log.Printf("[user %d] node not found: %s", req.UserId, pullRequestItem.Uuid) + continue + } + + // If hashes match, no need to send this node. + if node.MerkleHash == pullRequestItem.MerkleHash { + continue + } + + // Create push request item with node and its children. + pushItem := &pb.PushRequestItem{ + Uuid: node.Uuid, + MerkleHash: node.MerkleHash, + FsEntry: node.FsEntry, + Children: []*pb.PushRequestItem{}, + } + + // Add all children. 
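+ // Note that only direct children are included (one level deep); deeper descendants are not expanded in this response.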
+ for childUUID := range node.ChildrenUuids { + if childNode, childExists := tree.Nodes[childUUID]; childExists { + childPushItem := &pb.PushRequestItem{ + Uuid: childNode.Uuid, + MerkleHash: childNode.MerkleHash, + FsEntry: childNode.FsEntry, + Children: []*pb.PushRequestItem{}, + } + pushItem.Children = append(pushItem.Children, childPushItem) + } + } + + response.PushRequest = append(response.PushRequest, pushItem) + } + + return response, nil +} + +// NewFSEntry implements the FSTreeManager service +func (s *server) NewFSEntry(ctx context.Context, req *pb.NewFSEntryRequest) (*emptypb.Empty, error) { + if chaos { + if err := mayCrash(); err != nil { + return nil, err + } + } + + userID := req.UserId + fsEntry := req.FsEntry + + metadataMap := fsEntry.Metadata.AsMap() + uid, ok := metadataMap["uid"].(string) + if !ok { + return nil, fmt.Errorf("invalid metadata: missing uid") + } + + lockedTree, err := getWritableTree(userID) + if err != nil { + return nil, err + } + defer lockedTree.Unlock() + + parentUUID, err := getParentUUID(metadataMap, lockedTree.GetTree().Nodes) + if err != nil { + return nil, err + } + + tree := lockedTree.GetTree() + parentNode, exists := tree.Nodes[parentUUID] + if !exists { + return nil, fmt.Errorf("parent directory not found: %s", parentUUID) + } + + newNode := &pb.MerkleNode{ + Uuid: uid, + MerkleHash: "", + ParentUuid: parentUUID, + FsEntry: fsEntry, + ChildrenUuids: make(map[string]bool), + } + + tree.Nodes[uid] = newNode + + parentNode.ChildrenUuids[uid] = true + + newNode.MerkleHash = merkle.CalculateHash(newNode, []string{}) + + merkle.RecalculateAncestorHashes(tree, uid) + + if debug { + parentPath := parentNode.FsEntry.Metadata.AsMap()["path"].(string) + parentUUID = parentNode.Uuid + log.Printf("[user %d] new fs entry, (path: %s, uuid: %s), (parent_path: %s, parent_uuid: %s)", userID, metadataMap["path"], uid, parentPath, parentUUID) + merkle.IntegrityCheck(globalTrees) + } + + return &emptypb.Empty{}, nil +} + +// TODO: remove this once parent_path is always consistent with parent_uuid +func getParentUUID(metadata map[string]any, nodes map[string]*pb.MerkleNode) (UUID string, err error) { + // Check the inconsistency between "parent_path" and "parent_uuid", the inconsistency + // occurs in several scenarios: + // - When moving a directory from ~/Desktop to ~/trash, the parent_uuid is not updated. + + // parent_path comes from "dirpath" field + parentPath := metadata["dirpath"].(string) + + // parent_uuid comes from "parent_uid"/"parent_id" field, just use parent_uid here. + parentUUID := metadata["parent_uid"].(string) + + if parentUUID == "" { + return "", fmt.Errorf("parent_uuid is empty") + } + + parentNode, parentExists := nodes[parentUUID] + if !parentExists { + return "", fmt.Errorf("parent node not found, uuid: %s", parentUUID) + } + + pathFromUUID := parentNode.FsEntry.Metadata.AsMap()["path"].(string) + if parentPath != pathFromUUID { + // When missmatch happens, use parentPath. + log.Printf("parent_path(preferred) and parent_uuid mismatch, parent_path: %s, pathFromUUID: %s, uuid: %s", parentPath, pathFromUUID, parentUUID) + return pathToUUID(parentPath, nodes) + } + + return parentUUID, nil +} + +func pathToUUID(path string, nodes map[string]*pb.MerkleNode) (UUID string, err error) { + // TODO: optimize this by using a trie tree. Currently we cannot traverse the tree + // using path. 
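+ // The current implementation is a linear scan over every node in the tree, so each lookup is O(n).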
+ for _, node := range nodes { + if node.FsEntry.Metadata.AsMap()["path"].(string) == path { + return node.Uuid, nil + } + } + return "", fmt.Errorf("node not found, path: %s", path) +} + +// RemoveFSEntry implements the FSTreeManager service +func (s *server) RemoveFSEntry(ctx context.Context, req *pb.RemoveFSEntryRequest) (*emptypb.Empty, error) { + if chaos { + if err := mayCrash(); err != nil { + return nil, err + } + } + + userID := req.UserId + uid := req.Uuid + if uid == "" { + return nil, fmt.Errorf("invalid request: missing uuid") + } + + lockedTree, err := getWritableTree(userID) + if err != nil { + return nil, err + } + defer lockedTree.Unlock() + + tree := lockedTree.GetTree() + targetNode, exists := tree.Nodes[uid] + if !exists { + return nil, fmt.Errorf("entry not found: %s", uid) + } + + // Collect all descendants to remove + descendants := make(map[string]bool) + merkle.GetAllDescendants(uid, tree.Nodes, descendants) + + // Remove the node from its parent's children map + removedFromParent := false + if targetNode.ParentUuid != "" { + if parentNode, parentExists := tree.Nodes[targetNode.ParentUuid]; parentExists { + if _, exists := parentNode.ChildrenUuids[uid]; exists { + delete(parentNode.ChildrenUuids, uid) + removedFromParent = true + } + } + } + if !removedFromParent { + log.Panicf("[user %d] parent not found: %s", userID, targetNode.ParentUuid) + } + + // Remove all descendants from the tree + for descendantUUID := range descendants { + delete(tree.Nodes, descendantUUID) + } + + // Remove the node from the tree + delete(tree.Nodes, uid) + + // Recalculate ancestor hashes + if targetNode.ParentUuid != "" { + merkle.RecalculateAncestorHashes(tree, targetNode.ParentUuid) + } + + if debug { + parent, parentExists := tree.Nodes[targetNode.ParentUuid] + if !parentExists { + log.Panicf("[user %d] parent not found: %s", userID, targetNode.ParentUuid) + } + parentPath := parent.FsEntry.Metadata.AsMap()["path"].(string) + + parentUUID := targetNode.ParentUuid + log.Printf("[user %d] removed fs entry, (path: %s, uuid: %s), (parent_path: %s, parent_uuid: %s)", userID, targetNode.FsEntry.Metadata.AsMap()["path"], uid, parentPath, parentUUID) + log.Printf("[user %d] removed descendants [%d]: %v", userID, len(descendants), descendants) + merkle.IntegrityCheck(globalTrees) + } + + return &emptypb.Empty{}, nil +} + +func (s *server) PurgeReplica(ctx context.Context, req *pb.PurgeReplicaRequest) (*emptypb.Empty, error) { + globalTreesLock.Lock() + delete(globalTrees, req.UserId) + globalTreesLock.Unlock() + + return &emptypb.Empty{}, nil +} + +func mayCrash() error { + v := rand.Intn(100) + if v < 10 { + panic("crash") + } else if v < 30 { + time.Sleep(10 * time.Second) + } else if v < 60 { + return fmt.Errorf("intentional error on chaos mode") + } + return nil +} + +// loadConfig loads configuration from the specified config file +func loadConfig(configPath string) (*Config, error) { + configData, err := os.ReadFile(configPath) + if err != nil { + return nil, fmt.Errorf("failed to read config file: %v", err) + } + + var config Config + err = yaml.Unmarshal(configData, &config) + if err != nil { + return nil, fmt.Errorf("failed to parse config file: %v", err) + } + + return &config, nil +} + +// buildMetadata creates a comprehensive metadata structure matching the expected format +func buildMetadata(uuid, name, path, parentUID string, userID int64, isDir bool, size sql.NullInt64, + createdAt, modifiedAt, accessedAt float64, isPublic, isShortcut, isSymlink sql.NullBool, + symlinkPath, 
sortBy, sortOrder sql.NullString, immutable sql.NullBool, + metadata, associatedAppID, publicToken, fileRequestToken sql.NullString) (*structpb.Struct, error) { + + dirname := filepath.Dir(path) + dirpath := dirname + + isEmpty := true + if isDir { + isEmpty = !size.Valid || size.Int64 == 0 + } + + metadataMap := map[string]interface{}{ + "is_empty": isEmpty, + "id": uuid, + "associated_app_id": getStringValue(associatedAppID), + "public_token": getStringValue(publicToken), + "file_request_token": getStringValue(fileRequestToken), + "parent_uid": parentUID, + "is_dir": isDir, + "is_public": getBoolValue(isPublic), + "is_shortcut": getIntValue(isShortcut), + "is_symlink": getIntValue(isSymlink), + "symlink_path": getStringValue(symlinkPath), + "sort_by": getStringValue(sortBy), + "sort_order": getStringValue(sortOrder), + "immutable": getIntValue(immutable), + "name": name, + "metadata": getStringValue(metadata), + "modified": int64(modifiedAt), + "created": int64(createdAt), + "accessed": int64(accessedAt), + "size": getInt64Value(size), + "layout": nil, + "path": path, + "owner": map[string]interface{}{ + "user_id": userID, + }, + "type": nil, + "subdomains": []interface{}{}, + "shares": map[string]interface{}{ + "users": []interface{}{}, + "apps": []interface{}{}, + }, + "versions": []interface{}{}, + "dirname": dirname, + "dirpath": dirpath, + "writable": true, + "parent_id": parentUID, + "uid": uuid, + } + + return structpb.NewStruct(metadataMap) +} + +func getStringValue(ns sql.NullString) interface{} { + if ns.Valid { + return ns.String + } + return nil +} + +func getBoolValue(nb sql.NullBool) interface{} { + if nb.Valid { + return nb.Bool + } + return nil +} + +func getIntValue(nb sql.NullBool) int { + if nb.Valid && nb.Bool { + return 1 + } + return 0 +} + +func getInt64Value(ni sql.NullInt64) interface{} { + if ni.Valid { + return ni.Int64 + } + return nil +} + +// buildUserFSTree builds the filesystem tree for a given user from the database +func (s *server) buildUserFSTree(userID int64) (*pb.MerkleTree, error) { + query := ` + SELECT uuid, name, is_dir, size, created, modified, path, parent_uid, + is_public, is_shortcut, is_symlink, symlink_path, sort_by, sort_order, + immutable, metadata, accessed, associated_app_id, public_token, file_request_token + FROM fsentries + WHERE user_id = ? 
+ ` + + rows, err := s.db.Query(query, userID) + if err != nil { + return nil, err + } + defer rows.Close() + + nodes := make(map[string]*pb.MerkleNode) + parentChildMap := make(map[string][]string) + + var rootUUID string + + for rows.Next() { + var uuid, name, path string + var parentUID sql.NullString + var isDir bool + var size sql.NullInt64 + var createdAt, modifiedAt float64 + var accessedAt sql.NullFloat64 + var isPublic, isShortcut, isSymlink, immutable sql.NullBool + var symlinkPath, sortBy, sortOrder, metadata, associatedAppID, publicToken, fileRequestToken sql.NullString + + err := rows.Scan(&uuid, &name, &isDir, &size, &createdAt, &modifiedAt, &path, &parentUID, + &isPublic, &isShortcut, &isSymlink, &symlinkPath, &sortBy, &sortOrder, + &immutable, &metadata, &accessedAt, &associatedAppID, &publicToken, &fileRequestToken) + if err != nil { + continue + } + + parentUIDStr := "" + if parentUID.Valid { + parentUIDStr = parentUID.String + } + + accessedAtValue := float64(time.Now().Unix()) + if accessedAt.Valid { + accessedAtValue = accessedAt.Float64 + } + + metadataStruct, err := buildMetadata(uuid, name, path, parentUIDStr, userID, isDir, size, + createdAt, modifiedAt, accessedAtValue, isPublic, isShortcut, isSymlink, + symlinkPath, sortBy, sortOrder, immutable, metadata, associatedAppID, publicToken, fileRequestToken) + if err != nil { + continue + } + + node := &pb.MerkleNode{ + Uuid: uuid, + MerkleHash: "", + ParentUuid: parentUIDStr, + FsEntry: &pb.FSEntry{Metadata: metadataStruct}, + ChildrenUuids: make(map[string]bool), + } + + nodes[uuid] = node + + if parentUID.Valid { + parentChildMap[parentUID.String] = append(parentChildMap[parentUID.String], uuid) + } + + if strings.Count(path, "/") == 1 { + rootUUID = uuid + } + } + + for parentUUID, childUUIDs := range parentChildMap { + if parent, exists := nodes[parentUUID]; exists { + parent.ChildrenUuids = make(map[string]bool) + for _, childUUID := range childUUIDs { + parent.ChildrenUuids[childUUID] = true + } + } + } + + if rootUUID == "" { + return nil, fmt.Errorf("[user %d] root directory not found", userID) + } + + tree := &pb.MerkleTree{ + RootUuid: rootUUID, + Nodes: nodes, + } + + merkle.CalculateTreeHashes(tree) + + return tree, nil +} + +// purgeOldTrees removes trees that haven't been read in 1 minute or synced in 5 minutes +func purgeOldTrees() { + globalTreesLock.Lock() + defer globalTreesLock.Unlock() + + readCutoff := time.Now().Add(-1 * time.Minute) + syncCutoff := time.Now().Add(-5 * time.Minute) + var toDelete []int64 + + for userID, lockedTree := range globalTrees { + // Purge if either lastRead is older than 1 minute OR lastSynced is older than 5 minutes + if lockedTree.LastRead.Before(readCutoff) || lockedTree.LastSynced.Before(syncCutoff) { + toDelete = append(toDelete, userID) + } + } + + for _, userID := range toDelete { + delete(globalTrees, userID) + } + log.Printf("purged %d old trees, %d trees remaining", len(toDelete), len(globalTrees)) +} + +// runServer starts the gRPC server with the given configuration +func runServer(configPath string) error { + log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile) + + // Load configuration + config, err := loadConfig(configPath) + if err != nil { + return fmt.Errorf("failed to load config: %v", err) + } + + globalTrees = make(map[int64]*merkle.Tree) + + // purge old trees periodically + go func() { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + for range ticker.C { + purgeOldTrees() + } + }() + + var db *sql.DB + var dbErr error + + if 
config.Database.Driver == "mysql" { + // Validate MySQL configuration + if config.Database.MySQL.Host == "" || config.Database.MySQL.User == "" || + config.Database.MySQL.Database == "" || config.Database.MySQL.Port == 0 { + return fmt.Errorf("MySQL configuration is incomplete: host, user, database, and port are required") + } + + // Build MySQL connection string + dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local", + config.Database.MySQL.User, + config.Database.MySQL.Password, + config.Database.MySQL.Host, + config.Database.MySQL.Port, + config.Database.MySQL.Database, + ) + db, dbErr = sql.Open("mysql", dsn) + } else { + // Default to SQLite + if config.Database.SQLite3.Path == "" { + return fmt.Errorf("SQLite3 configuration is incomplete: path is required") + } + db, dbErr = sql.Open(config.Database.Driver, config.Database.SQLite3.Path) + } + + if dbErr != nil { + return fmt.Errorf("failed to open database: %v", dbErr) + } + defer db.Close() + + if err := db.Ping(); err != nil { + return fmt.Errorf("failed to ping database: %v", err) + } + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", config.Server.Port)) + if err != nil { + return fmt.Errorf("failed to listen: %v", err) + } + + grpcServer := grpc.NewServer() + + pb.RegisterFSTreeManagerServer(grpcServer, &server{ + db: db, + }) + + log.Printf("server started on port %d", config.Server.Port) + if err := grpcServer.Serve(lis); err != nil { + return fmt.Errorf("failed to serve: %v", err) + } + + return nil +} + +func main() { + var configPath string + + rootCmd := &cobra.Command{ + Use: "fs-tree-manager", + Short: "FS Tree Manager gRPC server", + Long: `A gRPC server that manages filesystem trees using Merkle trees for efficient synchronization.`, + RunE: func(cmd *cobra.Command, args []string) error { + return runServer(configPath) + }, + } + + rootCmd.Flags().StringVarP(&configPath, "config", "c", "./config.yaml", "Path to the configuration file") + + if err := rootCmd.Execute(); err != nil { + log.Fatalf("Error: %v", err) + os.Exit(1) + } +} diff --git a/src/gui/src/UI/UIDesktop.js b/src/gui/src/UI/UIDesktop.js index 394e564560..bb2cac08f6 100644 --- a/src/gui/src/UI/UIDesktop.js +++ b/src/gui/src/UI/UIDesktop.js @@ -17,31 +17,31 @@ * along with this program. If not, see . 
*/ +import item_icon from "../helpers/item_icon.js" +import launch_app from "../helpers/launch_app.js" +import new_context_menu_item from "../helpers/new_context_menu_item.js" +import refresh_item_container from "../helpers/refresh_item_container.js" +import truncate_filename from '../helpers/truncate_filename.js' +import changeLanguage from "../i18n/i18nChangeLanguage.js" import path from "../lib/path.js" -import UIWindowClaimReferral from "./UIWindowClaimReferral.js" +import UIWindowSettings from "./Settings/UIWindowSettings.js" +import UIAlert from './UIAlert.js' import UIContextMenu from './UIContextMenu.js' import UIItem from './UIItem.js' -import UIAlert from './UIAlert.js' +import UINotification from "./UINotification.js" +import UITaskbar from "./UITaskbar.js" import UIWindow from './UIWindow.js' -import UIWindowSaveAccount from './UIWindowSaveAccount.js'; +import UIWindowClaimReferral from "./UIWindowClaimReferral.js" import UIWindowDesktopBGSettings from "./UIWindowDesktopBGSettings.js" -import UIWindowMyWebsites from "./UIWindowMyWebsites.js" import UIWindowFeedback from "./UIWindowFeedback.js" import UIWindowLogin from "./UIWindowLogin.js" +import UIWindowMyWebsites from "./UIWindowMyWebsites.js" import UIWindowQR from "./UIWindowQR.js" import UIWindowRefer from "./UIWindowRefer.js" -import UITaskbar from "./UITaskbar.js" -import new_context_menu_item from "../helpers/new_context_menu_item.js" -import refresh_item_container from "../helpers/refresh_item_container.js" -import changeLanguage from "../i18n/i18nChangeLanguage.js" -import UIWindowSettings from "./Settings/UIWindowSettings.js" +import UIWindowSaveAccount from './UIWindowSaveAccount.js' +import UIWindowSearch from "./UIWindowSearch.js" import UIWindowTaskManager from "./UIWindowTaskManager.js" -import truncate_filename from '../helpers/truncate_filename.js'; -import UINotification from "./UINotification.js" import UIWindowWelcome from "./UIWindowWelcome.js" -import launch_app from "../helpers/launch_app.js" -import item_icon from "../helpers/item_icon.js" -import UIWindowSearch from "./UIWindowSearch.js" async function UIDesktop(options) { // start a transaction if we're not in embedded or fullpage mode @@ -590,6 +590,8 @@ async function UIDesktop(options) { }); window.socket.on('item.added', async (item) => { + console.log(`[xiaochen-debug] item.added: ${JSON.stringify(item, null, 2)}`); + // if item is empty, don't proceed if (_.isEmpty(item)) return; @@ -715,9 +717,40 @@ async function UIDesktop(options) { window.update_user_preferences(user_preferences); } + // Add replica status widget (always create, but conditionally show) + h += `
+    <div class="replica-status-widget hidden" id="replica-status-widget">
+        <span class="replica-status-label">client-replica:</span>
+        <span class="replica-status-value" id="replica-status-value">false</span>
+    </div>
`; + // Append to $('body').append(h); + // Initialize replica status widget function + window.updateReplicaStatusWidget = function() { + const statusElement = document.getElementById('replica-status-value'); + if (statusElement) { + const isAvailable = puter.fs.replica.available === true; + statusElement.textContent = isAvailable.toString(); + statusElement.className = `replica-status-value ${isAvailable}`; + } + }; + + + // Function to show/hide widget based on debug flag + window.updateReplicaWidgetVisibility = function() { + const widget = document.getElementById('replica-status-widget'); + if (widget) { + widget.style.display = puter.fs.replica.debug ? 'flex' : 'none'; + } + }; + + // Initialize widget and start polling + setTimeout(() => { + // Start polling immediately - the visibility is controlled by CSS + setInterval(window.updateReplicaStatusWidget, 100); + }, 100); + // Set desktop height based on taskbar height $('.desktop').css('height', `calc(100vh - ${window.taskbar_height + window.toolbar_height}px)`) diff --git a/src/gui/src/css/style.css b/src/gui/src/css/style.css index f552c012f2..d5a437dc0c 100644 --- a/src/gui/src/css/style.css +++ b/src/gui/src/css/style.css @@ -5422,4 +5422,54 @@ fieldset[name=number-code] { .update-usage-details svg{ width: 20px; height: 20px; +} + +/************************************************************ + * Replica Status Widget (for debugging) + ************************************************************/ +.replica-status-widget { + position: fixed; + top: 10px; + right: 50px; + background: rgba(0, 0, 0, 0.8); + color: white; + padding: 8px 12px; + border-radius: 6px; + font-size: 12px; + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; + z-index: 9999; + display: flex; + align-items: center; + gap: 6px; + box-shadow: 0 2px 8px rgba(0, 0, 0, 0.3); + backdrop-filter: blur(10px); + border: 1px solid rgba(255, 255, 255, 0.1); +} + +.replica-status-widget.hidden { + display: none; +} + +.replica-status-label { + font-weight: 500; + opacity: 0.8; +} + +.replica-status-value { + font-weight: 600; + padding: 2px 6px; + border-radius: 3px; + background: rgba(255, 255, 255, 0.1); + min-width: 20px; + text-align: center; +} + +.replica-status-value.true { + background: rgba(34, 197, 94, 0.3); + color: #22c55e; +} + +.replica-status-value.false { + background: rgba(239, 68, 68, 0.3); + color: #ef4444; } \ No newline at end of file diff --git a/src/gui/src/helpers.js b/src/gui/src/helpers.js index 9b95ca859b..62cf9c1bb8 100644 --- a/src/gui/src/helpers.js +++ b/src/gui/src/helpers.js @@ -17,19 +17,19 @@ * along with this program. If not, see . 
*/ -import path from "./lib/path.js" +import get_html_element_from_options from "./helpers/get_html_element_from_options.js"; +import globToRegExp from "./helpers/globToRegExp.js"; +import item_icon from "./helpers/item_icon.js"; +import truncate_filename from './helpers/truncate_filename.js'; +import update_title_based_on_uploads from './helpers/update_title_based_on_uploads.js'; +import update_username_in_gui from './helpers/update_username_in_gui.js'; import mime from "./lib/mime.js"; -import UIAlert from './UI/UIAlert.js' -import UIItem from './UI/UIItem.js' +import path from "./lib/path.js"; +import UIAlert from './UI/UIAlert.js'; +import UIItem from './UI/UIItem.js'; import UIWindowLogin from './UI/UIWindowLogin.js'; -import UIWindowSaveAccount from './UI/UIWindowSaveAccount.js'; -import update_username_in_gui from './helpers/update_username_in_gui.js'; -import update_title_based_on_uploads from './helpers/update_title_based_on_uploads.js'; -import truncate_filename from './helpers/truncate_filename.js'; import UIWindowProgress from './UI/UIWindowProgress.js'; -import globToRegExp from "./helpers/globToRegExp.js"; -import get_html_element_from_options from "./helpers/get_html_element_from_options.js"; -import item_icon from "./helpers/item_icon.js"; +import UIWindowSaveAccount from './UI/UIWindowSaveAccount.js'; window.is_auth = ()=>{ if(localStorage.getItem("auth_token") === null || window.auth_token === null) @@ -526,6 +526,8 @@ window.update_auth_data = async (auth_token, user)=>{ // Search and store user templates (non-blocking) window.available_templates() + // Search and store user templates + window.file_templates = await window.available_templates() } window.mutate_user_preferences = function(user_preferences_delta) { diff --git a/src/puter-js/package.json b/src/puter-js/package.json index 32e839503f..3785942be8 100644 --- a/src/puter-js/package.json +++ b/src/puter-js/package.json @@ -34,8 +34,11 @@ "author": "Puter Technologies Inc.", "license": "Apache-2.0", "devDependencies": { + "@types/node": "^24.8.0", "concurrently": "^8.2.2", "http-server": "^14.1.1", + "ts-loader": "^9.5.4", + "typescript": "^5.9.3", "webpack-cli": "^5.1.4" }, "dependencies": { diff --git a/src/puter-js/src/lib/utils.js b/src/puter-js/src/lib/utils.js index 941a3acf57..a0d3eb5b87 100644 --- a/src/puter-js/src/lib/utils.js +++ b/src/puter-js/src/lib/utils.js @@ -611,10 +611,6 @@ function arrayBufferToDataUri(arrayBuffer) { }); } -export {parseResponse, uuidv4, handle_resp, handle_error, initXhr, setupXhrEventHandlers, driverCall, - TeePromise, - make_driver_method, - blob_to_url, - arrayBufferToDataUri, - blobToDataUri, -}; \ No newline at end of file +export { + arrayBufferToDataUri, blob_to_url, blobToDataUri, driverCall, handle_error, handle_resp, initXhr, make_driver_method, parseResponse, setupXhrEventHandlers, TeePromise, uuidv4 +}; diff --git a/src/puter-js/src/modules/FileSystem/index.js b/src/puter-js/src/modules/FileSystem/index.js index 6dde45221b..6a8f027c15 100644 --- a/src/puter-js/src/modules/FileSystem/index.js +++ b/src/puter-js/src/modules/FileSystem/index.js @@ -1,6 +1,6 @@ +import path from '../../lib/path.js'; import io from '../../lib/socket.io/socket.io.esm.min.js'; import * as utils from '../../lib/utils.js'; -import path from '../../lib/path.js'; // Constants // @@ -27,6 +27,9 @@ import FSItem from '../FSItem.js'; import deleteFSEntry from './operations/deleteFSEntry.js'; import getReadURL from './operations/getReadUrl.js'; +// client-replica +import replica from 
"./replica/manager.js"; +// import replica from "./replica/manager.ts"; export class PuterJSFileSystemModule extends AdvancedBase { @@ -47,6 +50,9 @@ export class PuterJSFileSystemModule extends AdvancedBase { readdir = readdir; stat = stat; + // client-replica + replica = replica; + FSItem = FSItem; static NARI_METHODS = { @@ -210,6 +216,8 @@ export class PuterJSFileSystemModule extends AdvancedBase { * @returns {void} */ setAuthToken(authToken) { + console.log('[xiaochen-debug] FileSystem.setAuthToken', authToken); + this.authToken = authToken; // Check cache timestamp and purge if needed (only in GUI environment) @@ -221,6 +229,13 @@ export class PuterJSFileSystemModule extends AdvancedBase { // reset socket this.initializeSocket(); + + // initialize replica manager + replica.initialize({ + authToken: this.authToken, + APIOrigin: this.APIOrigin, + username: this.context.username, + }); } /** diff --git a/src/puter-js/src/modules/FileSystem/operations/deleteFSEntry.js b/src/puter-js/src/modules/FileSystem/operations/deleteFSEntry.js index 0b8644b65a..0cabafa58d 100644 --- a/src/puter-js/src/modules/FileSystem/operations/deleteFSEntry.js +++ b/src/puter-js/src/modules/FileSystem/operations/deleteFSEntry.js @@ -40,9 +40,33 @@ const deleteFSEntry = async function(...args) { // create xhr object const xhr = utils.initXhr('/delete', this.APIOrigin, this.authToken); - // set up event handlers for load and error events - utils.setupXhrEventHandlers(xhr, options.success, options.error, resolve, reject); + const originalSuccess = options.success; + const wrappedSuccess = (...args) => { + if ( originalSuccess ) { + originalSuccess(...args); + } + + // ================== client-replica hook start ================== + if ( puter.fs.replica.available ) { + for ( const path of paths ) { + if ( puter.fs.replica.debug ) { + console.log('local deleteFSEntry hook, path:', path); + } + const fs_entry = puter.fs.replica.fs_tree.findNodeByPath(path); + if ( !fs_entry ) { + console.error('client-replica: fs_entry not found, path:', path); + continue; + } + puter.fs.replica.fs_tree.removeFSEntry(fs_entry.uuid); + puter.fs.replica.last_local_update = Date.now(); + } + } + // ================== client-replica hook end ==================== + }; + // set up event handlers for load and error events + utils.setupXhrEventHandlers(xhr, wrappedSuccess, options.error, resolve, reject); + // convert paths to absolute paths paths = paths.map((path) => { return getAbsolutePathForApp(path); diff --git a/src/puter-js/src/modules/FileSystem/operations/mkdir.js b/src/puter-js/src/modules/FileSystem/operations/mkdir.js index d5245f0499..88ba5ac48b 100644 --- a/src/puter-js/src/modules/FileSystem/operations/mkdir.js +++ b/src/puter-js/src/modules/FileSystem/operations/mkdir.js @@ -2,13 +2,13 @@ import path from "../../../lib/path.js"; import * as utils from '../../../lib/utils.js'; import getAbsolutePathForApp from '../utils/getAbsolutePathForApp.js'; -const mkdir = function (...args) { +const mkdir = function(...args) { let options = {}; // If first argument is a string and the second is an object, or if the first is an object - if ((typeof args[0] === 'string' && typeof args[1] === 'object' && !(args[1] instanceof Function)) || (typeof args[0] === 'object' && args[0] !== null)) { + if ( (typeof args[0] === 'string' && typeof args[1] === 'object' && !(args[1] instanceof Function)) || (typeof args[0] === 'object' && args[0] !== null) ) { // If it's a string followed by an object, it means path then options - if (typeof args[0] 
=== 'string') { + if ( typeof args[0] === 'string' ) { options.path = args[0]; // Merge the options Object.assign(options, args[1]); @@ -17,7 +17,7 @@ const mkdir = function (...args) { } else { options = args[0]; } - } else if (typeof args[0] === 'string') { + } else if ( typeof args[0] === 'string' ) { // it means it's a path then functions (success and optionally error) options.path = args[0]; options.success = args[1]; @@ -25,12 +25,12 @@ const mkdir = function (...args) { } return new Promise(async (resolve, reject) => { - // If auth token is not provided and we are in the web environment, + // If auth token is not provided and we are in the web environment, // try to authenticate with Puter - if(!puter.authToken && puter.env === 'web'){ - try{ + if ( !puter.authToken && puter.env === 'web' ) { + try { await puter.ui.authenticateWithPuter(); - }catch(e){ + } catch( e ) { // if authentication fails, throw an error reject('Authentication failed.'); } @@ -39,14 +39,36 @@ const mkdir = function (...args) { // create xhr object const xhr = utils.initXhr('/mkdir', this.APIOrigin, this.authToken); + const originalSuccess = options.success; + const wrappedSuccess = (...args) => { + if ( originalSuccess ) { + originalSuccess(...args); + } + + // ================== client-replica hook start ================== + if ( puter.fs.replica.available ) { + if ( args.length !== 1 ) { + console.error('client-replica: mkdir hook only supports 1 argument, got', args); + return; + } + if ( puter.fs.replica.debug ) { + console.log('local mkdir hook, args:', args); + } + const new_fs_entry = args[0]; + puter.fs.replica.fs_tree.newFSEntry(new_fs_entry); + puter.fs.replica.last_local_update = Date.now(); + } + // ================== client-replica hook end ================== + }; + // set up event handlers for load and error events - utils.setupXhrEventHandlers(xhr, options.success, options.error, resolve, reject); + utils.setupXhrEventHandlers(xhr, wrappedSuccess, options.error, resolve, reject); options.path = getAbsolutePathForApp(options.path); xhr.send(JSON.stringify({ parent: path.dirname(options.path), - path: path.basename(options.path), + path: path.basename(options.path), overwrite: options.overwrite ?? false, dedupe_name: (options.rename || options.dedupeName) ?? 
false, shortcut_to: options.shortcutTo, diff --git a/src/puter-js/src/modules/FileSystem/operations/move.js b/src/puter-js/src/modules/FileSystem/operations/move.js index f4ddb976d0..d2885fe0cf 100644 --- a/src/puter-js/src/modules/FileSystem/operations/move.js +++ b/src/puter-js/src/modules/FileSystem/operations/move.js @@ -53,8 +53,35 @@ const move = function (...args) { // create xhr object const xhr = utils.initXhr('/move', this.APIOrigin, this.authToken); + // inject the client-replica update hook to the success callback + const originalSuccess = options.success; + const wrappedSuccess = (...args) => { + if (originalSuccess) { + originalSuccess(...args); + } + + // ================== client-replica hook start ================== + if ( puter.fs.replica.available ) { + if ( args.length !== 1 ) { + console.error('client-replica: move hook only supports 1 argument, got', args); + return; + } + if ( puter.fs.replica.debug ) { + console.log('local move hook, args:', args); + } + const moved = args[0]?.moved; + if ( !moved ) { + console.error('client-replica: move object is empty, got', args); + return; + } + puter.fs.replica.fs_tree.removeFSEntry(moved.uid); + puter.fs.replica.fs_tree.newFSEntry(moved); + puter.fs.replica.last_local_update = Date.now(); + } + }; + // set up event handlers for load and error events - utils.setupXhrEventHandlers(xhr, options.success, options.error, resolve, reject); + utils.setupXhrEventHandlers(xhr, wrappedSuccess, options.error, resolve, reject); xhr.send(JSON.stringify({ source: options.source, diff --git a/src/puter-js/src/modules/FileSystem/operations/readdir.js b/src/puter-js/src/modules/FileSystem/operations/readdir.js index afc7a888b5..1a17f2b1fe 100644 --- a/src/puter-js/src/modules/FileSystem/operations/readdir.js +++ b/src/puter-js/src/modules/FileSystem/operations/readdir.js @@ -13,7 +13,7 @@ const readdir = async function (...args) { let options; // If first argument is an object, it's the options - if (typeof args[0] === 'object' && args[0] !== null) { + if ( typeof args[0] === 'object' && args[0] !== null ) { options = args[0]; } else { // Otherwise, we assume separate arguments are provided @@ -24,27 +24,54 @@ const readdir = async function (...args) { }; } + if ( puter.fs.replica.available ) { + const homePath = puter.fs.replica.fs_tree.root; + if ( options.path && options.path.startsWith(homePath) ) { + return new Promise(async (resolve, reject) => { + try { + const result = await puter.fs.replica.fs_tree.readdir(options); + if ( options.success ) { + options.success(result); + } + resolve(result); + + // update debug variable + const result_path = result[0]?.path; + console.log(`[readdir] local read, options: ${JSON.stringify(options, null, 2)}, result_path: ${result_path}`); + puter.fs.replica.local_read++; + } catch( error ) { + if ( options.error ) { + options.error(error); + } + reject(error); + } + }); + } + } + console.log(`[readdir] remote read, options: ${JSON.stringify(options, null, 2)}`); + puter.fs.replica.remote_read++; + return new Promise(async (resolve, reject) => { // consistency levels - if(!options.consistency){ + if ( !options.consistency ) { options.consistency = 'strong'; } // Either path or uid is required - if(!options.path && !options.uid){ + if ( !options.path && !options.uid ) { throw new Error({ code: 'NO_PATH_OR_UID', message: 'Either path or uid must be provided.' 
}); } // Generate cache key based on path or uid let cacheKey; - if(options.path){ + if ( options.path ) { cacheKey = 'readdir:' + options.path; } - if(options.consistency === 'eventual'){ + if ( options.consistency === 'eventual' ) { // Check cache const cachedResult = await puter._cache.get(cacheKey); - if(cachedResult){ + if ( cachedResult ) { resolve(cachedResult); return; } diff --git a/src/puter-js/src/modules/FileSystem/operations/rename.js b/src/puter-js/src/modules/FileSystem/operations/rename.js index f43ca4ac0b..b3908eb478 100644 --- a/src/puter-js/src/modules/FileSystem/operations/rename.js +++ b/src/puter-js/src/modules/FileSystem/operations/rename.js @@ -18,13 +18,14 @@ const rename = function (...args) { }; } + return new Promise(async (resolve, reject) => { // If auth token is not provided and we are in the web environment, // try to authenticate with Puter - if(!puter.authToken && puter.env === 'web'){ - try{ + if (!puter.authToken && puter.env === 'web') { + try { await puter.ui.authenticateWithPuter(); - }catch(e){ + } catch (e) { // if authentication fails, throw an error reject('Authentication failed.'); } @@ -33,14 +34,40 @@ const rename = function (...args) { // create xhr object const xhr = utils.initXhr('/rename', this.APIOrigin, this.authToken); + + // we have: + // options.uid, options.new_name + + const originalSuccess = options.success; + const wrappedSuccess = (...args) => { + if ( originalSuccess ) { + originalSuccess(...args); + } + + // ================== client-replica hook start ================== + if ( puter.fs.replica.available ) { + if ( args.length !== 1 ) { + console.error('client-replica: rename hook only supports 1 argument, got', args); + return; + } + if ( puter.fs.replica.debug ) { + console.log('local rename hook, args:', args); + } + const renamed = args[0]; + puter.fs.replica.fs_tree.rename(renamed.uid, renamed.name, renamed.path); + puter.fs.replica.last_local_update = Date.now(); + } + // ================== client-replica hook end ================== + }; + // set up event handlers for load and error events - utils.setupXhrEventHandlers(xhr, options.success, options.error, resolve, reject); + utils.setupXhrEventHandlers(xhr, wrappedSuccess, options.error, resolve, reject); let dataToSend = { original_client_socket_id: options.excludeSocketID || options.original_client_socket_id, new_name: options.new_name || options.newName, }; - + if (options.uid !== undefined) { dataToSend.uid = options.uid; } else if (options.path !== undefined) { @@ -48,7 +75,7 @@ const rename = function (...args) { // in that case, we need to prepend the app's root directory to it dataToSend.path = getAbsolutePathForApp(options.path); } - + xhr.send(JSON.stringify(dataToSend)); }) diff --git a/src/puter-js/src/modules/FileSystem/operations/stat.js b/src/puter-js/src/modules/FileSystem/operations/stat.js index 0d236ba7b9..8acd606b88 100644 --- a/src/puter-js/src/modules/FileSystem/operations/stat.js +++ b/src/puter-js/src/modules/FileSystem/operations/stat.js @@ -13,7 +13,7 @@ const stat = async function (...args) { let options; // If first argument is an object, it's the options - if (typeof args[0] === 'object' && args[0] !== null) { + if ( typeof args[0] === 'object' && args[0] !== null ) { options = args[0]; } else { // Otherwise, we assume separate arguments are provided @@ -26,22 +26,42 @@ const stat = async function (...args) { }; } + if ( puter.fs.replica.available ) { + return new Promise(async (resolve, reject) => { + try { + const result = await 
puter.fs.replica.fs_tree.stat(options); + if ( options.success ) { + options.success(result); + } + resolve(result); + + // update debug variable + puter.fs.replica.local_read++; + } catch( error ) { + if ( options.error ) { + options.error(error); + } + reject(error); + } + }); + } + return new Promise(async (resolve, reject) => { // consistency levels - if(!options.consistency){ + if ( !options.consistency ) { options.consistency = 'strong'; } // Generate cache key based on path or uid let cacheKey; - if(options.path){ + if ( options.path ) { cacheKey = 'item:' + options.path; } - if(options.consistency === 'eventual' && !options.returnSubdomains && !options.returnPermissions && !options.returnVersions && !options.returnSize){ + if ( options.consistency === 'eventual' && !options.returnSubdomains && !options.returnPermissions && !options.returnVersions && !options.returnSize ) { // Check cache const cachedResult = await puter._cache.get(cacheKey); - if(cachedResult){ + if ( cachedResult ) { resolve(cachedResult); return; } diff --git a/src/puter-js/src/modules/FileSystem/replica/manager.ts b/src/puter-js/src/modules/FileSystem/replica/manager.ts new file mode 100644 index 0000000000..6b6d976971 --- /dev/null +++ b/src/puter-js/src/modules/FileSystem/replica/manager.ts @@ -0,0 +1,471 @@ +/* + * Copyright (C) 2024-present Puter Technologies Inc. + * + * This file is part of Puter. + * + * Puter is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +// @ts-ignore - No type definitions available for socket.io +import io from '../../../lib/socket.io/socket.io.esm.min.js'; +// @ts-ignore - No type definitions available for tree.js +import FSTree from './tree.js'; + +interface Context { + authToken: string; + APIOrigin: string; + username?: string; +} + +interface FSEntry { + path: string; + parent_uid: string | null; + [key: string]: unknown; +} + +interface NodeData { + uuid: string; + merkle_hash: string; + fs_entry: FSEntry; + children?: NodeData[]; +} + +interface FSNode { + uuid: string; + merkle_hash: string; + parent_uuid: string | null; + fs_entry: FSEntry; + children_uuids: { [uuid: string]: boolean }; +} + +interface FSTreeData { + rootId: string; + nodes: { [uuid: string]: FSNode }; +} + +interface PullRequestItem { + uuid: string; + merkle_hash: string; +} + +interface PullRequest { + user_name: string; + pull_request: PullRequestItem[]; +} + +interface PushRequestItem { + uuid: string; + merkle_hash: string; + fs_entry: FSEntry; + children?: PushRequestItem[]; +} + +interface ReplicaFetchSuccessData { + data: FSTreeData; +} + +interface ReplicaPullDiffSuccessData { + data: { + push_request: PushRequestItem[]; + }; +} + +interface ReplicaErrorData { + error: { + message: string; + }; +} + +interface Socket { + disconnect(): void; + on(event: string, callback: (...args: any[]) => void): void; + emit(event: string, data: any): void; +} + +class ReplicaManager { + private socket: Socket | null = null; + private username: string | null = null; + private pullDiffInterval: ReturnType | null = null; + private authToken: string = ''; + private APIOrigin: string = ''; + + public available: boolean = false; + public fs_tree: FSTree | null = null; + public last_local_update: number = 0; // milliseconds since epoch + + // debug variables + public debug: boolean = false; + public local_read: number = 0; + public remote_read: number = 0; + + /** + * Initialize the replica manager for the current user. + */ + async initialize(context: Context): Promise { + // check input + if ( !context || !context.authToken || !context.APIOrigin ) { + console.error(`[replica manager] failed to initialize, context is invalid: ${JSON.stringify(context, null, 2)}`); + return; + } + + this.authToken = context.authToken; + this.APIOrigin = context.APIOrigin; + + // Fetch username from whoami endpoint if not provided in context + if ( !context.username ) { + this.username = await this.fetchUsername(); + } else { + this.username = context.username; + } + + this.connect(); + } + + /** + * Fetch username from whoami endpoint using direct API call + */ + async fetchUsername(): Promise { + try { + const resp = await fetch(`${this.APIOrigin}/whoami`, { + headers: { + Authorization: `Bearer ${this.authToken}`, + }, + }); + + const result = await resp.json(); + return result.username; + } catch( error ) { + console.error('Replica Manager: Failed to fetch username from whoami endpoint:', error); + throw error; + } + } + + connect(): void { + if ( this.socket ) { + // The disconnect action will not impact other components since each socket + // object get their own session. 
+ this.socket.disconnect(); + } + + this.socket = io(this.APIOrigin, { + auth: { + auth_token: this.authToken, + }, + }) as Socket; + + this.bindEvents(); + } + + /** + * Bind websocket events + */ + bindEvents(): void { + if ( !this.socket ) return; + + this.socket.on('connect', () => { + console.log('[replica manager] websocket connected'); + + this.fetchReplica(); + this.startPullDiff(); + }); + + this.socket.on('disconnect', () => { + console.log('[replica manager] websocket disconnected'); + + this.cleanup('disconnected'); + }); + + this.socket.on('reconnect', (_attempt: number) => { + console.log('[replica manager] websocket reconnected'); + + this.fetchReplica(); + this.startPullDiff(); + }); + + this.socket.on('error', (error: unknown) => { + this.cleanup(`error: ${error}`); + }); + + this.socket.on('replica/fetch/success', (data: ReplicaFetchSuccessData) => { + this.handleFetchReplicaSuccess(data); + }); + + this.socket.on('replica/fetch/error', (data: ReplicaErrorData) => { + this.cleanup(`failed to fetch replica: ${data.error.message}`); + }); + + this.socket.on('replica/pull_diff/success', (data: ReplicaPullDiffSuccessData) => { + this.handlePullDiffSuccess(data); + }); + + this.socket.on('replica/pull_diff/error', (data: ReplicaErrorData) => { + this.cleanup(`failed to pull diff: ${data.error.message}`); + }); + } + + /** + * Fetch the replica from server for the current user. + */ + fetchReplica(): void { + if ( !this.username ) { + console.warn('Replica Manager: No username available for fetching replica'); + return; + } + + if ( !this.socket ) return; + + const userRootPath = `/${this.username}`; + + this.socket.emit('replica/fetch', { + path: userRootPath, + + // TODO (xiaochen): remove this + requestId: 'user_root', + }); + } + + /** + * Handle successful replica fetch + */ + handleFetchReplicaSuccess(data: ReplicaFetchSuccessData): void { + // Initialize the FSTree + this.fs_tree = new FSTree(data.data); + this.available = true; + + console.log('client-replica initialized for user:', this.username); + } + + handlePullDiffSuccess(data: ReplicaPullDiffSuccessData): void { + const pushRequest = data?.data?.push_request; + + // check terminal conditions + if ( !this.available ) { + return; + } + + // check skip conditions + if ( !pushRequest || pushRequest.length === 0 ) { + return; + } + + const paths = pushRequest.map(item => item.fs_entry.path); + if ( this.debug ) { + console.log(`push request from server: ${paths}`); + } + + const nextPullRequest: PullRequestItem[] = []; + + for ( const pushItem of pushRequest ) { + // process level-1 node + const node = this.fs_tree!.nodes[pushItem.uuid]; + if ( node ) { + // update existing + node.fs_entry = pushItem.fs_entry; + node.merkle_hash = pushItem.merkle_hash; + } else { + // new fsentry on remote, add it and fetch its children + this.addNode(pushItem); + + nextPullRequest.push({ + uuid: pushItem.uuid, + // use empty hash to force-fetch its children + merkle_hash: '', + }); + continue; + } + + // process children + if ( pushItem.children ) { + const localChildren = node ? Object.keys(node.children_uuids || {}) : []; + const serverChildren = pushItem.children.map(child => child.uuid); + + // fsentry removed from server, remove it in local as well + // + // NB: Must use a snapshot to avoid the "mutate-while-iterating" trap. 
+ for ( const localChildId of [...localChildren] ) { + if ( !serverChildren.includes(localChildId) ) { + this.removeNodeAndDescendants(localChildId); + } + } + + // NB: Must use a snapshot to avoid the "mutate-while-iterating" trap. + for ( const child of [...pushItem.children] ) { + const localChild = this.fs_tree!.nodes[child.uuid]; + + if ( !localChild ) { + // new fsentry on remote, add it and fetch its children + this.addNode(child); + + nextPullRequest.push({ + uuid: child.uuid, + // use empty hash to force-fetch its children + merkle_hash: '', + }); + } else if ( localChild.merkle_hash !== child.merkle_hash ) { + // fsentry updated on remote, update and fetch its children + localChild.fs_entry = child.fs_entry; + localChild.merkle_hash = child.merkle_hash; + nextPullRequest.push({ + uuid: child.uuid, + // use empty hash to force-fetch its children + merkle_hash: '', + }); + } + } + } + } + + // Send next pull request if there are nodes to update + if ( nextPullRequest.length > 0 && this.socket ) { + this.socket.emit('replica/pull_diff', { + user_name: this.username, + pull_request: nextPullRequest, + }); + } + } + + /** + * Add a new node to the tree + */ + addNode(nodeData: NodeData): void { + const newNode: FSNode = { + uuid: nodeData.uuid, + merkle_hash: nodeData.merkle_hash, + parent_uuid: nodeData.fs_entry.parent_uid, + fs_entry: nodeData.fs_entry, + children_uuids: {}, + }; + + this.fs_tree!.nodes[nodeData.uuid] = newNode; + + // Add to parent's children + if ( nodeData.fs_entry.parent_uid ) { + const parentNode = this.fs_tree!.nodes[nodeData.fs_entry.parent_uid]; + if ( parentNode ) { + if ( !parentNode.children_uuids ) { + parentNode.children_uuids = {}; + } + parentNode.children_uuids[nodeData.uuid] = true; + } + } + } + + /** + * Remove a node and all its descendants from the local replica + */ + removeNodeAndDescendants(nodeId: string): void { + const node = this.fs_tree!.nodes[nodeId]; + if ( !node ) { + return; + } + + // Remove from parent's children + if ( node.parent_uuid ) { + const parentNode = this.fs_tree!.nodes[node.parent_uuid]; + if ( parentNode && parentNode.children_uuids ) { + delete parentNode.children_uuids[nodeId]; + } + } + + // Remove all children recursively + if ( node.children_uuids ) { + for ( const childId of Object.keys(node.children_uuids) ) { + this.removeNodeAndDescendants(childId); + } + } + + // Remove the node itself + delete this.fs_tree!.nodes[nodeId]; + } + + startPullDiff(): void { + // Clear any existing interval + if ( this.pullDiffInterval ) { + clearInterval(this.pullDiffInterval); + } + + // Set up interval to send pull diff every 5 seconds + this.pullDiffInterval = setInterval(() => { + this.pullDiff(); + }, 5000); + } + + pullDiff(): void { + // check terminal conditions + if ( !this.available ) { + return; + } + + // check skip conditions + if ( Date.now() - this.last_local_update < 3000 ) { + return; + } + + try { + const rootNode = this.fs_tree!.nodes[this.fs_tree!.rootId]; + if ( rootNode && rootNode.merkle_hash ) { + // Create PullRequest format according to proto definition + const pullRequest: PullRequest = { + user_name: this.username!, + pull_request: [ + { + uuid: rootNode.uuid, + merkle_hash: rootNode.merkle_hash, + }, + ], + }; + + if ( this.socket ) { + this.socket.emit('replica/pull_diff', pullRequest); + } + } + } catch( error: unknown ) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + this.cleanup(`error in pullDiff: ${errorMessage}`); + } + } + + // Do cleanup and mark replica as unavailable. + cleanup(reason: string): void { + console.log(`[replica manager] cleanup, reason: ${reason}`); + + if ( this.pullDiffInterval ) { + clearInterval(this.pullDiffInterval); + this.pullDiffInterval = null; + } + + if ( this.socket ) { + this.socket.disconnect(); + } + + this.available = false; + } + + /** + * Set the debug flag + */ + setDebug(enabled: boolean): void { + this.debug = enabled; + + // Update widget visibility if the function exists (in GUI environment) + if ( typeof window !== 'undefined' && (window as unknown as { updateReplicaWidgetVisibility?: () => void }).updateReplicaWidgetVisibility ) { + (window as unknown as { updateReplicaWidgetVisibility: () => void }).updateReplicaWidgetVisibility(); + } + } +} + +// Create singleton instance +const replica = new ReplicaManager(); + +export default replica; diff --git a/src/puter-js/src/modules/FileSystem/replica/tree.js b/src/puter-js/src/modules/FileSystem/replica/tree.js new file mode 100644 index 0000000000..e9099b4360 --- /dev/null +++ b/src/puter-js/src/modules/FileSystem/replica/tree.js @@ -0,0 +1,274 @@ +/* + * Copyright (C) 2024-present Puter Technologies Inc. + * + * This file is part of Puter. + * + * Puter is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as published + * by the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Affero General Public License for more details. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +import stringify from 'safe-stable-stringify'; +import xxhash from 'xxhash-wasm'; + +class FSTree { + constructor(data) { + if ( !data ) { + throw new Error('FSTree requires valid data to initialize'); + } + this.tree = data; + this.nodes = data.nodes; + this.rootId = data.root_uuid; + + // Get the root node to determine the root path + const rootNode = this.nodes[this.rootId]; + if ( rootNode && rootNode.fs_entry ) { + this.root = rootNode.fs_entry.path || '/'; + } else { + this.root = '/'; + } + } + + /** + * Calculate Merkle hash for a node based on its metadata and children hashes + * This matches the exact logic from server.go + * @param {Object} node - The node to calculate hash for + * @param {Array} childrenHashes - Array of child node hashes (strings) + * @returns {string} - String representation of the hash + */ + async calculateMerkleHash(node, childrenHashes = []) { + const { create64 } = await xxhash(); + + const hasher = create64(0n); + + if ( node.fs_entry ) { + const metadata = stringify(node.fs_entry); + hasher.update(metadata); + } + + // Sort children hashes as strings for consistency + const sortedChildrenHashes = [...childrenHashes].sort(); + for ( const childHash of sortedChildrenHashes ) { + hasher.update(childHash); + } + + const hash = hasher.digest(); + return hash.toString(); + } + + /** + * Recalculate Merkle hashes for all ancestors of a given node + * @param {string} nodeId - The ID of the node whose ancestors need recalculation + */ + async recalculateAncestorHashes(nodeId) { + const node = this.nodes[nodeId]; + if ( !node ) { + return; + } + + let currentNodeId = nodeId; + + while ( currentNodeId ) { + const currentNode = this.nodes[currentNodeId]; + if ( !currentNode ) { + break; + } + + const childrenHashes = []; + if ( currentNode.children_uuids ) { + for ( const childId of Object.keys(currentNode.children_uuids) ) { + const childNode = this.nodes[childId]; + if ( childNode && childNode.merkle_hash ) { + childrenHashes.push(childNode.merkle_hash); + } + } + } + + currentNode.merkle_hash = await this.calculateMerkleHash(currentNode, childrenHashes); + + currentNodeId = currentNode.parent_uuid; + } + } + + /** + * Find a node by path in the tree + * @param {string} path - Path to find (e.g., '/', '/folder', '/folder/file.txt') + * @returns {Object|null} - Node object or null if not found + */ + findNodeByPath(path) { + // we're already in the root, so remove it + path = path.replace(this.root, ''); + + const parts = path.split('/').filter(part => part !== ''); + let currentId = this.rootId; + + for ( const part of parts ) { + const currentNode = this.nodes[currentId]; + if ( !currentNode || !currentNode.children_uuids ) { + return null; + } + + // Find child with matching name + const foundId = Object.keys(currentNode.children_uuids).find(childId => { + const childNode = this.nodes[childId]; + return childNode && childNode.fs_entry && childNode.fs_entry.name === part; + }); + + if ( !foundId ) { + return null; + } + currentId = foundId; + } + + return this.nodes[currentId]; + } + + /** + * Find a node by UUID in the tree + * @param {string} uid - UUID to find + * @returns {Object|null} - Node object or null if not found + */ + findNodeByUUID(uid) { + // Direct lookup in nodes map + return this.nodes[uid] || null; + } + + /** + * Read directory contents. 
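     * Usage sketch (illustrative only; mirrors how operations/readdir.js calls the
     * replica and assumes the tree has already been fetched by the replica manager):
     *
     *   const entries = puter.fs.replica.fs_tree.readdir({ path: '/admin/Desktop' });
     *   entries.forEach(entry => console.log(entry.name, entry.is_dir));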
+ * + * @param {Object} options - Options object + * @param {string} [options.path] - Path to read directory for + * @param {string} [options.uid] - UUID to read directory for + * @returns {Array} - Array of child fs_entry objects + */ + readdir(options) { + const path = options.path; + const uid = options.uid; + let node = null; + + if ( uid ) { + node = this.findNodeByUUID(uid); + } else if ( path ) { + node = this.findNodeByPath(path); + } else { + throw new Error('Either path or uid must be provided'); + } + + if ( !node ) { + throw new Error(`Path not found: ${path}`); + } + + if ( !node.fs_entry?.is_dir ) { + throw new Error(`Not a directory: ${path}`); + } + + // Get children by their UUIDs + const childrenUuids = Object.keys(node.children_uuids || {}); + return childrenUuids + .map(childId => this.nodes[childId]) + .filter(childNode => childNode && childNode.fs_entry) + .map(childNode => childNode.fs_entry); + } + + /** + * Get node fs_entry + * @param {Object} options - Options object + * @param {string} [options.path] - Path to get fs_entry for + * @param {string} [options.uid] - UUID to get fs_entry for + * @returns {Object|null} - fs_entry object or null if not found + */ + stat(options) { + const path = options.path; + const uid = options.uid; + let node = null; + + if ( uid ) { + node = this.findNodeByUUID(uid); + } else if ( path ) { + node = this.findNodeByPath(path); + } else { + throw new Error('Either path or uid must be provided'); + } + + return node?.fs_entry; + } + + // mimic rpc: + // rpc NewFSEntry(NewFSEntryRequest) returns (google.protobuf.Empty); + async newFSEntry(fs_entry) { + if ( !fs_entry || !fs_entry.uid ) { + throw new Error('Invalid fs_entry: must have uid'); + } + + const newNode = { + uuid: fs_entry.uid, + merkle_hash: '', + parent_uuid: fs_entry.parent_uid, + fs_entry: fs_entry, + children_uuids: {}, + }; + + this.nodes[newNode.uuid] = newNode; + + if ( !newNode.parent_uuid ) { + throw new Error('Invalid fs_entry: must have parent_uid'); + } + + const parentNode = this.findNodeByUUID(newNode.parent_uuid); + if ( !parentNode ) { + throw new Error(`Parent directory not found: ${newNode.parent_uuid}`); + } + + if ( !parentNode.children_uuids ) { + parentNode.children_uuids = {}; + } + parentNode.children_uuids[newNode.uuid] = true; + + await this.recalculateAncestorHashes(newNode.uuid); + } + + // mimic rpc: + // rpc RemoveFSEntry(RemoveFSEntryRequest) returns (google.protobuf.Empty); + async removeFSEntry(uuid) { + const node = this.findNodeByUUID(uuid); + if ( !node ) { + throw new Error(`Node not found: ${uuid}`); + } + + if ( node.parent_uuid ) { + const parentNode = this.findNodeByUUID(node.parent_uuid); + if ( parentNode ) { + delete parentNode.children_uuids[uuid]; + } else { + throw new Error(`Parent directory not found: ${node.parent_uuid}`); + } + } + + delete this.nodes[uuid]; + + this.recalculateAncestorHashes(node.parent_uuid); + } + + async rename(uuid, new_name, new_path) { + const node = this.findNodeByUUID(uuid); + if ( !node ) { + throw new Error(`Node not found: ${uuid}`); + } + + node.fs_entry.name = new_name; + node.fs_entry.path = new_path; + + this.recalculateAncestorHashes(uuid); + } +} + +export default FSTree; diff --git a/src/puter-js/webpack.config.js b/src/puter-js/webpack.config.js index 2fea0230e6..b7ab6d181d 100644 --- a/src/puter-js/webpack.config.js +++ b/src/puter-js/webpack.config.js @@ -16,6 +16,18 @@ export default { filename: 'puter.js', path: path.resolve(__dirname, 'dist'), }, + module: { + rules: [ + { + test: 
/\.ts$/, + use: 'ts-loader', + exclude: /node_modules/, + }, + ], + }, + resolve: { + extensions: ['.ts', '.js'], + }, plugins: [ new webpack.DefinePlugin({ 'globalThis.PUTER_ORIGIN_ENV': JSON.stringify(process.env.PUTER_ORIGIN || 'https://puter.com'), diff --git a/test-results/.last-run.json b/test-results/.last-run.json new file mode 100644 index 0000000000..5fca3f84bc --- /dev/null +++ b/test-results/.last-run.json @@ -0,0 +1,4 @@ +{ + "status": "failed", + "failedTests": [] +} \ No newline at end of file diff --git a/tests/ci/api-test.py b/tests/ci/api-test.py new file mode 100755 index 0000000000..ce374154ce --- /dev/null +++ b/tests/ci/api-test.py @@ -0,0 +1,172 @@ +#! /usr/bin/env python3 +# +# Usage: +# ./tools/api-tester/ci/run.py + +import time +import os +import json +import requests +import yaml + +import cxc_toolkit + + +class Context: + def __init__(self): + self.ADMIN_PASSWORD = None + self.TOKEN = None + + +CONTEXT = Context() + + +def get_token(): + # Send HTTP request to server and print response + print("Sending HTTP request to server...") + # Assuming the server runs on localhost:4100 (default Puter port) + server_url = "http://api.puter.localhost:4100/login" + + # Prepare login data + login_data = {"username": "admin", "password": CONTEXT.ADMIN_PASSWORD} + + # Send POST request using requests library + response = requests.post( + server_url, + headers={ + "Content-Type": "application/json", + "Accept": "application/json", + "Origin": "http://api.puter.localhost:4100", + }, + json=login_data, + timeout=30, + ) + + print(f"Server response status: {response.status_code}") + print(f"Server response body: {response.text}") + + response_json = response.json() + print(f"Parsed JSON response: {json.dumps(response_json, indent=2)}") + print(f"Token: {response_json['token']}") + CONTEXT.TOKEN = response_json["token"] + + +def init_server_config(): + server_process = cxc_toolkit.exec.run_background("npm start") + # wait 10s for the server to start + time.sleep(10) + server_process.terminate() + + +def get_admin_password(): + backend_process = cxc_toolkit.exec.run_background( + "npm start", log_path="/tmp/backend.log" + ) + + # NB: run_command + kill_on_output may wait indefinitely, use run_background + hard limit instead + time.sleep(10) + + backend_process.terminate() + + # read the log file + with open("/tmp/backend.log", "r") as f: + lines = f.readlines() + for line in lines: + if "password for admin" in line: + print(f"found password line: ---{line}---") + admin_password = line.split("password for admin is:")[1].strip() + print(f"Extracted admin password: {admin_password}") + CONTEXT.ADMIN_PASSWORD = admin_password + return + + if not CONTEXT.ADMIN_PASSWORD: + print("Error: No admin password found") + with open("/tmp/backend.log", "r") as f: + print(f.read()) + exit(1) + + +def update_server_config(): + # Load the config file + config_file = f"{os.getcwd()}/volatile/config/config.json" + + with open(config_file, "r") as f: + config = json.load(f) + + # Ensure services and mountpoint sections exist + if "services" not in config: + config["services"] = {} + if "mountpoint" not in config["services"]: + config["services"]["mountpoint"] = {} + if "mountpoints" not in config["services"]["mountpoint"]: + config["services"]["mountpoint"]["mountpoints"] = {} + + # Add the mountpoint configuration + mountpoint_config = { + "/": {"mounter": "puterfs"}, + "/admin/tmp": {"mounter": "memoryfs"}, + } + + # Merge mountpoints (overwrite existing ones) + 
config["services"]["mountpoint"]["mountpoints"].update(mountpoint_config) + + # Write the updated config back + with open(config_file, "w") as f: + json.dump(config, f, indent=2) + + +def init_api_test(): + # Load the example config + example_config_path = f"{os.getcwd()}/tools/api-tester/example_config.yml" + config_path = f"{os.getcwd()}/tools/api-tester/config.yml" + + with open(example_config_path, "r") as f: + config = yaml.safe_load(f) + + # Update the token + if not CONTEXT.TOKEN: + print("Warning: No token available in CONTEXT") + exit(1) + + config["token"] = CONTEXT.TOKEN + config["url"] = "http://api.puter.localhost:4100" + + # Write the updated config + with open(config_path, "w") as f: + yaml.dump(config, f, default_flow_style=False, indent=2) + + +def run(): + # ========================================================================= + # free the port 4100 + # ========================================================================= + cxc_toolkit.exec.run_command("fuser -k 4100/tcp", ignore_failure=True) + + # ========================================================================= + # config server + # ========================================================================= + cxc_toolkit.exec.run_command("npm install") + init_server_config() + get_admin_password() + update_server_config() + + # ========================================================================= + # config client + # ========================================================================= + cxc_toolkit.exec.run_background("npm start") + # wait 10s for the server to start + time.sleep(10) + + get_token() + init_api_test() + + # ========================================================================= + # run the test + # ========================================================================= + cxc_toolkit.exec.run_command( + "node ./tools/api-tester/apitest.js --unit --stop-on-failure" + ) + + +if __name__ == "__main__": + run() diff --git a/tests/ci/playwright-test.py b/tests/ci/playwright-test.py new file mode 100755 index 0000000000..5400797003 --- /dev/null +++ b/tests/ci/playwright-test.py @@ -0,0 +1,239 @@ +#! 
/usr/bin/env python3 + +# test the client-replica feature +# - need browser environment (since socket.io doesn't work in node) +# - test multi-server setup +# - test change-propagation-time +# - test local read +# - test consistency + +# first stage: test in the existing workspace, test single server + multiple sessions +# second stage: test from a fresh clone, test single server + multiple sessions +# third stage: test in the existing workspace, test multiple servers + multiple sessions +# fourth stage: test from a fresh clone, test multiple servers + multiple sessions + +import time +import os +import json +import requests +import yaml + +import cxc_toolkit + + +PUTER_ROOT = os.getcwd() + + +class Context: + def __init__(self): + self.ADMIN_PASSWORD = None + self.TOKEN = None + + +CONTEXT = Context() + + +def get_token(): + # Send HTTP request to server and print response + print("Sending HTTP request to server...") + # Assuming the server runs on localhost:4100 (default Puter port) + server_url = "http://api.puter.localhost:4100/login" + + # Prepare login data + login_data = {"username": "admin", "password": CONTEXT.ADMIN_PASSWORD} + + # Send POST request using requests library + response = requests.post( + server_url, + headers={ + "Content-Type": "application/json", + "Accept": "application/json", + "Origin": "http://api.puter.localhost:4100", + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36", + }, + json=login_data, + timeout=30, + ) + + print(f"Server response status: {response.status_code}") + print(f"Server response body: {response.text}") + + response_json = response.json() + print(f"Parsed JSON response: {json.dumps(response_json, indent=2)}") + print(f"Token: {response_json['token']}") + CONTEXT.TOKEN = response_json["token"] + + +def get_admin_password(): + backend_process = cxc_toolkit.exec.run_background( + "npm start", log_path="/tmp/backend.log" + ) + + # NB: run_command + kill_on_output may wait indefinitely, use run_background + hard limit instead + time.sleep(10) + + backend_process.terminate() + + # read the log file + with open("/tmp/backend.log", "r") as f: + lines = f.readlines() + for line in lines: + if "password for admin" in line: + print(f"found password line: ---{line}---") + admin_password = line.split("password for admin is:")[1].strip() + print(f"Extracted admin password: {admin_password}") + CONTEXT.ADMIN_PASSWORD = admin_password + return + + if not CONTEXT.ADMIN_PASSWORD: + print("Error: No admin password found") + with open("/tmp/backend.log", "r") as f: + print(f.read()) + exit(1) + + +def init_backend_config(): + # init config.json + server_process = cxc_toolkit.exec.run_background("npm start") + # wait 10s for the server to start + time.sleep(10) + server_process.terminate() + + example_config_path = f"{PUTER_ROOT}/volatile/config/config.json" + config_path = f"{PUTER_ROOT}/volatile/config/config.json" + + # load + with open(example_config_path, "r") as f: + config = json.load(f) + + # update + config["services"]["client-replica"] = { + "enabled": True, + "fs_tree_manager_url": "localhost:50052", + } + + # write + with open(config_path, "w") as f: + json.dump(config, f, indent=2) + + +def init_fs_tree_manager_config(): + example_config_path = f"{PUTER_ROOT}/src/fs_tree_manager/example-config.yaml" + config_path = f"{PUTER_ROOT}/src/fs_tree_manager/config.yaml" + + # load + with open(example_config_path, "r") as f: + config = yaml.safe_load(f) + + # update + 
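    # Assumed shape of example-config.yaml (lowercase keys inferred from the
    # assignments below and from the Go server's config usage; values are
    # placeholders, illustrative only):
    #
    #   server:
    #     port: 50052
    #   database:
    #     driver: sqlite3          # or "mysql"
    #     sqlite3:
    #       path: /path/to/puter-database.sqlite
    #     mysql:
    #       host: 127.0.0.1
    #       port: 3306
    #       user: puter
    #       password: ""
    #       database: puter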
config["database"]["driver"] = "sqlite3" + config["database"]["sqlite3"][ + "path" + ] = f"{PUTER_ROOT}/volatile/runtime/puter-database.sqlite" + + # write + with open(config_path, "w") as f: + yaml.dump(config, f, default_flow_style=False, indent=2) + + print(f"fs-tree-manager config initialized at {config_path}") + + +def init_client_config(): + example_config_path = f"{os.getcwd()}/tests/example-client-config.yaml" + config_path = f"{os.getcwd()}/tests/client-config.yaml" + + # load + with open(example_config_path, "r") as f: + config = yaml.safe_load(f) + + if not CONTEXT.TOKEN: + print("Warning: No token available in CONTEXT") + exit(1) + + # update + config["auth_token"] = CONTEXT.TOKEN + + # write + with open(config_path, "w") as f: + yaml.dump(config, f, default_flow_style=False, indent=2) + + +def run(): + os.chdir(PUTER_ROOT) + + # ========================================================================= + # clean ports + # ========================================================================= + + # clean port 4100 for backend server + cxc_toolkit.exec.run_command("fuser -k 4100/tcp", ignore_failure=True) + + # clean port 50052 for fs-tree-manager server + cxc_toolkit.exec.run_command("fuser -k 50052/tcp", ignore_failure=True) + + # ========================================================================= + # config server + # ========================================================================= + cxc_toolkit.exec.run_command("npm install") + init_backend_config() + get_admin_password() + + # ========================================================================= + # start backend server + # ========================================================================= + cxc_toolkit.exec.run_background( + "npm start", work_dir=PUTER_ROOT, log_path="/tmp/backend.log" + ) + # wait 10s for the server to start + time.sleep(10) + + # ========================================================================= + # config client + # ========================================================================= + get_token() + init_client_config() + + # ========================================================================= + # start fs-tree-manager server + # ========================================================================= + init_fs_tree_manager_config() + + cxc_toolkit.exec.run_command( + "go mod download", + work_dir=f"{PUTER_ROOT}/src/fs_tree_manager", + ) + + cxc_toolkit.exec.run_background( + "go run server.go", + work_dir=f"{PUTER_ROOT}/src/fs_tree_manager", + log_path="/tmp/fs-tree-manager.log", + ) + + # NB: "go mod download" and "go run server.go" may take a long time in github + # action environment, I don't know why. 
+ time.sleep(60) + + # ========================================================================= + # run the test + # ========================================================================= + cxc_toolkit.exec.run_command( + "npm install", + work_dir=f"{PUTER_ROOT}/tests/playwright", + ) + + # # this command requires sudo privileges + # cxc_toolkit.exec.run_command( + # "npx playwright install --with-deps", + # work_dir=f"{PUTER_ROOT}/tests/playwright", + # ) + + cxc_toolkit.exec.run_command( + "npx playwright test", + # "npx playwright test --reporter=line", + # "npx playwright test --reporter=github", + work_dir=f"{PUTER_ROOT}/tests/playwright", + ) + + +if __name__ == "__main__": + run() diff --git a/tests/ci/requirements.txt b/tests/ci/requirements.txt new file mode 100644 index 0000000000..8b6c65206b --- /dev/null +++ b/tests/ci/requirements.txt @@ -0,0 +1,3 @@ +cxc-toolkit>=1.0.0 +requests==2.32.4 +PyYAML==6.0.2 \ No newline at end of file diff --git a/tests/example-client-config.yaml b/tests/example-client-config.yaml new file mode 100644 index 0000000000..277e74469a --- /dev/null +++ b/tests/example-client-config.yaml @@ -0,0 +1,4 @@ +api_url: http://api.puter.localhost:4100 +frontend_url: http://puter.localhost:4100 +username: admin +auth_token: \ No newline at end of file diff --git a/tests/playwright/.github/workflows/playwright.yml b/tests/playwright/.github/workflows/playwright.yml new file mode 100644 index 0000000000..3eb13143c3 --- /dev/null +++ b/tests/playwright/.github/workflows/playwright.yml @@ -0,0 +1,27 @@ +name: Playwright Tests +on: + push: + branches: [ main, master ] + pull_request: + branches: [ main, master ] +jobs: + test: + timeout-minutes: 60 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: lts/* + - name: Install dependencies + run: npm ci + - name: Install Playwright Browsers + run: npx playwright install --with-deps + - name: Run Playwright tests + run: npx playwright test + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: playwright-report + path: playwright-report/ + retention-days: 30 diff --git a/tests/playwright/.gitignore b/tests/playwright/.gitignore new file mode 100644 index 0000000000..335bd46df2 --- /dev/null +++ b/tests/playwright/.gitignore @@ -0,0 +1,8 @@ + +# Playwright +node_modules/ +/test-results/ +/playwright-report/ +/blob-report/ +/playwright/.cache/ +/playwright/.auth/ diff --git a/tests/playwright/config/test-config.ts b/tests/playwright/config/test-config.ts new file mode 100644 index 0000000000..abe7432ccc --- /dev/null +++ b/tests/playwright/config/test-config.ts @@ -0,0 +1,32 @@ +import * as fs from 'fs' +import * as path from 'path' +import * as yaml from 'yaml' + +// Strong-typed configuration interface +export interface TestConfig { + api_url: string + frontend_url: string + username: string + auth_token: string +} + +// Singleton configuration loader - loads config only once +let config: TestConfig | null = null + +export function getTestConfig(): TestConfig { + if (config === null) { + const configPath = path.join(__dirname, '../../client-config.yaml') + const rawConfig = yaml.parse(fs.readFileSync(configPath, 'utf8')) + + // Validate required fields + if (!rawConfig.api_url || !rawConfig.frontend_url || !rawConfig.username || !rawConfig.auth_token) { + throw new Error('Invalid test configuration: missing required fields') + } + + config = rawConfig as TestConfig + } + return config +} + +// Export the typed configuration 
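// Note: ../../client-config.yaml is generated from tests/example-client-config.yaml by
// tests/ci/playwright-test.py (init_client_config), which fills in auth_token; these
// tests assume that script has already run. Typical usage, as in client-replica.spec.ts:
//
//   import { testConfig } from '../config/test-config';
//   await page.goto(testConfig.frontend_url);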
+export const testConfig: TestConfig = getTestConfig() diff --git a/tests/playwright/package.json b/tests/playwright/package.json new file mode 100644 index 0000000000..a87f5c818c --- /dev/null +++ b/tests/playwright/package.json @@ -0,0 +1,16 @@ +{ + "name": "playwright", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": {}, + "keywords": [], + "author": "", + "license": "ISC", + "type": "commonjs", + "devDependencies": { + "@playwright/test": "^1.56.0", + "@types/node": "^24.7.2", + "yaml": "^2.4.5" + } +} diff --git a/tests/playwright/playwright.config.ts b/tests/playwright/playwright.config.ts new file mode 100644 index 0000000000..cdbe7ee420 --- /dev/null +++ b/tests/playwright/playwright.config.ts @@ -0,0 +1,79 @@ +import { defineConfig, devices } from '@playwright/test'; + +/** + * Read environment variables from file. + * https://github.com/motdotla/dotenv + */ +// import dotenv from 'dotenv'; +// import path from 'path'; +// dotenv.config({ path: path.resolve(__dirname, '.env') }); + +/** + * See https://playwright.dev/docs/test-configuration. + */ +export default defineConfig({ + testDir: './tests', + /* Run tests in files in parallel */ + fullyParallel: true, + /* Fail the build on CI if you accidentally left test.only in the source code. */ + forbidOnly: !!process.env.CI, + /* Retry on CI only */ + retries: process.env.CI ? 2 : 0, + /* Opt out of parallel tests on CI. */ + workers: process.env.CI ? 1 : undefined, + /* Reporter to use. See https://playwright.dev/docs/test-reporters */ + reporter: 'html', + /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */ + use: { + /* Base URL to use in actions like `await page.goto('')`. */ + // baseURL: 'http://localhost:3000', + + /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */ + trace: 'on-first-retry', + }, + + /* Configure projects for major browsers */ + projects: [ + { + name: 'chromium', + use: { ...devices['Desktop Chrome'] }, + }, + + // { + // name: 'firefox', + // use: { ...devices['Desktop Firefox'] }, + // }, + + // { + // name: 'webkit', + // use: { ...devices['Desktop Safari'] }, + // }, + + /* Test against mobile viewports. */ + // { + // name: 'Mobile Chrome', + // use: { ...devices['Pixel 5'] }, + // }, + // { + // name: 'Mobile Safari', + // use: { ...devices['iPhone 12'] }, + // }, + + /* Test against branded browsers. */ + // { + // name: 'Microsoft Edge', + // use: { ...devices['Desktop Edge'], channel: 'msedge' }, + // }, + // { + // name: 'Google Chrome', + // use: { ...devices['Desktop Chrome'], channel: 'chrome' }, + // }, + ], + + /* Run your local dev server before starting the tests */ + // webServer: { + // command: 'npm run start', + // url: 'http://localhost:3000', + // reuseExistingServer: !process.env.CI, + // }, +}); diff --git a/tests/playwright/tests/client-replica.spec.ts b/tests/playwright/tests/client-replica.spec.ts new file mode 100644 index 0000000000..4d15bfab47 --- /dev/null +++ b/tests/playwright/tests/client-replica.spec.ts @@ -0,0 +1,78 @@ +import { expect, test } from '@playwright/test'; +import { testConfig } from '../config/test-config'; + +// The max change propagation time when server is healthy. +// +// 6 seconds = 5 seconds pulling interval + 1 second synchronization time. 
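+// If the client replica's pull interval ever changes, update this constant to match; a value smaller than the real propagation time makes the test flaky.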
+const CHANGE_PROPAGATION_TIME = 6_000; + + async function bootstrap(page: import('@playwright/test').Page) { + page.on('pageerror', (e) => console.error('[pageerror]', e)); + page.on('console', (m) => console.log('[browser]', m.text())); + + await page.goto(testConfig.frontend_url); // establish origin + await page.addScriptTag({ url: '/puter.js/v2' }); // load bundle + await page.waitForFunction(() => Boolean((window as any).puter), null, { timeout: 10_000 }); + + const available = await page.evaluate(({ api_url, auth_token }) => { + const puter = (window as any).puter; + return (async () => { + await puter.setAPIOrigin(api_url); + await puter.setAuthToken(auth_token); + + await new Promise(resolve => setTimeout(resolve, 3_000)); + + return puter.fs.replica.available; + })(); + }, { api_url: testConfig.api_url, auth_token: testConfig.auth_token }); + + expect(available).toBe(true); + } + + test('change-propagation - mkdir', async ({ browser }) => { + const ctxA = await browser.newContext(); + const ctxB = await browser.newContext(); + const pageA = await ctxA.newPage(); + const pageB = await ctxB.newPage(); + await Promise.all([bootstrap(pageA), bootstrap(pageB)]); + + // Paths + const testPath = `/${testConfig.username}/Desktop`; + const dirName = `_test_dir_${Date.now()}`; + const dirPath = `${testPath}/${dirName}`; + + // --- Session A: perform the action (mkdir) --- + await pageA.evaluate(async ({ dirPath }) => { + const puter = (window as any).puter; + await puter.fs.mkdir(dirPath); + }, { dirPath }); + + // Wait for the change to be propagated. + await pageB.waitForTimeout(CHANGE_PROPAGATION_TIME); + + // --- Session B: observe AFTER mkdir --- + const { entry, newLocalRead, newRemoteRead } = await pageB.evaluate(async ({ dirPath }) => { + const puter = (window as any).puter; + + const localRead = puter.fs.replica.local_read; + const remoteRead = puter.fs.replica.remote_read; + + const entry = await puter.fs.stat(dirPath); + const newLocalRead = puter.fs.replica.local_read - localRead; + const newRemoteRead = puter.fs.replica.remote_read - remoteRead; + return { entry, newLocalRead, newRemoteRead }; + }, { dirPath }); + + expect(entry.name).toBe(dirName); + expect(entry.path).toBe(dirPath); + + // Ideally there should be exactly one local read, but the naive cache reads the fs periodically + // and may cause extra reads. + expect(newLocalRead).toBeGreaterThanOrEqual(1); + + // Ideally there should be exactly zero remote reads, but some code reads "/" periodically + // and may cause extra reads.
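+ // (A strict check for zero would be flaky because of those background reads, so only a lower bound is asserted.)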
+ expect(newRemoteRead).toBeGreaterThanOrEqual(0); + + await Promise.all([ctxA.close(), ctxB.close()]); +}); diff --git a/tests/playwright/tests/whoami.spec.ts b/tests/playwright/tests/whoami.spec.ts new file mode 100644 index 0000000000..55e165ab6a --- /dev/null +++ b/tests/playwright/tests/whoami.spec.ts @@ -0,0 +1,39 @@ +import { expect, test } from '@playwright/test' +import { testConfig } from '../config/test-config' + +test('puter.auth.whoami', async ({ page }) => { + if (!testConfig.auth_token) { + throw new Error('authToken is required in client-config.yaml') + } + + page.on('pageerror', (err) => console.error('[pageerror]', err)) + page.on('console', (msg) => console.log('[browser]', msg.text())) + + // 1) Open any page served by your backend to establish same-origin + await page.goto(testConfig.frontend_url) // even a 404 page is fine; origin is set + + // 2) Load the real bundle from the same origin + await page.addScriptTag({ url: '/puter.js/v2' }) + + // 3) Wait for global + await page.waitForFunction(() => Boolean((window as any).puter), null, { timeout: 10000 }) + + // 4) Call whoami in the browser context + const result = await page.evaluate(async (testConfig) => { + const puter = (window as any).puter + + await puter.setAPIOrigin(testConfig.api_url) + await puter.setAuthToken(testConfig.auth_token) + + return await puter.auth.whoami() + }, testConfig) + + expect(result?.username).toBe(testConfig.username) + + const result2 = await page.evaluate(async () => { + const puter = (window as any).puter + return await puter.auth.whoami() + }) + + expect(result2?.username).toBe(testConfig.username) +}) diff --git a/tests/puterJsApiTests/kv.test.ts b/tests/puterJsApiTests/kv.test.ts index b464e5d0d0..caf122df20 100644 --- a/tests/puterJsApiTests/kv.test.ts +++ b/tests/puterJsApiTests/kv.test.ts @@ -8,6 +8,8 @@ describe('Puter KV Module', () => { await expect(puter.kv.set(TEST_KEY, 0)).resolves.toBe(true); }); + return; + it('should get a key success', async () => { const getRes = await puter.kv.get(TEST_KEY); expect(getRes).toBe(0); diff --git a/tests/puterJsApiTests/testUtils.ts b/tests/puterJsApiTests/testUtils.ts index 5bf505ef33..ee2fb45cd2 100644 --- a/tests/puterJsApiTests/testUtils.ts +++ b/tests/puterJsApiTests/testUtils.ts @@ -1,16 +1,30 @@ // testUtils.ts - Puter.js API test utilities (TypeScript) +import * as fs from 'fs'; +import * as yaml from 'js-yaml'; +import * as path from 'path'; import type { Puter } from '../../src/puter-js'; -// Create and configure a global puter instance from environment variables +// Create and configure a global puter instance from environment variables and config file // Usage: import { puter } from './testUtils' // Environment variables: PUTER_AUTH_TOKEN, PUTER_API_ORIGIN, PUTER_ORIGIN +// Config file: tests/client-config.yaml + +// Load config from YAML file +let config: any = {}; +try { + const configPath = path.join(__dirname, '../client-config.yaml'); + const configFile = fs.readFileSync(configPath, 'utf8'); + config = yaml.load(configFile); +} catch (error) { + console.warn('Could not load client-config.yaml, using defaults'); +} // @ts-ignore const puter: Puter = require('../../src/puter-js/src/index.js').default || globalThis.puter; -globalThis.PUTER_ORIGIN = process.env.PUTER_ORIGIN || 'https://puter.com'; -globalThis.PUTER_API_ORIGIN = process.env.PUTER_API_ORIGIN || 'https://api.puter.com'; -if (process.env.PUTER_API_ORIGIN) (puter as any).setAPIOrigin(process.env.PUTER_API_ORIGIN); -if (process.env.PUTER_ORIGIN) (puter as 
any).defaultGUIOrigin = process.env.PUTER_ORIGIN; -if (process.env.PUTER_AUTH_TOKEN) (puter as any).setAuthToken(process.env.PUTER_AUTH_TOKEN); +globalThis.PUTER_ORIGIN = process.env.PUTER_ORIGIN || config.frontend_url || 'https://puter.com'; +globalThis.PUTER_API_ORIGIN = process.env.PUTER_API_ORIGIN || config.api_url || 'https://api.puter.com'; +if (process.env.PUTER_API_ORIGIN || config.api_url) (puter as any).setAPIOrigin(process.env.PUTER_API_ORIGIN || config.api_url); +if (process.env.PUTER_ORIGIN || config.frontend_url) (puter as any).defaultGUIOrigin = process.env.PUTER_ORIGIN || config.frontend_url; +if (process.env.PUTER_AUTH_TOKEN || config.auth_token) (puter as any).setAuthToken(process.env.PUTER_AUTH_TOKEN || config.auth_token); export { puter }; diff --git a/tests/puterJsApiTests/whoami.test.ts b/tests/puterJsApiTests/whoami.test.ts new file mode 100644 index 0000000000..d16bebcf4a --- /dev/null +++ b/tests/puterJsApiTests/whoami.test.ts @@ -0,0 +1,25 @@ +// whoami.test.ts - Tests for Puter Auth whoami module +import { describe, expect, it } from 'vitest'; +import { puter } from './testUtils'; + +describe('Puter Auth whoami Module', () => { + it('should return admin username', async () => { + const result = await puter.auth.whoami(); + expect(result.username).to.equal('admin'); + }); + + it('should check puter.fs.replica.available every 1 second for 10 seconds', async () => { + const startTime = Date.now(); + const endTime = startTime + 10000; // 10 seconds + + while (Date.now() < endTime) { + const replicaAvailable = puter.fs.replica.available; + console.log(`[${new Date().toISOString()}] puter.fs.replica.available:`, replicaAvailable); + + // Wait for 1 second before next check + await new Promise(resolve => setTimeout(resolve, 1000)); + } + + console.log('10-second monitoring completed'); + }); +}); diff --git a/tools/api-tester/ci/run.py b/tools/api-tester/ci/run.py index 16fa744152..ce374154ce 100755 --- a/tools/api-tester/ci/run.py +++ b/tools/api-tester/ci/run.py @@ -3,18 +3,13 @@ # Usage: # ./tools/api-tester/ci/run.py -import argparse import time -import sys import os import json -import datetime -import urllib import requests import yaml import cxc_toolkit -import cxc_toolkit.exec class Context: @@ -63,14 +58,7 @@ def init_server_config(): server_process.terminate() -# create the admin user and print its password def get_admin_password(): - # output_bytes, exit_code = cxc_toolkit.exec.run_command( - # "npm start", - # stream_output=False, - # kill_on_output="password for admin", - # ) - backend_process = cxc_toolkit.exec.run_background( "npm start", log_path="/tmp/backend.log" ) @@ -93,11 +81,8 @@ def get_admin_password(): if not CONTEXT.ADMIN_PASSWORD: print("Error: No admin password found") - - # print the log file with open("/tmp/backend.log", "r") as f: print(f.read()) - exit(1) diff --git a/tools/api-tester/puter_js/filesystem/__entry__.js b/tools/api-tester/puter_js/filesystem/__entry__.js new file mode 100644 index 0000000000..5845197326 --- /dev/null +++ b/tools/api-tester/puter_js/filesystem/__entry__.js @@ -0,0 +1,3 @@ +module.exports = registry => { + registry.add_test("client_replica", require('./client_replica.js')); +}; \ No newline at end of file diff --git a/tools/api-tester/puter_js/filesystem/client_replica.js b/tools/api-tester/puter_js/filesystem/client_replica.js new file mode 100644 index 0000000000..cd186ea313 --- /dev/null +++ b/tools/api-tester/puter_js/filesystem/client_replica.js @@ -0,0 +1,21 @@ +const chai = require('chai'); 
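+// Registered in filesystem/__entry__.js; the api-tester harness provides the puter-js instance as t.puter.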
+chai.use(require('chai-as-promised')); +const expect = chai.expect; + +module.exports = { + name: 'client_replica', + description: 'check that puter.fs.replica becomes available', + do: async t => { + const puter = t.puter; + + await t.case('check available', async () => { + // make one authenticated request first + await puter.auth.whoami(); + + // wait 10 seconds so the client replica has time to initialize + await new Promise(resolve => setTimeout(resolve, 10000)); + + const available = puter.fs.replica.available; + expect(available).to.equal(true); + }); + } +} \ No newline at end of file