diff --git a/.gitignore b/.gitignore
index 7cf569d6ff..6d1a318cf2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,3 +29,18 @@ dist/
# Local Netlify folder
.netlify
src/emulator/release/
+
+# ======================================================================
+# vscode
+# ======================================================================
+# vscode configuration
+.vscode/
+
+# JS language server, ref: https://code.visualstudio.com/docs/languages/jsconfig
+jsconfig.json
+
+# ======================================================================
+# node js
+# ======================================================================
+# the exact tree installed in the node_modules folder
+package-lock.json
\ No newline at end of file
diff --git a/src/backend/exports.js b/src/backend/exports.js
index 9cc1f9f894..1766f7f166 100644
--- a/src/backend/exports.js
+++ b/src/backend/exports.js
@@ -20,6 +20,7 @@ const CoreModule = require("./src/CoreModule.js");
const { Kernel } = require("./src/Kernel.js");
const DatabaseModule = require("./src/DatabaseModule.js");
const LocalDiskStorageModule = require("./src/LocalDiskStorageModule.js");
+const MemoryStorageModule = require("./src/MemoryStorageModule.js");
const SelfHostedModule = require("./src/modules/selfhosted/SelfHostedModule.js");
const { testlaunch } = require("./src/index.js");
const BaseService = require("./src/services/BaseService.js");
@@ -73,6 +74,7 @@ module.exports = {
WebModule,
DatabaseModule,
LocalDiskStorageModule,
+ MemoryStorageModule,
SelfHostedModule,
TestDriversModule,
PuterAIModule,
diff --git a/src/backend/src/MemoryStorageModule.js b/src/backend/src/MemoryStorageModule.js
new file mode 100644
index 0000000000..a8985460c4
--- /dev/null
+++ b/src/backend/src/MemoryStorageModule.js
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2024-present Puter Technologies Inc.
+ *
+ * This file is part of Puter.
+ *
+ * Puter is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+class MemoryStorageModule {
+ async install (context) {
+ const services = context.get('services');
+ const MemoryStorageService = require("./services/MemoryStorageService");
+ services.registerService('memory-storage', MemoryStorageService);
+ }
+}
+
+module.exports = MemoryStorageModule;
diff --git a/src/backend/src/api/APIError.js b/src/backend/src/api/APIError.js
index 07755b67d8..856c8f4444 100644
--- a/src/backend/src/api/APIError.js
+++ b/src/backend/src/api/APIError.js
@@ -17,6 +17,7 @@
* along with this program. If not, see .
*/
const { URLSearchParams } = require("node:url");
+const config = require("../config");
const { quot } = require('@heyputer/putility').libs.string;
/**
@@ -518,8 +519,9 @@ module.exports = class APIError {
* is set to null. The first argument is used as the status code.
*
* @static
- * @param {number} status
- * @param {string|Error} message_or_source one of the following:
+ * @param {number|string} status
+ * @param {object} source
+ * @param {string|Error|object} fields one of the following:
* - a string to use as the error message
* - an Error object to use as the source of the error
* - an object with a message property to use as the error message
diff --git a/src/backend/src/filesystem/FSNodeContext.js b/src/backend/src/filesystem/FSNodeContext.js
index fde3e20f96..cc062c94c1 100644
--- a/src/backend/src/filesystem/FSNodeContext.js
+++ b/src/backend/src/filesystem/FSNodeContext.js
@@ -288,7 +288,7 @@ module.exports = class FSNodeContext {
controls,
});
- if ( entry === null ) {
+ if ( ! entry ) {
this.found = false;
this.entry = false;
} else {
diff --git a/src/backend/src/filesystem/hl_operations/hl_copy.js b/src/backend/src/filesystem/hl_operations/hl_copy.js
index 46c71af03e..000ba5f3c3 100644
--- a/src/backend/src/filesystem/hl_operations/hl_copy.js
+++ b/src/backend/src/filesystem/hl_operations/hl_copy.js
@@ -159,8 +159,8 @@ class HLCopy extends HLFilesystemOperation {
throw APIError.create('source_and_dest_are_the_same');
}
- if ( await is_ancestor_of(source.mysql_id, parent.mysql_id) ) {
- throw APIError('cannot_copy_item_into_itself');
+ if ( await is_ancestor_of(source.uid, parent.uid) ) {
+ throw APIError.create('cannot_copy_item_into_itself');
}
let overwritten;
diff --git a/src/backend/src/filesystem/hl_operations/hl_mkdir.js b/src/backend/src/filesystem/hl_operations/hl_mkdir.js
index 17cc12a2cb..0b612148bf 100644
--- a/src/backend/src/filesystem/hl_operations/hl_mkdir.js
+++ b/src/backend/src/filesystem/hl_operations/hl_mkdir.js
@@ -287,7 +287,7 @@ class HLMkdir extends HLFilesystemOperation {
// "top_parent" is the immediate parent of the target directory
// (e.g: /home/foo/bar -> /home/foo)
const top_parent = values.create_missing_parents
- ? await this._create_top_parent({ top_parent: parent_node })
+ ? await this._create_dir(parent_node)
: await this._get_existing_top_parent({ top_parent: parent_node })
;
@@ -331,12 +331,14 @@ class HLMkdir extends HLFilesystemOperation {
});
}
else if ( dedupe_name ) {
- const fsEntryFetcher = context.get('services').get('fsEntryFetcher');
+ const fs = context.get('services').get('filesystem');
+ const parent_selector = parent_node.selector;
for ( let i=1 ;; i++ ) {
let try_new_name = `${target_basename} (${i})`;
- const exists = await fsEntryFetcher.nameExistsUnderParent(
- existing.entry.parent_uid, try_new_name
- );
+ const selector = new NodeChildSelector(parent_selector, try_new_name);
+ const exists = await parent_node.provider.quick_check({
+ selector,
+ });
if ( ! exists ) {
target_basename = try_new_name;
break;
@@ -468,16 +470,24 @@ class HLMkdir extends HLFilesystemOperation {
return node;
}
- async _create_top_parent ({ top_parent }) {
- if ( await top_parent.exists() ) {
- if ( ! top_parent.entry.is_dir ) {
+ /**
+ * Creates a directory and all its ancestors.
+ *
+ * @param {FSNodeContext} dir - The directory to create.
+ * @returns {Promise<FSNodeContext>} The created directory.
+ */
+ async _create_dir (dir) {
+ console.log('CREATING DIR', dir.selector.describe());
+
+ if ( await dir.exists() ) {
+ if ( ! dir.entry.is_dir ) {
throw APIError.create('dest_is_not_a_directory');
}
- return top_parent;
+ return dir;
}
const maybe_path_selector =
- top_parent.get_selector_of_type(NodePathSelector);
+ dir.get_selector_of_type(NodePathSelector);
if ( ! maybe_path_selector ) {
throw APIError.create('dest_does_not_exist');
diff --git a/src/backend/src/filesystem/ll_operations/ll_read.js b/src/backend/src/filesystem/ll_operations/ll_read.js
index 1c51826d51..9ea9a049e9 100644
--- a/src/backend/src/filesystem/ll_operations/ll_read.js
+++ b/src/backend/src/filesystem/ll_operations/ll_read.js
@@ -18,6 +18,7 @@
*/
const APIError = require("../../api/APIError");
const { Sequence } = require("../../codex/Sequence");
+const { MemoryFSProvider } = require("../../modules/puterfs/customfs/MemoryFSProvider");
const { DB_WRITE } = require("../../services/database/consts");
const { buffer_to_stream } = require("../../util/streamutil");
@@ -115,10 +116,13 @@ class LLRead extends LLFilesystemOperation {
},
async function create_S3_read_stream (a) {
const context = a.iget('context');
- const storage = context.get('storage');
const { fsNode, version_id, offset, length, has_range, range } = a.values();
+ const svc_mountpoint = context.get('services').get('mountpoint');
+ const provider = await svc_mountpoint.get_provider(fsNode.selector);
+ const storage = svc_mountpoint.get_storage(provider.constructor);
+
// Empty object here is in the case of local fiesystem,
// where s3:location will return null.
// TODO: storage interface shouldn't have S3-specific properties.
@@ -130,6 +134,7 @@ class LLRead extends LLFilesystemOperation {
bucket_region: location.bucket_region,
version_id,
key: location.key,
+ memory_file: fsNode.entry,
...(range? {range} : (has_range ? {
range: `bytes=${offset}-${offset+length-1}`
} : {})),
@@ -144,8 +149,11 @@ class LLRead extends LLFilesystemOperation {
const { fsNode, stream, has_range, range} = a.values();
if ( ! has_range ) {
- const res = await svc_fileCache.maybe_store(fsNode, stream);
- if ( res.stream ) a.set('stream', res.stream);
+ // only cache for non-memoryfs providers
+ if ( ! (fsNode.provider instanceof MemoryFSProvider) ) {
+ const res = await svc_fileCache.maybe_store(fsNode, stream);
+ if ( res.stream ) a.set('stream', res.stream);
+ }
}
},
async function return_stream (a) {
diff --git a/src/backend/src/filesystem/ll_operations/ll_rmdir.js b/src/backend/src/filesystem/ll_operations/ll_rmdir.js
index 08add7c7d4..302070745a 100644
--- a/src/backend/src/filesystem/ll_operations/ll_rmdir.js
+++ b/src/backend/src/filesystem/ll_operations/ll_rmdir.js
@@ -17,6 +17,7 @@
* along with this program. If not, see .
*/
const APIError = require("../../api/APIError");
+const { MemoryFSProvider } = require("../../modules/puterfs/customfs/MemoryFSProvider");
const { ParallelTasks } = require("../../util/otelutil");
const FSNodeContext = require("../FSNodeContext");
const { NodeUIDSelector } = require("../node/selectors");
@@ -102,14 +103,27 @@ class LLRmDir extends LLFilesystemOperation {
}
await tasks.awaitAll();
- if ( ! descendants_only ) {
- await target.provider.rmdir({
+
+ // TODO (xiaochen): consolidate these two branches
+ if ( target.provider instanceof MemoryFSProvider ) {
+ await target.provider.rmdir( {
context,
node: target,
options: {
- ignore_not_empty: true,
+ recursive,
+ descendants_only,
},
- });
+ } );
+ } else {
+ if ( ! descendants_only ) {
+ await target.provider.rmdir( {
+ context,
+ node: target,
+ options: {
+ ignore_not_empty: true,
+ },
+ } );
+ }
}
}
}
diff --git a/src/backend/src/filesystem/node/selectors.js b/src/backend/src/filesystem/node/selectors.js
index 1501a81fa9..717ecb17af 100644
--- a/src/backend/src/filesystem/node/selectors.js
+++ b/src/backend/src/filesystem/node/selectors.js
@@ -89,7 +89,11 @@ class NodeChildSelector {
setPropertiesKnownBySelector (node) {
node.name = this.name;
- // no properties known
+
+ try_infer_attributes(this);
+ if ( this.path ) {
+ node.path = this.path;
+ }
}
describe () {
@@ -145,6 +149,30 @@ class NodeRawEntrySelector {
}
}
+/**
+ * Try to infer following attributes for a selector:
+ * - path
+ * - uid
+ *
+ * @param {NodePathSelector | NodeUIDSelector | NodeChildSelector | RootNodeSelector | NodeRawEntrySelector} selector
+ */
+function try_infer_attributes (selector) {
+ if ( selector instanceof NodePathSelector ) {
+ selector.path = selector.value;
+ } else if ( selector instanceof NodeUIDSelector ) {
+ selector.uid = selector.value;
+ } else if ( selector instanceof NodeChildSelector ) {
+ try_infer_attributes(selector.parent);
+ if ( selector.parent.path ) {
+ selector.path = _path.join(selector.parent.path, selector.name);
+ }
+ } else if ( selector instanceof RootNodeSelector ) {
+ selector.path = '/';
+ } else {
+ // give up
+ }
+}
+
const relativeSelector = (parent, path) => {
if ( path === '.' ) return parent;
if ( path.startsWith('..') ) {
@@ -169,4 +197,5 @@ module.exports = {
RootNodeSelector,
NodeRawEntrySelector,
relativeSelector,
+ try_infer_attributes,
};
diff --git a/src/backend/src/helpers.js b/src/backend/src/helpers.js
index 9abb815f08..19f6477fa7 100644
--- a/src/backend/src/helpers.js
+++ b/src/backend/src/helpers.js
@@ -966,7 +966,38 @@ const body_parser_error_handler = (err, req, res, next) => {
next();
}
+/**
+ * Given a uid, returns a file node.
+ *
+ * TODO (xiaochen): It only works for MemoryFSProvider currently.
+ *
+ * @param {string} uid - The uid of the file to get.
+ * @returns {Promise<Object|null>} The file node, or null if the file does not exist.
+ */
+async function get_entry(uid) {
+ const svc_mountpoint = Context.get('services').get('mountpoint');
+ const uid_selector = new NodeUIDSelector(uid);
+ const provider = await svc_mountpoint.get_provider(uid_selector);
+
+ // NB: We cannot import MemoryFSProvider here because it will cause a circular dependency.
+ if ( provider.constructor.name !== 'MemoryFSProvider' ) {
+ return null;
+ }
+
+ return provider.stat({
+ selector: uid_selector,
+ });
+}
+
async function is_ancestor_of(ancestor_uid, descendant_uid){
+ const ancestor = await get_entry(ancestor_uid);
+ const descendant = await get_entry(descendant_uid);
+
+ if ( ancestor && descendant ) {
+ return descendant.path.startsWith(ancestor.path);
+ }
+
+
/** @type BaseDatabaseAccessService */
const db = services.get('database').get(DB_READ, 'filesystem');
diff --git a/src/backend/src/modules/puterfs/DatabaseFSEntryFetcher.js b/src/backend/src/modules/puterfs/DatabaseFSEntryFetcher.js
index 0789a6df36..df881265fc 100644
--- a/src/backend/src/modules/puterfs/DatabaseFSEntryFetcher.js
+++ b/src/backend/src/modules/puterfs/DatabaseFSEntryFetcher.js
@@ -222,4 +222,12 @@ module.exports = class DatabaseFSEntryFetcher extends BaseService {
);
return !! check_dupe[0];
}
+
+ async nameExistsUnderParentID (parent_id, name) {
+ const parent = await this.findByID(parent_id);
+ if ( ! parent ) {
+ return false;
+ }
+ return this.nameExistsUnderParent(parent.uuid, name);
+ }
}
diff --git a/src/backend/src/modules/puterfs/MountpointService.js b/src/backend/src/modules/puterfs/MountpointService.js
index 48dddd8ff7..68af55d71e 100644
--- a/src/backend/src/modules/puterfs/MountpointService.js
+++ b/src/backend/src/modules/puterfs/MountpointService.js
@@ -19,7 +19,7 @@
*/
// const Mountpoint = o => ({ ...o });
-const { RootNodeSelector, NodeUIDSelector } = require("../../filesystem/node/selectors");
+const { RootNodeSelector, NodeUIDSelector, NodeChildSelector, NodePathSelector, NodeInternalIDSelector, NodeSelector, try_infer_attributes } = require("../../filesystem/node/selectors");
const BaseService = require("../../services/BaseService");
/**
@@ -57,8 +57,9 @@ class MountpointService extends BaseService {
* @returns {Promise}
*/
async _init () {
- // Temporary solution - we'll develop this incrementally
- this.storage_ = null;
+ // key: provider class (e.g: PuterFSProvider, MemoryFSProvider)
+ // value: storage instance
+ this.storage_ = {};
}
async ['__on_boot.consolidation'] () {
@@ -87,12 +88,32 @@ class MountpointService extends BaseService {
}
async get_provider (selector) {
+ try_infer_attributes(selector);
+
if ( selector instanceof RootNodeSelector ) {
return this.mountpoints_['/'].provider;
}
if ( selector instanceof NodeUIDSelector ) {
- return this.mountpoints_['/'].provider;
+ for ( const [path, { provider }] of Object.entries(this.mountpoints_) ) {
+ const result = await provider.quick_check({
+ selector,
+ });
+ if ( result ) {
+ return provider;
+ }
+ }
+
+ // No provider found, but we shouldn't throw an error here
+ // because it's a valid case for a node that doesn't exist.
+ }
+
+ if ( selector instanceof NodeChildSelector ) {
+ if ( selector.path ) {
+ return this.get_provider(new NodePathSelector(selector.path));
+ } else {
+ return this.get_provider(selector.parent);
+ }
}
const probe = {};
@@ -118,15 +139,16 @@ class MountpointService extends BaseService {
}
// Temporary solution - we'll develop this incrementally
- set_storage (storage) {
- this.storage_ = storage;
+ set_storage (provider, storage) {
+ this.storage_[provider] = storage;
}
+
/**
* Gets the current storage backend instance
* @returns {Object} The storage backend instance
*/
- get_storage () {
- return this.storage_;
+ get_storage (provider) {
+ return this.storage_[provider];
}
}
diff --git a/src/backend/src/modules/puterfs/PuterFSModule.js b/src/backend/src/modules/puterfs/PuterFSModule.js
index 91b88fb52f..234565fdcb 100644
--- a/src/backend/src/modules/puterfs/PuterFSModule.js
+++ b/src/backend/src/modules/puterfs/PuterFSModule.js
@@ -40,6 +40,9 @@ class PuterFSModule extends AdvancedBase {
const DatabaseFSEntryFetcher = require("./DatabaseFSEntryFetcher");
services.registerService('fsEntryFetcher', DatabaseFSEntryFetcher);
+
+ const { MemoryFSService } = require('./customfs/MemoryFSService');
+ services.registerService('memoryfs', MemoryFSService);
}
}
diff --git a/src/backend/src/modules/puterfs/customfs/MemoryFSProvider.js b/src/backend/src/modules/puterfs/customfs/MemoryFSProvider.js
new file mode 100644
index 0000000000..09ec0d657e
--- /dev/null
+++ b/src/backend/src/modules/puterfs/customfs/MemoryFSProvider.js
@@ -0,0 +1,603 @@
+/*
+ * Copyright (C) 2024-present Puter Technologies Inc.
+ *
+ * This file is part of Puter.
+ *
+ * Puter is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+const FSNodeContext = require('../../../filesystem/FSNodeContext');
+const _path = require('path');
+const { Context } = require('../../../util/context');
+const { v4: uuidv4 } = require('uuid');
+const config = require('../../../config');
+const {
+ NodeChildSelector,
+ NodePathSelector,
+ NodeUIDSelector,
+ NodeRawEntrySelector,
+ RootNodeSelector,
+ try_infer_attributes,
+} = require('../../../filesystem/node/selectors');
+const fsCapabilities = require('../../../filesystem/definitions/capabilities');
+const APIError = require('../../../api/APIError');
+
+class MemoryFile {
+ /**
+ * @param {Object} param
+ * @param {string} param.path - Relative path from the mountpoint.
+ * @param {boolean} param.is_dir
+ * @param {Buffer|null} param.content - The content of the file, `null` if the file is a directory.
+ * @param {string|null} [param.parent_uid] - UID of parent directory; null for root.
+ */
+ constructor({ path, is_dir, content, parent_uid = null }) {
+ this.uuid = uuidv4();
+
+ this.is_public = true;
+ this.path = path;
+ this.name = _path.basename(path);
+ this.is_dir = is_dir;
+
+ this.content = content;
+
+ // parent_uid should reflect the actual parent's uid; null for root
+ this.parent_uid = parent_uid;
+
+ // TODO (xiaochen): return sensible values for "user_id", currently
+ // it must be 2 (admin) to pass the test.
+ this.user_id = 2;
+
+ // TODO (xiaochen): return sensible values for following fields
+ this.id = 123;
+ this.parent_id = 123;
+ this.immutable = 0;
+ this.is_shortcut = 0;
+ this.is_symlink = 0;
+ this.symlink_path = null;
+ this.created = Math.floor(Date.now() / 1000);
+ this.accessed = Math.floor(Date.now() / 1000);
+ this.modified = Math.floor(Date.now() / 1000);
+ this.size = is_dir ? 0 : content ? content.length : 0;
+ }
+}
+
+class MemoryFSProvider {
+ constructor(mountpoint) {
+ this.mountpoint = mountpoint;
+
+ // key: relative path from the mountpoint, always starts with `/`
+ // value: entry uuid
+ this.entriesByPath = new Map();
+
+ // key: entry uuid
+ // value: entry (MemoryFile)
+ //
+ // We declare 2 maps to support 2 lookup apis: by-path/by-uuid.
+ this.entriesByUUID = new Map();
+
+ const root = new MemoryFile({
+ path: '/',
+ is_dir: true,
+ content: null,
+ parent_uid: null,
+ });
+ this.entriesByPath.set('/', root.uuid);
+ this.entriesByUUID.set(root.uuid, root);
+ }
+
+ /**
+ * Get the capabilities of this filesystem provider.
+ *
+ * @returns {Set} - Set of capabilities supported by this provider.
+ */
+ get_capabilities() {
+ return new Set([
+ fsCapabilities.READDIR_UUID_MODE,
+ fsCapabilities.UUID,
+ fsCapabilities.READ,
+ fsCapabilities.WRITE,
+ fsCapabilities.COPY_TREE,
+ ]);
+ }
+
+ /**
+ * Normalize the path to be relative to the mountpoint. Returns `/` if the path is empty/undefined.
+ *
+ * @param {string} path - The path to normalize.
+ * @returns {string} - The normalized path, always starts with `/`.
+ */
+ _inner_path(path) {
+ if (!path) {
+ return '/';
+ }
+
+ if (path.startsWith(this.mountpoint)) {
+ path = path.slice(this.mountpoint.length);
+ }
+
+ if (!path.startsWith('/')) {
+ path = '/' + path;
+ }
+
+ return path;
+ }
+
+ /**
+ * Check the integrity of the whole memory filesystem. Throws error if any violation is found.
+ *
+ * @returns {void}
+ */
+ _integrity_check() {
+ if (config.env !== 'dev') {
+ // only check in debug mode since it's expensive
+ return;
+ }
+
+ // check the 2 maps are consistent
+ if (this.entriesByPath.size !== this.entriesByUUID.size) {
+ throw new Error('Path map and UUID map have different sizes');
+ }
+
+ for (const [inner_path, uuid] of this.entriesByPath) {
+ const entry = this.entriesByUUID.get(uuid);
+
+ // entry should exist
+ if (!entry) {
+ throw new Error(`Entry ${uuid} does not exist`);
+ }
+
+ // path should match
+ if (this._inner_path(entry.path) !== inner_path) {
+ throw new Error(`Path ${inner_path} does not match entry ${uuid}`);
+ }
+
+ // uuid should match
+ if (entry.uuid !== uuid) {
+ throw new Error(`UUID ${uuid} does not match entry ${entry.uuid}`);
+ }
+
+ // parent should exist
+ if (entry.parent_uid) {
+ const parent_entry = this.entriesByUUID.get(entry.parent_uid);
+ if (!parent_entry) {
+ throw new Error(`Parent ${entry.parent_uid} does not exist`);
+ }
+ }
+
+ // parent's path should be a prefix of the entry's path
+ if (entry.parent_uid) {
+ const parent_entry = this.entriesByUUID.get(entry.parent_uid);
+ if (!entry.path.startsWith(parent_entry.path)) {
+ throw new Error(
+ `Parent ${entry.parent_uid} path ${parent_entry.path} is not a prefix of entry ${entry.path}`,
+ );
+ }
+ }
+
+ // parent should be a directory
+ if (entry.parent_uid) {
+ const parent_entry = this.entriesByUUID.get(entry.parent_uid);
+ if (!parent_entry.is_dir) {
+ throw new Error(`Parent ${entry.parent_uid} is not a directory`);
+ }
+ }
+ }
+ }
+
+ /**
+ * Check if a given node exists.
+ *
+ * @param {Object} param
+ * @param {NodePathSelector | NodeUIDSelector | NodeChildSelector | RootNodeSelector | NodeRawEntrySelector} param.selector - The selector used for checking.
+ * @returns {Promise<boolean>} - True if the node exists, false otherwise.
+ */
+ async quick_check({ selector }) {
+ if (selector instanceof NodePathSelector) {
+ const inner_path = this._inner_path(selector.value);
+ return this.entriesByPath.has(inner_path);
+ }
+
+ if (selector instanceof NodeUIDSelector) {
+ return this.entriesByUUID.has(selector.value);
+ }
+
+ // fallback to stat
+ const entry = await this.stat({ selector });
+ return !!entry;
+ }
+
+ /**
+ * Performs a stat operation using the given selector.
+ *
+ * NB: Some returned fields currently contain placeholder values. And the
+ * `path` field contains the absolute path from the root.
+ *
+ * @param {Object} param
+ * @param {NodePathSelector | NodeUIDSelector | NodeChildSelector | RootNodeSelector | NodeRawEntrySelector} param.selector - The selector to stat.
+ * @returns {Promise<MemoryFile|null>} - The result of the stat operation, or `null` if the node doesn't exist.
+ */
+ async stat({ selector }) {
+ try_infer_attributes(selector);
+
+ let entry_uuid = null;
+
+ if (selector instanceof NodePathSelector) {
+ // stat by path
+ const inner_path = this._inner_path(selector.value);
+ entry_uuid = this.entriesByPath.get(inner_path);
+ } else if (selector instanceof NodeUIDSelector) {
+ // stat by uid
+ entry_uuid = selector.value;
+ } else if (selector instanceof NodeChildSelector) {
+ if (selector.path) {
+ // Shouldn't care about the parent when the "path" is present
+ // since it might have different provider.
+ return await this.stat({
+ selector: new NodePathSelector(selector.path),
+ });
+ } else {
+ // recursively stat the parent and then stat the child
+ const parent_entry = await this.stat({
+ selector: selector.parent,
+ });
+ if (parent_entry) {
+ const full_path = _path.join(parent_entry.path, selector.name);
+ return await this.stat({
+ selector: new NodePathSelector(full_path),
+ });
+ }
+ }
+ } else {
+ // other selectors shouldn't reach here, i.e., it's an internal logic error
+ throw APIError.create('invalid_node');
+ }
+
+ const entry = this.entriesByUUID.get(entry_uuid);
+ if (!entry) {
+ return null;
+ }
+
+ // Return a copied entry with `full_path`, since external code only cares
+ // about full path.
+ const copied_entry = { ...entry };
+ copied_entry.path = _path.join(this.mountpoint, entry.path);
+ return copied_entry;
+ }
+
+ /**
+ * Read directory contents.
+ *
+ * @param {Object} param
+ * @param {Context} param.context - The context of the operation.
+ * @param {FSNodeContext} param.node - The directory node to read.
+ * @returns {Promise<string[]>} - Array of child UUIDs.
+ */
+ async readdir({ context, node }) {
+ // prerequisites: get required path via stat
+ const entry = await this.stat({ selector: node.selector });
+ if (!entry) {
+ throw APIError.create('invalid_node');
+ }
+
+ const inner_path = this._inner_path(entry.path);
+ const child_uuids = [];
+
+ // Find all entries that are direct children of this directory
+ for (const [path, uuid] of this.entriesByPath) {
+ if (path === inner_path) {
+ continue; // Skip the directory itself
+ }
+
+ const dirname = _path.dirname(path);
+ if (dirname === inner_path) {
+ child_uuids.push(uuid);
+ }
+ }
+
+ return child_uuids;
+ }
+
+ /**
+ * Create a new directory.
+ *
+ * @param {Object} param
+ * @param {Context} param.context - The context of the operation.
+ * @param {FSNodeContext} param.parent - The parent node to create the directory in. Must exist and be a directory.
+ * @param {string} param.name - The name of the new directory.
+ * @returns {Promise<FSNodeContext>} - The new directory node.
+ */
+ async mkdir({ context, parent, name }) {
+ // prerequisites: get required path via stat
+ const parent_entry = await this.stat({ selector: parent.selector });
+ if (!parent_entry) {
+ throw APIError.create('invalid_node');
+ }
+
+ const full_path = _path.join(parent_entry.path, name);
+ const inner_path = this._inner_path(full_path);
+
+ let entry = null;
+ if (this.entriesByPath.has(inner_path)) {
+ throw APIError.create('item_with_same_name_exists', null, {
+ entry_name: full_path,
+ });
+ } else {
+ entry = new MemoryFile({
+ path: inner_path,
+ is_dir: true,
+ content: null,
+ parent_uid: parent_entry.uuid,
+ });
+ this.entriesByPath.set(inner_path, entry.uuid);
+ this.entriesByUUID.set(entry.uuid, entry);
+ }
+
+ // create the node
+ const fs = context.get('services').get('filesystem');
+ const node = await fs.node(entry.uuid);
+ await node.fetchEntry();
+
+ this._integrity_check();
+
+ return node;
+ }
+
+ /**
+ * Remove a directory.
+ *
+ * @param {Object} param
+ * @param {Context} param.context
+ * @param {FSNodeContext} param.node: The directory to remove.
+ * @param {Object} param.options: The options for the operation.
+ * @returns {Promise<void>}
+ */
+ async rmdir({ context, node, options = {} }) {
+ this._integrity_check();
+
+ // prerequisites: get required path via stat
+ const entry = await this.stat({ selector: node.selector });
+ if (!entry) {
+ throw APIError.create('invalid_node');
+ }
+
+ const inner_path = this._inner_path(entry.path);
+
+ // for mode: non-recursive
+ if (!options.recursive) {
+ const children = await this.readdir({ context, node });
+ if (children.length > 0) {
+ throw APIError.create('not_empty');
+ }
+ }
+
+ // remove all descendants
+ for (const [other_inner_path, other_entry_uuid] of this.entriesByPath) {
+ if (other_entry_uuid === entry.uuid) {
+ // skip the directory itself
+ continue;
+ }
+
+ if (other_inner_path.startsWith(inner_path)) {
+ this.entriesByPath.delete(other_inner_path);
+ this.entriesByUUID.delete(other_entry_uuid);
+ }
+ }
+
+ // for mode: non-descendants-only
+ if (!options.descendants_only) {
+ // remove the directory itself
+ this.entriesByPath.delete(inner_path);
+ this.entriesByUUID.delete(entry.uuid);
+ }
+
+ this._integrity_check();
+ }
+
+ /**
+ * Remove a file.
+ *
+ * @param {Object} param
+ * @param {Context} param.context
+ * @param {FSNodeContext} param.node: The file to remove.
+ * @returns {Promise<void>}
+ */
+ async unlink({ context, node }) {
+ // prerequisites: get required path via stat
+ const entry = await this.stat({ selector: node.selector });
+ if (!entry) {
+ throw APIError.create('invalid_node');
+ }
+
+ const inner_path = this._inner_path(entry.path);
+ this.entriesByPath.delete(inner_path);
+ this.entriesByUUID.delete(entry.uuid);
+ }
+
+ /**
+ * Move a file.
+ *
+ * @param {Object} param
+ * @param {Context} param.context
+ * @param {FSNodeContext} param.node: The file to move.
+ * @param {FSNodeContext} param.new_parent: The new parent directory of the file.
+ * @param {string} param.new_name: The new name of the file.
+ * @param {Object} param.metadata: The metadata of the file.
+ * @returns {Promise<MemoryFile>}
+ */
+ async move({ context, node, new_parent, new_name, metadata }) {
+ // prerequisites: get required path via stat
+ const new_parent_entry = await this.stat({ selector: new_parent.selector });
+ if (!new_parent_entry) {
+ throw APIError.create('invalid_node');
+ }
+
+ // create the new entry
+ const new_full_path = _path.join(new_parent_entry.path, new_name);
+ const new_inner_path = this._inner_path(new_full_path);
+ const entry = new MemoryFile({
+ path: new_inner_path,
+ is_dir: node.entry.is_dir,
+ content: node.entry.content,
+ parent_uid: new_parent_entry.uuid,
+ });
+ entry.uuid = node.entry.uuid;
+ this.entriesByPath.set(new_inner_path, entry.uuid);
+ this.entriesByUUID.set(entry.uuid, entry);
+
+ // remove the old entry
+ const inner_path = this._inner_path(node.path);
+ this.entriesByPath.delete(inner_path);
+ // NB: should not delete the entry by uuid because uuid does not change
+ // after the move.
+
+ this._integrity_check();
+
+ return entry;
+ }
+
+ /**
+ * Copy a tree of files and directories.
+ *
+ * @param {Object} param
+ * @param {Context} param.context
+ * @param {FSNodeContext} param.source - The source node to copy.
+ * @param {FSNodeContext} param.parent - The parent directory for the copy.
+ * @param {string} param.target_name - The name for the copied item.
+ * @returns {Promise<FSNodeContext>} - The copied node.
+ */
+ async copy_tree({ context, source, parent, target_name }) {
+ const fs = context.get('services').get('filesystem');
+
+ if (source.entry.is_dir) {
+ // Create the directory
+ const new_dir = await this.mkdir({ context, parent, name: target_name });
+
+ // Copy all children
+ const children = await this.readdir({ context, node: source });
+ for (const child_uuid of children) {
+ const child_node = await fs.node(new NodeUIDSelector(child_uuid));
+ await child_node.fetchEntry();
+ const child_name = child_node.entry.name;
+
+ await this.copy_tree({
+ context,
+ source: child_node,
+ parent: new_dir,
+ target_name: child_name,
+ });
+ }
+
+ return new_dir;
+ } else {
+ // Copy the file
+ const new_file = await this.write_new({
+ context,
+ parent,
+ name: target_name,
+ file: { stream: { read: () => source.entry.content } },
+ });
+ return new_file;
+ }
+ }
+
+ /**
+ * Write a new file to the filesystem. Throws an error if the destination
+ * already exists.
+ *
+ * @param {Object} param
+ * @param {Context} param.context
+ * @param {FSNodeContext} param.parent: The parent directory of the destination directory.
+ * @param {string} param.name: The name of the destination directory.
+ * @param {Object} param.file: The file to write.
+ * @returns {Promise<FSNodeContext>}
+ */
+ async write_new({ context, parent, name, file }) {
+ // prerequisites: get required path via stat
+ const parent_entry = await this.stat({ selector: parent.selector });
+ if (!parent_entry) {
+ throw APIError.create('invalid_node');
+ }
+ const full_path = _path.join(parent_entry.path, name);
+ const inner_path = this._inner_path(full_path);
+
+ let entry = null;
+ if (this.entriesByPath.has(inner_path)) {
+ throw APIError.create('item_with_same_name_exists', null, {
+ entry_name: full_path,
+ });
+ } else {
+ entry = new MemoryFile({
+ path: inner_path,
+ is_dir: false,
+ content: file.stream.read(),
+ parent_uid: parent_entry.uuid,
+ });
+ this.entriesByPath.set(inner_path, entry.uuid);
+ this.entriesByUUID.set(entry.uuid, entry);
+ }
+
+ const fs = context.get('services').get('filesystem');
+ const node = await fs.node(entry.uuid);
+ await node.fetchEntry();
+
+ this._integrity_check();
+
+ return node;
+ }
+
+ /**
+ * Overwrite an existing file. Throws an error if the destination does not
+ * exist.
+ *
+ * @param {Object} param
+ * @param {Context} param.context
+ * @param {FSNodeContext} param.node: The node to write to.
+ * @param {Object} param.file: The file to write.
+ * @returns {Promise<FSNodeContext>}
+ */
+ async write_overwrite({ context, node, file }) {
+ const entry = await this.stat({ selector: node.selector });
+ if (!entry) {
+ throw APIError.create('invalid_node');
+ }
+ const inner_path = this._inner_path(entry.path);
+
+ this.entriesByPath.set(inner_path, entry.uuid);
+ let original_entry = this.entriesByUUID.get(entry.uuid);
+ if (!original_entry) {
+ throw new Error(`File ${entry.path} does not exist`);
+ } else {
+ if (original_entry.is_dir) {
+ throw new Error(`Cannot overwrite a directory`);
+ }
+
+ original_entry.content = file.stream.read();
+ original_entry.modified = Math.floor(Date.now() / 1000);
+ original_entry.size = original_entry.content ? original_entry.content.length : 0;
+ this.entriesByUUID.set(entry.uuid, original_entry);
+ }
+
+ const fs = context.get('services').get('filesystem');
+ node = await fs.node(original_entry.uuid);
+ await node.fetchEntry();
+
+ this._integrity_check();
+
+ return node;
+ }
+}
+
+module.exports = {
+ MemoryFSProvider,
+};
diff --git a/src/backend/src/modules/puterfs/customfs/MemoryFSService.js b/src/backend/src/modules/puterfs/customfs/MemoryFSService.js
new file mode 100644
index 0000000000..99397ecfbd
--- /dev/null
+++ b/src/backend/src/modules/puterfs/customfs/MemoryFSService.js
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2024-present Puter Technologies Inc.
+ *
+ * This file is part of Puter.
+ *
+ * Puter is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+const BaseService = require("../../../services/BaseService");
+const { MemoryFSProvider } = require("./MemoryFSProvider");
+
+class MemoryFSService extends BaseService {
+ async _init () {
+ const svc_mountpoint = this.services.get('mountpoint');
+ svc_mountpoint.register_mounter('memoryfs', this.as('mounter'));
+ }
+
+ static IMPLEMENTS = {
+ mounter: {
+ async mount ({ path, options }) {
+ const provider = new MemoryFSProvider(path);
+ return provider;
+ }
+ }
+ }
+}
+
+module.exports = {
+ MemoryFSService,
+};
\ No newline at end of file
diff --git a/src/backend/src/modules/puterfs/customfs/README.md b/src/backend/src/modules/puterfs/customfs/README.md
new file mode 100644
index 0000000000..bd66e79e31
--- /dev/null
+++ b/src/backend/src/modules/puterfs/customfs/README.md
@@ -0,0 +1,15 @@
+# Custom FS Providers
+
+This directory contains custom FS providers that are not part of the core PuterFS.
+
+## MemoryFSProvider
+
+This is a demo FS provider that illustrates how to implement a custom FS provider.
+
+## NullFSProvider
+
+An FS provider that mimics `/dev/null`.
+
+## LinuxFSProvider
+
+Provide the ability to mount a Linux directory as a FS provider.
\ No newline at end of file
diff --git a/src/backend/src/modules/puterfs/lib/PuterFSProvider.js b/src/backend/src/modules/puterfs/lib/PuterFSProvider.js
index 67662d6b35..32ee4d06b2 100644
--- a/src/backend/src/modules/puterfs/lib/PuterFSProvider.js
+++ b/src/backend/src/modules/puterfs/lib/PuterFSProvider.js
@@ -22,7 +22,7 @@ const { MultiDetachable } = putility.libs.listener;
const { TDetachable } = putility.traits;
const { TeePromise } = putility.libs.promise;
-const { NodeInternalIDSelector, NodeChildSelector, NodeUIDSelector, RootNodeSelector, NodePathSelector } = require("../../../filesystem/node/selectors");
+const { NodeInternalIDSelector, NodeChildSelector, NodeUIDSelector, RootNodeSelector, NodePathSelector, NodeSelector } = require("../../../filesystem/node/selectors");
const { Context } = require("../../../util/context");
const fsCapabilities = require('../../../filesystem/definitions/capabilities');
const { UploadProgressTracker } = require('../../../filesystem/storage/UploadProgressTracker');
@@ -66,6 +66,52 @@ class PuterFSProvider extends putility.AdvancedBase {
]);
}
+ /**
+ * Check if a given node exists.
+ *
+ * @param {Object} param
+ * @param {NodeSelector} param.selector - The selector used for checking.
+ * @returns {Promise<boolean>} - True if the node exists, false otherwise.
+ */
+ async quick_check ({
+ selector,
+ }) {
+ // a wrapper that access underlying database directly
+ const fsEntryFetcher = Context.get('services').get('fsEntryFetcher');
+
+ // shortcut: has full path
+ if ( selector?.path ) {
+ const entry = await fsEntryFetcher.findByPath(selector.path);
+ return Boolean(entry);
+ }
+
+ // shortcut: has uid
+ if ( selector?.uid ) {
+ const entry = await fsEntryFetcher.findByUID(selector.uid);
+ return Boolean(entry);
+ }
+
+ // shortcut: parent uid + child name
+ if ( selector instanceof NodeChildSelector && selector.parent instanceof NodeUIDSelector ) {
+ return await fsEntryFetcher.nameExistsUnderParent(
+ selector.parent.uid,
+ selector.name,
+ );
+ }
+
+ // shortcut: parent id + child name
+ if ( selector instanceof NodeChildSelector && selector.parent instanceof NodeInternalIDSelector ) {
+ return await fsEntryFetcher.nameExistsUnderParentID(
+ selector.parent.id,
+ selector.name,
+ );
+ }
+
+ // TODO (xiaochen): we should fallback to stat but we cannot at this moment
+ // since stat requires a valid `FSNodeContext` argument.
+ return false;
+ }
+
async stat ({
selector,
options,
@@ -656,9 +702,9 @@ class PuterFSProvider extends putility.AdvancedBase {
*
* @param {Object} param
* @param {Context} param.context
- * @param {FSNode} param.node: The node to write to.
+ * @param {FSNodeContext} param.node: The node to write to.
* @param {File} param.file: The file to write.
- * @returns {Promise}
+ * @returns {Promise<FSNodeContext>}
*/
async write_overwrite({ context, node, file }) {
const {
@@ -764,7 +810,7 @@ class PuterFSProvider extends putility.AdvancedBase {
const svc_event = svc.get('event');
const svc_mountpoint = svc.get('mountpoint');
- const storage = svc_mountpoint.get_storage();
+ const storage = svc_mountpoint.get_storage(this.constructor);
bucket ??= config.s3_bucket;
bucket_region ??= config.s3_region ?? config.region;
diff --git a/src/backend/src/modules/selfhosted/DefaultUserService.js b/src/backend/src/modules/selfhosted/DefaultUserService.js
index ad4f641385..b5c633684f 100644
--- a/src/backend/src/modules/selfhosted/DefaultUserService.js
+++ b/src/backend/src/modules/selfhosted/DefaultUserService.js
@@ -89,6 +89,8 @@ class DefaultUserService extends BaseService {
);
if ( ! is_default_password ) return;
+ console.log(`password for admin is: ${tmp_password}`);
+
// show console widget
this.default_user_widget = ({ is_docker }) => {
if ( is_docker ) {
diff --git a/src/backend/src/modules/web/lib/eggspress.js b/src/backend/src/modules/web/lib/eggspress.js
index 2c81242f84..32575d6e9d 100644
--- a/src/backend/src/modules/web/lib/eggspress.js
+++ b/src/backend/src/modules/web/lib/eggspress.js
@@ -24,6 +24,7 @@ const api_error_handler = require('./api_error_handler.js');
const APIError = require('../../../api/APIError.js');
const { Context } = require('../../../util/context.js');
const { subdomain } = require('../../../helpers.js');
+const config = require('../../../config.js');
/**
* eggspress() is a factory function for creating express routers.
@@ -169,6 +170,9 @@ module.exports = function eggspress (route, settings, handler) {
return next();
}
}
+ if ( config.env === 'dev' ) {
+ console.log(`request url: ${req.url}, body: ${JSON.stringify(req.body)}`);
+ }
try {
const expected_ctx = res.locals.ctx;
const received_ctx = Context.get(undefined, { allow_fallback: true });
@@ -179,18 +183,14 @@ module.exports = function eggspress (route, settings, handler) {
});
} else await handler(req, res, next);
} catch (e) {
- if (e instanceof TypeError || e instanceof ReferenceError) {
- // We add a dedicated branch for TypeError/ReferenceError since it usually
- // indicates a bug in the backend. And it's pretty convenient to debug if we
- // set a breakpoint here.
- //
- // Typical TypeError:
- // - read properties of undefined
- console.error(e);
- api_error_handler(e, req, res, next);
- } else {
- api_error_handler(e, req, res, next);
+ if ( config.env === 'dev' ) {
+ if (! (e instanceof APIError)) {
+ // Any non-APIError indicates an unhandled error (i.e. a bug) from the backend.
+ // We add a dedicated branch to facilitate debugging.
+ console.error(e);
+ }
}
+ api_error_handler(e, req, res, next);
}
};
if (settings.allowedMethods.includes('GET')) {
diff --git a/src/backend/src/services/LocalDiskStorageService.js b/src/backend/src/services/LocalDiskStorageService.js
index 79434bde9b..e093f130fb 100644
--- a/src/backend/src/services/LocalDiskStorageService.js
+++ b/src/backend/src/services/LocalDiskStorageService.js
@@ -18,6 +18,7 @@
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
const { LocalDiskStorageStrategy } = require("../filesystem/strategies/storage_a/LocalDiskStorageStrategy");
+const { PuterFSProvider } = require("../modules/puterfs/lib/PuterFSProvider");
const { TeePromise } = require('@heyputer/putility').libs.promise;
const { progress_stream, size_limit_stream } = require("../util/streamutil");
const BaseService = require("./BaseService");
@@ -52,7 +53,7 @@ class LocalDiskStorageService extends BaseService {
svc_contextInit.register_value('storage', storage);
const svc_mountpoint = this.services.get('mountpoint');
- svc_mountpoint.set_storage(storage);
+ svc_mountpoint.set_storage(PuterFSProvider, storage);
}
diff --git a/src/backend/src/services/MemoryStorageService.js b/src/backend/src/services/MemoryStorageService.js
new file mode 100644
index 0000000000..98b16acee8
--- /dev/null
+++ b/src/backend/src/services/MemoryStorageService.js
@@ -0,0 +1,42 @@
+// METADATA // {"ai-commented":{"service":"mistral","model":"mistral-large-latest"}}
+/*
+ * Copyright (C) 2024-present Puter Technologies Inc.
+ *
+ * This file is part of Puter.
+ *
+ * Puter is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published
+ * by the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+const BaseService = require("./BaseService");
+const { MemoryFSProvider } = require("../modules/puterfs/customfs/MemoryFSProvider");
+const { Readable } = require("stream");
+
+class MemoryStorageService extends BaseService {
+ async _init () {
+ console.log('MemoryStorageService._init');
+
+ const svc_mountpoint = this.services.get('mountpoint');
+ svc_mountpoint.set_storage(MemoryFSProvider, this);
+ }
+
+ async create_read_stream (uuid, options) {
+ const memory_file = options?.memory_file;
+ if ( ! memory_file ) {
+ throw new Error('MemoryStorageService.create_read_stream: memory_file is required');
+ }
+
+ return Readable.from(memory_file.content);
+ }
+}
+
+module.exports = MemoryStorageService;
\ No newline at end of file
diff --git a/tools/api-tester/README.md b/tools/api-tester/README.md
index 95d4eaebf5..0ef44d24b1 100644
--- a/tools/api-tester/README.md
+++ b/tools/api-tester/README.md
@@ -4,13 +4,12 @@ A test framework for testing the backend API of puter.
## Table of Contents
-- [API Tester](#api-tester)
- [How to use](#how-to-use)
- [Workflow](#workflow)
- [Shorthands](#shorthands)
- [Basic Concepts](#basic-concepts)
- [Behaviors](#behaviors)
- - [Isolation of `t.cwd`](#isolation-of-t-cwd)
+ - [Working directory (`t.cwd`)](#working-directory-t-cwd)
- [Implementation](#implementation)
- [TODO](#todo)
@@ -34,10 +33,11 @@ All commands below should be run from the root directory of puter.
Fields:
- url: The endpoint of the backend server. (default: http://api.puter.localhost:4100/)
- - username: The username of the admin user. (e.g. admin)
- - token: The token of the user. (can be obtained by typing `puter.authToken` in Developer Tools's console)
+ - username: The username of the user to test. (e.g. `admin`)
+ - token: The token of the user. (can be obtained by logging in on the webpage and typing `puter.authToken` in Developer Tools's console)
+ - mountpoints: The mountpoints to test. (default config includes 2 mountpoints: `/` for "puter fs provider" and `/admin/tmp` for "memory fs provider")
-3. Run the tests:
+3. Run all tests (unit tests and benchmarks):
```bash
node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml
@@ -45,19 +45,53 @@ All commands below should be run from the root directory of puter.
### Shorthands
-- Run unit tests only:
+- Run all tests (unit tests and benchmarks):
+
+ ```bash
+ node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml
+ ```
+
+- Run all unit tests:
```bash
node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit
```
+- Run all benchmarks:
+
+ ```bash
+ node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --bench
+ ```
+
- Filter tests by suite name:
```bash
node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --suite=mkdir
```
-- Rerun failed tests in the last run:
+- Filter benchmarks by name:
+
+ ```bash
+ node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --bench --suite=stat_intensive_1
+ ```
+
+- Stop on first failure:
+
+ ```bash
+ node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --stop-on-failure
+ ```
+
+- (unimplemented) Filter tests by test name:
+
+ ```bash
+ # (wildcard matching) Run tests containing "memoryfs" in the name
+ node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --test='*memoryfs*'
+
+ # (exact matching) Run the test "mkdir in memoryfs"
+ node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --unit --test='mkdir in memoryfs'
+ ```
+
+- (unimplemented) Rerun failed tests in the last run:
```bash
node ./tools/api-tester/apitest.js --config=./tools/api-tester/config.yml --rerun-failed
@@ -98,10 +132,13 @@ module.exports = {
## Behaviors
-### Isolation of `t.cwd`
+### Working directory (`t.cwd`)
-- `t.cwd` is reset at the beginning of each test suite, since a test suite usually doesn't want to be affected by other test suites.
-- `t.cwd` will be inherited from the cases in the same test suite, since a leaf case might want to share the context with its parent/sibling cases.
+- The working directory is stored in `t.cwd`.
+- All filesystem operations are performed relative to the working directory, if the given path is not absolute. (e.g., `t.mkdir('foo')`, `t.cd('foo')`, `t.stat('foo')`, etc.)
+- Tests will be run under all mountpoints. The default working directory for a mountpoint is `${mountpoint.path}/{username}/api_test`. (This is subject to change in the future, the reason we include `admin` in the path is to ensure the test user `admin` has write access, see [Permission Documentation](https://github.com/HeyPuter/puter/blob/3290440f4bf7a263f37bc5233565f8fec146f17b/src/backend/doc/A-and-A/permission.md#permission-options) for details.)
+- The working directory is reset at the beginning of each test suite, since a test suite usually doesn't want to be affected by other test suites.
+- The working directory will be inherited from the cases in the same test suite, since a leaf case might want to share the context with its parent/sibling cases.
```js
module.exports = {
@@ -126,5 +163,5 @@ module.exports = {
## TODO
-- [ ] Update usage of apitest.js. (Is it possible to generate the usage automatically?)
-- [ ] Integrate it into CI, optionally running it only in specific scenarios (e.g., when backend code changes).
+- [ ] Reset `t.cwd` if a test case fails. Currently, `t.cwd` is not reset if a test case fails.
+- [ ] Integrate apitest into CI, optionally running it only in specific scenarios (e.g., when backend code changes).
diff --git a/tools/api-tester/apitest.js b/tools/api-tester/apitest.js
index b5dcb9d734..b5d5b33fda 100644
--- a/tools/api-tester/apitest.js
+++ b/tools/api-tester/apitest.js
@@ -9,7 +9,7 @@ const { parseArgs } = require('node:util');
const args = process.argv.slice(2);
-let config, report, suiteName;
+let config, report, suiteName, onlycase, bench, unit, stopOnFailure, id;
try {
const parsed = parseArgs({
@@ -24,6 +24,7 @@ try {
bench: { type: 'boolean' },
unit: { type: 'boolean' },
suite: { type: 'string' },
+ 'stop-on-failure': { type: 'boolean' },
},
allowPositionals: true,
});
@@ -35,6 +36,7 @@ try {
bench,
unit,
suite: suiteName,
+ 'stop-on-failure': stopOnFailure,
}, positionals: [id] } = parsed);
onlycase = onlycase !== undefined ? Number.parseInt(onlycase) : undefined;
@@ -49,6 +51,7 @@ try {
' --config= (required) Path to configuration file\n' +
' --report= (optional) Output file for full test results\n' +
' --suite= (optional) Run only tests with matching suite name\n' +
+ ' --stop-on-failure (optional) Stop execution on first test failure\n' +
''
);
process.exit(1);
@@ -58,19 +61,70 @@ const conf = YAML.parse(fs.readFileSync(config).toString());
const main = async () => {
- const context = {
- options: {
- onlycase,
- suite: suiteName,
+ const unit_test_results = [];
+ const benchmark_results = [];
+ for (const mountpoint of conf.mountpoints) {
+ const { unit_test_results: results, benchmark_results: benchs } = await test({ mountpoint });
+ unit_test_results.push(...results);
+ benchmark_results.push(...benchs);
+ }
+
+ // hard-coded identifier for ci script
+ console.log("==================== nightly build results begin ====================")
+
+ // print unit test results
+ let tbl = {};
+ for ( const result of unit_test_results ) {
+ tbl[result.name + ' - ' + result.settings] = {
+ passed: result.caseCount - result.failCount,
+ failed: result.failCount,
+ total: result.caseCount,
+ 'duration (s)': result.duration ? result.duration.toFixed(2) : 'N/A',
+ }
+ }
+ console.table(tbl);
+
+ // print benchmark results
+ if (benchmark_results.length > 0) {
+ tbl = {};
+ for ( const result of benchmark_results ) {
+ const fs_provider = result.fs_provider || 'unknown';
+ tbl[result.name + ' - ' + fs_provider] = {
+ 'duration (s)': result.duration ? (result.duration / 1000).toFixed(2) : 'N/A',
+ }
+ }
+ console.table(tbl);
+
+ // print description of each benchmark since it's too long to fit in the table
+ const seen = new Set();
+ for ( const result of benchmark_results ) {
+ if ( seen.has(result.name) ) continue;
+ seen.add(result.name);
+
+ if ( result.description ) {
+ console.log(result.name + ': ' + result.description);
+ }
}
- };
- const ts = new TestSDK(conf, context);
- try {
- await ts.delete('api_test', { recursive: true });
- } catch (e) {
}
- await ts.mkdir('api_test', { overwrite: true });
- ts.cd('api_test');
+
+ // hard-coded identifier for ci script
+ console.log("==================== nightly build results end ====================")
+}
+
+/**
+ * Run test using the given config, and return the test results
+ *
+ * @param {Object} options
+ * @param {Object} options.mountpoint
+ * @returns {Promise