Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions server/system_prompt.txt → modelfiles/mem-agent
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
FROM driaforall/mem-agent-mlx-4bit
SYSTEM """
# Memory Agent System Prompt

You are an LLM agent with a self-managed, Obsidian-like memory system. You interact with memory using Python code blocks.
Expand Down Expand Up @@ -304,3 +306,4 @@ result = update_file("user.md", old, new)
## Filtering

In the user query, you might receive a fact-retrieval question that includes <filter> tags. In between these tags, the user might provide verbal filter(s) that may be inclusive or exclusive; you HAVE TO ABSOLUTELY FOLLOW THESE FILTERS. These filters provide privacy over user information. If there are no filters, just return the answer as is.
"""
3 changes: 3 additions & 0 deletions scripts/bundler.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ set -euo pipefail

BINARY_NAME="tiles"
DIST_DIR="dist"
MODELFILE_DIR="modelfiles"
SERVER_DIR="server"
TARGET="release"

Expand Down Expand Up @@ -36,6 +37,8 @@ rm -rf "${DIST_DIR}/tmp/server/__pycache__"
rm -rf "${DIST_DIR}/tmp/server/.venv"
rm -rf "${DIST_DIR}/tmp/server/stack"

cp -r "${MODELFILE_DIR}" "${DIST_DIR}/tmp/"

echo "📦 Creating ${OUT_NAME}.tar.gz..."
tar --exclude-from=scripts/tar.exclude -czf "${DIST_DIR}/${OUT_NAME}.tar.gz" -C "${DIST_DIR}/tmp" .

Expand Down
12 changes: 10 additions & 2 deletions scripts/install.sh
Original file line number Diff line number Diff line change
@@ -1,12 +1,13 @@
#!/usr/bin/env bash
set -euo pipefail

ENV="prod" # prod is another env, try taking it from github env
ENV="dev" # prod is another env, try taking it from github env
Comment thread
coderabbitai[bot] marked this conversation as resolved.
Outdated
REPO="tilesprivacy/tiles"
# VERSION="${TILES_VERSION:-latest}"
VERSION="0.4.0-rc.1"
INSTALL_DIR="$HOME/.local/bin" # CLI install location
SERVER_DIR="$HOME/.local/share/tiles/server" # Python server folder
SERVER_DIR="$HOME/.local/lib/tiles/server" # Python server folder
MODELFILE_DIR="$HOME/.local/lib/tiles/modelfiles" # Modelfiles folder
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Misleading comment on MODELFILE_DIR.

The comment says "Python server folder" but this is the modelfiles directory. Appears to be a copy-paste error from line 9.

Suggested fix
-MODELFILE_DIR="$HOME/.local/lib/tiles/modelfiles"  # Python server folder
+MODELFILE_DIR="$HOME/.local/lib/tiles/modelfiles"  # Modelfiles folder
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
MODELFILE_DIR="$HOME/.local/lib/tiles/modelfiles" # Python server folder
MODELFILE_DIR="$HOME/.local/lib/tiles/modelfiles" # Modelfiles folder
🤖 Prompt for AI Agents
In `@scripts/install.sh` at line 10, The comment for the MODELFILE_DIR variable is
incorrect; update the comment next to
MODELFILE_DIR="$HOME/.local/lib/tiles/modelfiles" to accurately describe that
this path is for storing model files (the modelfiles directory), not the Python
server folder—ensure the comment clearly states it holds model files for the
application.

TMPDIR="$(mktemp -d)"
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ARCH=$(uname -m)
Expand Down Expand Up @@ -34,6 +35,13 @@ log "📦 Installing tiles binary to ${INSTALL_DIR}..."
mkdir -p "${INSTALL_DIR}"
install -m 755 "${TMPDIR}/tiles" "${INSTALL_DIR}/tiles"

log "Unpacking libs ..."
rm -rf "${MODELFILE_DIR}"

mkdir -p "${MODELFILE_DIR}"

cp -r "${TMPDIR}/modelfiles"/* "${MODELFILE_DIR}/"

log "📦 Installing Python server to ${SERVER_DIR}..."
rm -rf "${SERVER_DIR}"

Expand Down
3 changes: 1 addition & 2 deletions server/api.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
from fastapi import FastAPI, HTTPException

from .schemas import ChatMessage, ChatCompletionRequest, StartRequest, downloadRequest
from .config import SYSTEM_PROMPT
import logging
import sys
from typing import Optional
Expand Down Expand Up @@ -45,7 +44,7 @@ async def start_model(request: StartRequest):
"""Load the model and start the agent"""
global _messages, _runner, _memory_path

_messages = [ChatMessage(role="system", content=SYSTEM_PROMPT)]
_messages = [ChatMessage(role="system", content=request.system_prompt)]
_memory_path = request.memory_path
logger.info(f"{runtime.backend}")
runtime.backend.get_or_load_model(request.model)
Expand Down
4 changes: 0 additions & 4 deletions server/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,4 @@
PORT = 6969
MODEL_ID = "driaforall/mem-agent"

prompt_path = Path(__file__).parent / "system_prompt.txt"
MEMORY_PATH = os.path.expanduser("~") + "/tiles_memory"

with open(prompt_path, "r", encoding="utf-8") as f:
SYSTEM_PROMPT = f.read().strip()
2 changes: 1 addition & 1 deletion server/schemas.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ class ModelInfo(BaseModel):
class StartRequest(BaseModel):
    # Request body for the /start endpoint: loads a model and boots the agent.
    # Model identifier to load (e.g. "driaforall/mem-agent-mlx-4bit").
    model: str
    # Filesystem path to the agent's memory folder.
    memory_path: str
    # System prompt text used as the first chat message; supplied by the CLI
    # from the parsed Modelfile. NOTE(review): currently accepts empty strings —
    # consider a min_length=1 constraint to match the Rust side's validation.
    system_prompt: str
Comment on lines 59 to +62
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

# First, let's find and read the server/schemas.py file
fd -type f -name "schemas.py" | grep -E "server|schemas"

Repository: tilesprivacy/tiles

Length of output: 180


🏁 Script executed:

# Also search for dependency files to identify Pydantic version
fd -type f -name "requirements*.txt" -o -name "setup.py" -o -name "pyproject.toml" -o -name "setup.cfg" -o -name "poetry.lock" -o -name "Pipfile"

Repository: tilesprivacy/tiles

Length of output: 232


🏁 Script executed:

# Search for any pydantic-related imports to understand version usage
rg "from pydantic import|import pydantic" -A 2

Repository: tilesprivacy/tiles

Length of output: 328


🏁 Script executed:

# Read the server/schemas.py file to see current state
cat -n server/schemas.py | head -80

Repository: tilesprivacy/tiles

Length of output: 2088


🏁 Script executed:

# Search for pydantic version in common dependency files
find . -maxdepth 2 -type f \( -name "requirements*.txt" -o -name "setup.py" -o -name "pyproject.toml" -o -name "setup.cfg" \) -exec cat {} \;

Repository: tilesprivacy/tiles

Length of output: 461


🏁 Script executed:

# Also check for environment specs or lock files
find . -maxdepth 2 -type f \( -name "poetry.lock" -o -name "Pipfile" -o -name "Pipfile.lock" \) -exec head -50 {} \;

Repository: tilesprivacy/tiles

Length of output: 44


🏁 Script executed:

# Search for how system_prompt is used throughout the codebase
rg "system_prompt" -B 2 -A 2

Repository: tilesprivacy/tiles

Length of output: 4837


🏁 Script executed:

# Check what version of FastAPI is being used (which implies Pydantic version)
rg "fastapi|pydantic" pyproject.toml setup.py requirements.txt 2>/dev/null | head -20

Repository: tilesprivacy/tiles

Length of output: 44


🏁 Script executed:

# Look for any validators or validators decorator on StartRequest
ast-grep --pattern 'class StartRequest { $$$ }'

Repository: tilesprivacy/tiles

Length of output: 44


Enforce non-empty system_prompt.

The field currently accepts empty strings, which would result in blank system messages. The Rust implementation explicitly rejects empty system prompts, and the Python schema should enforce this constraint consistently.

🔧 Proposed fix
 class StartRequest(BaseModel):
     model: str
     memory_path: str
-    system_prompt: str 
+    system_prompt: str = Field(..., min_length=1)
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
class StartRequest(BaseModel):
model: str
memory_path: str
system_prompt: str
class StartRequest(BaseModel):
model: str
memory_path: str
system_prompt: str = Field(..., min_length=1)
🤖 Prompt for AI Agents
In `@server/schemas.py` around lines 59 - 62, The StartRequest schema allows an
empty system_prompt; update the StartRequest model to reject empty strings by
adding a constraint/validator for system_prompt (e.g., use
pydantic.constr(min_length=1) or Field(..., min_length=1) or add a `@validator`
for "system_prompt" that raises a ValueError if the value is
empty/only-whitespace) so the Python schema matches the Rust behavior and
prevents blank system messages.


class downloadRequest(BaseModel):
model: str
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ markupsafe==3.0.3
mlx-lm==0.28.3
mypy-extensions==1.1.0
numpy==2.4.1
packaging==25.0
packaging==26.0
pathspec==1.0.3
platformdirs==4.5.1
protobuf==6.33.4
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
{
"lock_input_hash": "sha256:182c606e20dd957344cc3adc54391f47f4b6dd80b4481ddf219392a7aad6e0ce",
"lock_version": 1,
"locked_at": "2026-01-21T09:13:58.607286+00:00",
"locked_at": "2026-01-22T05:41:48.443112+00:00",
"other_inputs_hash": "sha256:63b3c2cfe2ec414938e81dace7aac779c7b902bae681618cd8827e9f16880985",
"requirements_hash": "sha256:41b3e6ec3cd37289edeb1c134ce836c0dfa7843d7dd3dc28a1b46880d77bf029",
"version_inputs_hash": "sha256:53726e1053a34cced52a7d0c9b2aa679dad94259b51681758674ae4320bbb7a4"
"requirements_hash": "sha256:a08c15387b6f199fe37fad0855c14ffde941d1c0b49f94fa1ed48a9464fab9a6",
"version_inputs_hash": "sha256:58db986b7cd72eeded675f7c9afd8138fe024fb51451131b5562922bbde3cf43"
}
10 changes: 5 additions & 5 deletions server/stack/requirements/app-server/pylock.app-server.toml
Original file line number Diff line number Diff line change
Expand Up @@ -427,16 +427,16 @@ sha256 = "9171a42fcad32dcf3fa86f0a4faa5e9f8facefdb276f54b8b390d90447cff4e2"

[[packages]]
name = "packaging"
version = "25.0"
version = "26.0"
index = "https://pypi.org/simple"

[[packages.wheels]]
url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl"
upload-time = 2025-04-19T11:48:57Z
size = 66469
url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl"
upload-time = 2026-01-21T20:50:37Z
size = 74366

[packages.wheels.hashes]
sha256 = "29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"
sha256 = "b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529"

[[packages]]
name = "pathspec"
Expand Down
2 changes: 1 addition & 1 deletion tilekit/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "tilekit"
version = "0.1.0"
version = "0.2.0"
edition = "2024"

[dependencies]
Expand Down
2 changes: 1 addition & 1 deletion tiles/src/commands/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ use tiles::{core::health, runtime::RunArgs};
pub use tilekit::optimize::optimize;

pub async fn run(runtime: &Runtime, run_args: RunArgs) {
runtime.run(run_args).await;
let _ = runtime.run(run_args).await;
Comment on lines 10 to +11
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

🧩 Analysis chain

🏁 Script executed:

# First, let's check the file and the specific lines
cat -n tiles/src/commands/mod.rs | head -20

Repository: tilesprivacy/tiles

Length of output: 767


🏁 Script executed:

# Find the Runtime type definition and run method signature
fd --type=rs | xargs grep -l "impl Runtime" | head -5

Repository: tilesprivacy/tiles

Length of output: 231


🏁 Script executed:

# Search for all runtime.run call sites as mentioned in the review
rg -nP '\bruntime\.run\s*\(.*\)\.await' --type=rs

Repository: tilesprivacy/tiles

Length of output: 88


🏁 Script executed:

# Also check for just runtime.run without .await in case it's not async
rg -nP '\bruntime\.run\s*\(' --type=rs -A 2

Repository: tilesprivacy/tiles

Length of output: 88


🏁 Script executed:

# Find Runtime type definition
find . -name "*.rs" -type f | xargs grep -l "impl Runtime" | head -5

Repository: tilesprivacy/tiles

Length of output: 88


🏁 Script executed:

# Search for runtime.run call sites without using invalid --type flag
rg 'runtime\.run\s*\(' -A 2 --color=never

Repository: tilesprivacy/tiles

Length of output: 434


🏁 Script executed:

# Look for Runtime::run method signature in the codebase
rg 'fn run\(' -B 3 -A 3 --color=never | head -50

Repository: tilesprivacy/tiles

Length of output: 1603


Don't swallow runtime errors.

Line 11 discards the Result from Runtime::run, which now returns Result<()>. This silently hides failures and allows the CLI to exit with success even when operations fail.

🔧 Proposed fix
 pub async fn run(runtime: &Runtime, run_args: RunArgs) {
-    let _ = runtime.run(run_args).await;
+    if let Err(err) = runtime.run(run_args).await {
+        eprintln!("Runtime failed: {err}");
+        std::process::exit(1);
+    }
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
pub async fn run(runtime: &Runtime, run_args: RunArgs) {
runtime.run(run_args).await;
let _ = runtime.run(run_args).await;
pub async fn run(runtime: &Runtime, run_args: RunArgs) {
if let Err(err) = runtime.run(run_args).await {
eprintln!("Runtime failed: {err}");
std::process::exit(1);
}
}
🤖 Prompt for AI Agents
In `@tiles/src/commands/mod.rs` around lines 10 - 11, The current run function in
mod.rs is discarding the Result from Runtime::run, hiding failures; update the
run function to handle the Result returned by Runtime::run (call Runtime::run on
the provided runtime and await it), and on Err(e) either propagate the error
(change run's signature to return Result<()> and return the result of
runtime.run(...)) or log/print the error and exit with a non-zero status (e.g.,
eprintln!("...: {}", e); std::process::exit(1)); ensure you reference
Runtime::run and the run function in mod.rs when making the change.

}

pub fn set_memory(path: &str) {
Expand Down
2 changes: 1 addition & 1 deletion tiles/src/runtime/cpu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ impl CPURuntime {
pub fn new() -> Self {
CPURuntime {}
}
pub async fn run(&self, _run_args: super::RunArgs) {
pub async fn run(&self, _run_args: super::RunArgs) -> Result<()> {
unimplemented!()
}

Expand Down
44 changes: 30 additions & 14 deletions tiles/src/runtime/mlx.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use crate::runtime::RunArgs;
use crate::utils::config::{
create_default_memory_folder, get_config_dir, get_default_memory_path, get_memory_path,
get_server_dir, set_memory_path,
create_default_memory_folder, get_config_dir, get_default_memory_path, get_lib_dir,
get_memory_path, set_memory_path,
};
use crate::utils::hf_model_downloader::*;
use anyhow::{Context, Result};
Expand All @@ -17,6 +17,7 @@ use rustyline::{Config, Editor, Helper};
use serde_json::{Value, json};
use std::fs;
use std::fs::File;
use std::path::PathBuf;
use std::process::Stdio;
use std::time::Duration;
use std::{io, process::Command};
Expand All @@ -42,26 +43,29 @@ impl MLXRuntime {
MLXRuntime {}
}

pub async fn run(&self, run_args: super::RunArgs) {
const DEFAULT_MODELFILE: &str = "FROM driaforall/mem-agent-mlx-4bit";
//Parse modelfile
pub async fn run(&self, run_args: super::RunArgs) -> Result<()> {
let default_modelfile_path = get_default_modelfile()?;
let default_modelfile =
tilekit::modelfile::parse_from_file(default_modelfile_path.to_str().unwrap()).unwrap();
let modelfile_parse_result = if let Some(modelfile_str) = &run_args.modelfile_path {
tilekit::modelfile::parse_from_file(modelfile_str.as_str())
} else {
tilekit::modelfile::parse(DEFAULT_MODELFILE)
Err("NOT PROVIDED".to_string())
};
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Avoid using error strings for control flow; handle parsing failures properly.

Several issues in this block:

  1. Line 49: Double unwrap() can panic if the path contains invalid UTF-8.
  2. Lines 52-53: Using Err("NOT PROVIDED") as a sentinel for "no path given" is fragile. Consider using Option<Modelfile> instead of Result to distinguish "not provided" from "parse failed".
  3. Line 58: String matching on "NOT PROVIDED" couples the logic to an error message.
Suggested refactor using Option
-        let default_modelfile =
-            tilekit::modelfile::parse_from_file(default_modelfile_path.to_str().unwrap()).unwrap();
-        let modelfile_parse_result = if let Some(modelfile_str) = &run_args.modelfile_path {
-            tilekit::modelfile::parse_from_file(modelfile_str.as_str())
-        } else {
-            Err("NOT PROVIDED".to_string())
-        };
-
-        let modelfile = match modelfile_parse_result {
-            Ok(mf) => mf,
-            Err(err) if err == "NOT PROVIDED" => default_modelfile.clone(),
-            Err(_err) => {
-                println!("Invalid Modelfile");
-                return Ok(());
-            }
-        };
+        let default_modelfile_str = default_modelfile_path
+            .to_str()
+            .context("Default modelfile path is not valid UTF-8")?;
+        let default_modelfile = tilekit::modelfile::parse_from_file(default_modelfile_str)
+            .map_err(|e| anyhow::anyhow!("Failed to parse default modelfile: {}", e))?;
+
+        let modelfile = match &run_args.modelfile_path {
+            Some(modelfile_str) => {
+                match tilekit::modelfile::parse_from_file(modelfile_str.as_str()) {
+                    Ok(mf) => mf,
+                    Err(err) => {
+                        return Err(anyhow::anyhow!("Invalid Modelfile: {}", err));
+                    }
+                }
+            }
+            None => default_modelfile.clone(),
+        };
🤖 Prompt for AI Agents
In `@tiles/src/runtime/mlx.rs` around lines 47 - 54, The current code uses
unwrap() on default_modelfile_path.to_str() and encodes "no path" as Err("NOT
PROVIDED"), which can panic on invalid UTF-8 and mixes control flow with error
strings; change the logic to: obtain default_modelfile_path via
get_default_modelfile(), convert the Path/OsStr safely (e.g., to_string_lossy()
or work with Path directly) and attempt parse_from_file, and represent the
optional override as Option<Modelfile> instead of Result—i.e., if
run_args.modelfile_path.is_some() try parsing and return a Result<Modelfile,
ParseError> for failures, otherwise fall back to parsed default; update uses of
modelfile_parse_result (and any string checks for "NOT PROVIDED") to match on
Option/Result so parsing failures are handled explicitly and no unwrap() or
sentinel error strings are used (references: get_default_modelfile,
tilekit::modelfile::parse_from_file, run_args.modelfile_path,
modelfile_parse_result).


let modelfile = match modelfile_parse_result {
Ok(mf) => mf,
Err(err) if err == "NOT PROVIDED" => default_modelfile.clone(),
Err(_err) => {
println!("Invalid Modelfile");
return;
return Ok(());
}
Comment thread
coderabbitai[bot] marked this conversation as resolved.
};

let _res = run_model_with_server(self, modelfile, &run_args)
let _res = run_model_with_server(self, modelfile, default_modelfile, &run_args)
.await
.inspect_err(|e| eprintln!("Failed to run the model due to {e}"));
Ok(())
}
Comment thread
coderabbitai[bot] marked this conversation as resolved.
Outdated

#[allow(clippy::zombie_processes)]
Expand All @@ -76,10 +80,10 @@ impl MLXRuntime {
}

let config_dir = get_config_dir()?;
let mut server_dir = get_server_dir()?;
let mut server_dir = get_lib_dir()?;
let pid_file = config_dir.join("server.pid");
fs::create_dir_all(&config_dir).context("Failed to create config directory")?;

server_dir = server_dir.join("server");
let stdout_log = File::create(config_dir.join("server.out.log"))?;
let stderr_log = File::create(config_dir.join("server.err.log"))?;
let server_path = server_dir.join("stack_export_prod/app-server/bin/python");
Expand Down Expand Up @@ -257,6 +261,7 @@ fn show_help(model_name: &str) {
async fn run_model_with_server(
mlx_runtime: &MLXRuntime,
modelfile: Modelfile,
default_modelfile: Modelfile,
run_args: &RunArgs,
) -> Result<()> {
if !cfg!(debug_assertions) {
Expand All @@ -268,7 +273,7 @@ async fn run_model_with_server(
// loading the model from mem-agent via daemon server
let memory_path = get_or_set_memory_path().context("Setting/Retrieving memory_path failed")?;
let modelname = modelfile.from.as_ref().unwrap();
match load_model(modelname, &memory_path).await {
match load_model(&modelfile, &default_modelfile, &memory_path).await {
Ok(_) => start_repl(mlx_runtime, modelname, run_args).await,
Err(err) => return Err(anyhow::anyhow!(err)),
}
Expand Down Expand Up @@ -426,11 +431,17 @@ pub async fn ping() -> Result<(), String> {
}
}

async fn load_model(model_name: &str, memory_path: &str) -> Result<()> {
async fn load_model(
modelfile: &Modelfile,
default_modelfile: &Modelfile,
memory_path: &str,
) -> Result<()> {
let client = Client::new();
let model_name = modelfile.from.clone().unwrap();
let body = json!({
"model": model_name,
"memory_path": memory_path
"memory_path": memory_path,
"system_prompt": modelfile.system.clone().unwrap_or(default_modelfile.system.clone().unwrap())
Comment thread
madclaws marked this conversation as resolved.
});

let res = client
Expand All @@ -442,7 +453,7 @@ async fn load_model(model_name: &str, memory_path: &str) -> Result<()> {
StatusCode::OK => Ok(()),
StatusCode::NOT_FOUND => {
println!("Downloading {}\n", model_name);
match pull_model(model_name).await {
match pull_model(&model_name).await {
Ok(_) => {
println!("\nDownloading completed \n");
Ok(())
Expand Down Expand Up @@ -549,3 +560,8 @@ async fn wait_until_server_is_up() {
}
}
}

/// Returns the path of the bundled default modelfile
/// (`modelfiles/mem-agent` under the tiles lib directory).
fn get_default_modelfile() -> Result<PathBuf> {
    Ok(get_lib_dir()?.join("modelfiles/mem-agent"))
}
2 changes: 1 addition & 1 deletion tiles/src/runtime/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ pub enum Runtime {
}

impl Runtime {
pub async fn run(&self, run_args: RunArgs) {
pub async fn run(&self, run_args: RunArgs) -> Result<()> {
match self {
Runtime::Mlx(runtime) => runtime.run(run_args).await,
Runtime::Cpu(runtime) => runtime.run(run_args).await,
Expand Down
15 changes: 11 additions & 4 deletions tiles/src/utils/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,17 +60,17 @@ pub fn create_default_memory_folder() -> Result<PathBuf> {
Ok(memory_path)
}

pub fn get_server_dir() -> Result<PathBuf> {
pub fn get_lib_dir() -> Result<PathBuf> {
if cfg!(debug_assertions) {
let base_dir = env::current_dir().context("Failed to fetch CURRENT_DIR")?;
Ok(base_dir.join("server"))
Ok(base_dir)
} else {
let home_dir = env::home_dir().context("Failed to fetch $HOME")?;
let data_dir = match env::var("XDG_DATA_HOME") {
Ok(val) => PathBuf::from(val),
Err(_err) => home_dir.join(".local/share"),
Err(_err) => home_dir.join(".local/lib"),
};
Ok(data_dir.join("tiles/server"))
Ok(data_dir.join("tiles"))
}
}
Comment on lines +63 to 72
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
# First, find and examine the file to verify the actual code structure
echo "=== Checking file structure and line numbers ==="
wc -l tiles/src/utils/config.rs
echo ""
echo "=== Lines 63-75 context ==="
sed -n '60,80p' tiles/src/utils/config.rs
echo ""
echo "=== Lines 100-115 (checking for is_memory_model) ==="
sed -n '100,115p' tiles/src/utils/config.rs
echo ""
echo "=== Searching for get_lib_dir and XDG_DATA_HOME usage ==="
rg -n "get_lib_dir|XDG_DATA_HOME" --type=rust -A2 -B2

Repository: tilesprivacy/tiles

Length of output: 3228


🌐 Web query:

XDG Base Directory Specification default paths

💡 Result:

Default XDG base directories (used if the corresponding environment variable is unset or empty):

  • XDG_CONFIG_HOME = $HOME/.config. [1]
  • XDG_DATA_HOME = $HOME/.local/share. [1]
  • XDG_STATE_HOME = $HOME/.local/state. [1][2]
  • XDG_CACHE_HOME = $HOME/.cache. [1]
  • XDG_RUNTIME_DIR = (no fixed default; typically set to /run/user/$UID by systemd/pam; must be a 0700-owned, per-login runtime directory). [1]
  • XDG_CONFIG_DIRS = /etc/xdg. [1]
  • XDG_DATA_DIRS = /usr/local/share/:/usr/share/. [1]

Sources: XDG Base Directory Specification and reference implementations. [1][2]


XDG_DATA_HOME fallback path is inconsistent with XDG spec and internal patterns.

Using XDG_DATA_HOME but falling back to .local/lib is incorrect per the XDG Base Directory spec, which specifies XDG_DATA_HOME should default to ~/.local/share. This inconsistency is evident in the codebase itself—get_config_dir() correctly uses .local/share as the fallback for the same environment variable. Either use the correct fallback path, use a different environment variable for library files, or remove the XDG check if a custom directory structure is intended.

Additionally, the is_memory_model() function (lines 104-108) can be simplified to modelname.contains("mem") to eliminate unnecessary explicit returns.

🤖 Prompt for AI Agents
In `@tiles/src/utils/config.rs` around lines 63 - 75, get_lib_dir() uses
XDG_DATA_HOME but falls back to "~/.local/lib", which conflicts with the XDG
spec and the project's get_config_dir(); change the fallback for XDG_DATA_HOME
to "~/.local/share" (so the Err branch becomes home_dir.join(".local/share")) or
explicitly use a different env var if libraries are intentionally stored
elsewhere, and ensure the final returned path still appends "tiles"; also
simplify is_memory_model() by replacing its explicit boolean branches with a
direct modelname.contains("mem") expression.

pub fn get_config_dir() -> Result<PathBuf> {
Expand Down Expand Up @@ -100,3 +100,10 @@ pub fn get_data_dir() -> Result<PathBuf> {
Ok(data_dir.join("tiles"))
}
}

/// Returns `true` when `modelname` refers to a memory-agent model.
///
/// Detection is a simple substring check for "mem" in the model name
/// (e.g. "driaforall/mem-agent"). Written as a single expression to
/// satisfy clippy's `needless_return` lint.
pub fn is_memory_model(modelname: &str) -> bool {
    modelname.contains("mem")
}
Comment on lines +102 to +107
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Fix clippy warning and simplify the function.

The function can be simplified to a single expression, which also addresses the pipeline failure flagging the unneeded return statement.

Proposed fix
 pub fn is_memory_model(modelname: &str) -> bool {
-    if modelname.contains("mem") {
-        return true;
-    }
-    return false;
+    modelname.contains("mem")
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
pub fn is_memory_model(modelname: &str) -> bool {
if modelname.contains("mem") {
return true;
}
return false;
}
pub fn is_memory_model(modelname: &str) -> bool {
modelname.contains("mem")
}
🧰 Tools
🪛 GitHub Actions: Rust

[error] 108-108: clippy: unneeded return statement. Remove 'return false;' (use 'false' directly).

🤖 Prompt for AI Agents
In `@tiles/src/utils/config.rs` around lines 104 - 109, The is_memory_model
function is verbose and triggers a Clippy warning for unnecessary returns;
replace its body with a single expression returning modelname.contains("mem")
(i.e., remove the if block, returns, and explicit braces so the function simply
evaluates and returns modelname.contains("mem")). Ensure the function signature
and name is_memory_model remain unchanged.

Loading