diff --git a/tree-sitter-stack-graphs/Cargo.toml b/tree-sitter-stack-graphs/Cargo.toml
index 3c1c01a80..f519fae93 100644
--- a/tree-sitter-stack-graphs/Cargo.toml
+++ b/tree-sitter-stack-graphs/Cargo.toml
@@ -42,10 +42,12 @@ cli = [
     "walkdir",
 ]
 lsp = [
-    "capture-it",
     "crossbeam-channel",
-    "tokio",
-    "tower-lsp",
+    "lsp-server",
+    "lsp-types",
+    "serde",
+    "serde_json",
+    "log",
 ]
 
 [dependencies]
@@ -61,19 +63,21 @@ dirs = { version = "5", optional = true }
 env_logger = { version = "0.9", optional = true }
 indoc = { version = "1.0", optional = true }
 itertools = "0.10"
-log = "0.4"
+log = { version = "0.4", optional = true }
 lsp-positions = { version="0.3.4", path="../lsp-positions", features=["tree-sitter"] } # explicit version is required to be able to publish crate
+lsp-server = { version = "0.7.8", optional = true }
+lsp-types = { version = "0.95.0", optional = true }
 once_cell = "1"
 pathdiff = { version = "0.2.1", optional = true }
 regex = "1"
 rust-ini = "0.18"
+serde = { version = "1.0", optional = true, features = ["derive"] }
 serde_json = { version="1.0", optional=true }
 sha1 = { version="0.10", optional=true }
 stack-graphs = { version = "0.14", path="../stack-graphs" } # explicit version is required to be able to publish crate
 thiserror = "1.0"
 time = { version = "0.3", optional = true }
 tokio = { version = "1.26", optional = true, features = ["io-std", "rt", "rt-multi-thread"] }
-tower-lsp = { version = "0.19", optional = true }
 tree-sitter = "0.24" # keep the same minor version as the tree-sitter dependency
                      # of tree-sitter-graph to prevent install problems
 tree-sitter-config = { version = "0.24", optional = true }
diff --git a/tree-sitter-stack-graphs/src/cli/lsp.rs b/tree-sitter-stack-graphs/src/cli/lsp.rs
index 57ed770bb..1d66c2b51 100644
--- a/tree-sitter-stack-graphs/src/cli/lsp.rs
+++ b/tree-sitter-stack-graphs/src/cli/lsp.rs
@@ -5,28 +5,18 @@
 // Please see the LICENSE-APACHE or LICENSE-MIT files in this distribution for license details.
 // ------------------------------------------------------------------------------------------------
 
-use capture_it::capture;
 use clap::Args;
-use crossbeam_channel::RecvTimeoutError;
-use crossbeam_channel::Sender;
-use stack_graphs::storage::SQLiteReader;
-use stack_graphs::storage::SQLiteWriter;
-use stack_graphs::storage::StorageError;
-use std::path::Path;
-use std::path::PathBuf;
-use std::sync::Arc;
+use crossbeam_channel::{RecvTimeoutError, Sender};
+use lsp_server::{Connection, ErrorCode, Message, Notification, Request, RequestId, Response, ResponseError};
+use lsp_types::notification::{DidChangeWorkspaceFolders, DidSaveTextDocument, Initialized, Notification as _, Progress};
+use lsp_types::request::{GotoDefinition, Initialize, Request as _, Shutdown};
+use lsp_types::*;
+use serde_json::Value;
+use stack_graphs::storage::{SQLiteReader, SQLiteWriter, StorageError};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
 use std::thread;
 use std::time::Duration;
-use tokio::runtime::Handle;
-use tower_lsp::jsonrpc::Error;
-use tower_lsp::jsonrpc::ErrorCode;
-use tower_lsp::jsonrpc::Result;
-use tower_lsp::lsp_types::notification::Progress;
-use tower_lsp::lsp_types::*;
-use tower_lsp::Client;
-use tower_lsp::LanguageServer;
-use tower_lsp::LspService;
-use tower_lsp::Server;
 
 use crate::cli::index::Indexer;
 use crate::cli::query::Querier;
@@ -41,9 +31,16 @@
 use crate::AtomicCancellationFlag;
 use crate::CancelAfterDuration;
 use crate::CancellationFlag;
 
+/// Command line arguments for the LSP server.
+///
+/// These arguments control the behavior of the LSP server, such as
+/// timeouts for indexing and querying operations.
 #[derive(Args, Clone)]
 pub struct LspArgs {
     /// Maximum index runtime per workspace folder in seconds.
+    ///
+    /// If specified, indexing of a workspace folder will be cancelled
+    /// after this duration.
     #[clap(
         long,
         value_name = "SECONDS",
@@ -52,6 +49,9 @@ pub struct LspArgs {
     pub max_folder_index_time: Option<Duration>,
 
     /// Maximum index runtime per file in seconds.
+    ///
+    /// If specified, indexing of a single file will be cancelled
+    /// after this duration.
     #[clap(
         long,
         value_name = "SECONDS",
@@ -60,6 +60,9 @@ pub struct LspArgs {
     pub max_file_index_time: Option<Duration>,
 
     /// Maximum query runtime in milliseconds.
+    ///
+    /// If specified, a query (e.g., goto-definition) will be cancelled
+    /// after this duration.
     #[clap(
         long,
         value_name = "MILLISECONDS",
@@ -69,22 +72,38 @@ pub struct LspArgs {
     pub max_query_time: Option<Duration>,
 }
 
 impl LspArgs {
+    /// Run the LSP server with the given arguments.
+    ///
+    /// This method creates and runs an LSP server with the given database path
+    /// and loader. It sets up the transport, creates the backend, and runs the
+    /// server until it shuts down.
+    ///
+    /// # Arguments
+    ///
+    /// * `db_path` - Path to the SQLite database file
+    /// * `loader` - Loader for tree-sitter parsers
+    ///
+    /// # Returns
+    ///
+    /// * `anyhow::Result<()>` - Result indicating success or failure
     pub fn run(self, db_path: PathBuf, loader: Loader) -> anyhow::Result<()> {
-        let rt = tokio::runtime::Runtime::new()?;
-        rt.block_on(async {
-            let (service, socket) = LspService::new(|client| Backend {
-                _client: client.clone(),
-                db_path,
-                args: self,
-                loader: Arc::new(std::sync::Mutex::new(loader)),
-                jobs: Arc::new(tokio::sync::Mutex::new(None)),
-                logger: BackendLogger { client },
-            });
-
-            let stdin = tokio::io::stdin();
-            let stdout = tokio::io::stdout();
-            Server::new(stdin, stdout, socket).serve(service).await;
-        });
+        // Create the transport
+        let (connection, io_threads) = Connection::stdio();
+
+        // Create the backend
+        let backend = Backend {
+            db_path,
+            args: self,
+            loader: Arc::new(Mutex::new(loader)),
+            jobs: Arc::new(Mutex::new(None)),
+            client: connection.sender.clone(),
+        };
+
+        // Run the server
+        backend.run(connection)?;
+
+        // Wait for the IO threads to finish
+        io_threads.join()?;
         Ok(())
     }
 }
@@ -112,30 +131,101 @@ impl std::fmt::Display for LspArgs {
     }
 }
 
-#[derive(Clone)]
+/// Main backend for the LSP server.
+///
+/// This struct handles the LSP protocol communication and dispatches
+/// requests and notifications to the appropriate handlers.
 struct Backend {
-    _client: Client,
+    /// Path to the SQLite database file
+    db_path: PathBuf,
+    /// Loader for tree-sitter parsers
+    loader: Arc<Mutex<Loader>>,
+    /// Command line arguments
+    args: LspArgs,
+    /// Background job handler and cancellation flag
+    jobs: Arc<Mutex<Option<(Sender<Job>, AtomicCancellationFlag)>>>,
+    /// Channel for sending messages to the client
+    client: Sender<Message>,
+}
+
+/// Backend for use in worker threads.
+///
+/// This is a lightweight version of the Backend struct that can be
+/// safely cloned and passed to worker threads. It contains only the
+/// data needed for indexing and querying operations.
+struct ThreadBackend {
+    /// Path to the SQLite database file
     db_path: PathBuf,
-    loader: Arc<std::sync::Mutex<Loader>>,
+    /// Loader for tree-sitter parsers
+    loader: Arc<Mutex<Loader>>,
+    /// Command line arguments
     args: LspArgs,
-    jobs: Arc<tokio::sync::Mutex<Option<(Sender<Job>, AtomicCancellationFlag)>>>,
-    logger: BackendLogger,
 }
 
 impl Backend {
-    async fn start_job_handler(&self) -> (Sender<Job>, AtomicCancellationFlag) {
-        let handle = Handle::current();
-        let backend = self.clone();
+    /// Run the LSP server.
+    ///
+    /// This method starts the main loop of the LSP server, processing
+    /// incoming messages from the client and dispatching them to the
+    /// appropriate handlers.
+    ///
+    /// # Arguments
+    ///
+    /// * `connection` - The LSP connection
+    ///
+    /// # Returns
+    ///
+    /// * `anyhow::Result<()>` - Result indicating success or failure
+    fn run(mut self, connection: Connection) -> anyhow::Result<()> {
+        // Process messages
+        log::info!("Starting LSP server");
+
+        for msg in &connection.receiver {
+            match msg {
+                Message::Request(req) => {
+                    if connection.handle_shutdown(&req)? {
+                        log::info!("Shutting down LSP server");
+                        // Shutdown the job handler
+                        if let Ok(mut jobs) = self.jobs.lock() {
+                            if let Some((_, flag)) = jobs.as_ref() {
+                                flag.cancel();
+                            }
+                        }
+                        return Ok(());
+                    }
+                    self.handle_request(req);
+                }
+                Message::Response(resp) => {
+                    log::info!("Response: {:?}", resp);
+                }
+                Message::Notification(not) => {
+                    self.handle_notification(not);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Start a background job handler thread.
+    ///
+    /// This method creates a new thread that processes indexing and cleaning
+    /// jobs in the background. It returns a sender channel for submitting
+    /// jobs and a cancellation flag for stopping the thread.
+    ///
+    /// # Returns
+    ///
+    /// * `(Sender<Job>, AtomicCancellationFlag)` - A channel for sending jobs and a cancellation flag
+    fn start_job_handler(&self) -> (Sender<Job>, AtomicCancellationFlag) {
        let (sender, receiver) = crossbeam_channel::unbounded::<Job>();
         let cancellation_flag = AtomicCancellationFlag::new();
         let thread_cancellation_flag = cancellation_flag.clone();
+        let backend = self.clone_for_thread();
+
         thread::spawn(move || {
-            handle.block_on(capture!([logger = &backend.logger], async move {
-                logger.info("started job handler").await;
-            }));
+            log::info!("Started job handler");
             loop {
                 match receiver.recv_timeout(Duration::from_millis(10)) {
-                    Ok(job) => job.run(&backend, handle.clone(), &thread_cancellation_flag),
+                    Ok(job) => job.run(&backend, &thread_cancellation_flag),
                     Err(RecvTimeoutError::Timeout) => {
                         if thread_cancellation_flag.check("").is_err() {
                             break;
@@ -144,211 +234,204 @@ impl Backend {
                         }
                    }
                    Err(RecvTimeoutError::Disconnected) => break,
                }
            }
-            handle.block_on(capture!([logger = &backend.logger], async move {
-                logger.info("stopped job handler").await;
-            }));
+            log::info!("Stopped job handler");
         });
+
         (sender, cancellation_flag)
     }
 
+    /// Create a ThreadBackend for use in worker threads.
+    ///
+    /// This method creates a lightweight version of the Backend struct
+    /// that can be safely cloned and passed to worker threads.
+    ///
+    /// # Returns
+    ///
+    /// * `ThreadBackend` - A thread-safe backend for worker threads
+    fn clone_for_thread(&self) -> ThreadBackend {
+        ThreadBackend {
+            db_path: self.db_path.clone(),
+            args: self.args.clone(),
+            loader: self.loader.clone(),
+        }
+    }
+
     /// Opens or creates the database. If the database exists with an incompatible
     /// version, it is recreated.
-    async fn ensure_compatible_database(&self) -> Result<()> {
+    fn ensure_compatible_database(&self) -> anyhow::Result<()> {
         match SQLiteWriter::open(&self.db_path) {
             Ok(_) => {}
             Err(StorageError::IncorrectVersion(_)) => {
-                self.logger
-                    .error(format!(
-                        "Recreating database with new version {}",
-                        self.db_path.display(),
-                    ))
-                    .await;
-
-                std::fs::remove_file(&self.db_path).from_error()?;
-                SQLiteWriter::open(&self.db_path).from_error()?;
+                log::error!(
+                    "Recreating database with new version {}",
+                    self.db_path.display()
+                );
+
+                std::fs::remove_file(&self.db_path)?;
+                SQLiteWriter::open(&self.db_path)?;
             }
-            Err(err) => return Err(err).from_error(),
+            Err(err) => return Err(err.into()),
         };
         Ok(())
     }
 
-    fn index(&self, path: &Path, handle: Handle, cancellation_flag: &dyn CancellationFlag) {
-        handle.block_on(capture!([logger = &self.logger, path], async move {
-            logger.info(format!("indexing {}", path.display())).await;
-        }));
-
-        let mut db = match SQLiteWriter::open(&self.db_path) {
-            Ok(db) => db,
-            Err(err) => {
-                handle.block_on(capture!(
-                    [logger = &self.logger, db_path = &self.db_path],
-                    async move {
-                        logger
-                            .error(format!(
-                                "failed to open database {}: {}",
-                                db_path.display(),
-                                err
-                            ))
-                            .await;
-                    }
-                ));
-                return;
-            }
-        };
-
-        let mut loader = match self.loader.lock() {
-            Ok(l) => l,
-            Err(e) => {
-                handle.block_on(capture!([logger = &self.logger], async move {
-                    logger.error(format!("failed to lock loader: {}", e)).await;
-                }));
-                return;
-            }
-        };
-
-        let reporter = LspReporter {
-            handle: handle.clone(),
-            logger: self.logger.clone(),
-        };
-        let folder_cancellation_flag =
-            CancelAfterDuration::from_option(self.args.max_folder_index_time);
-        let cancellation_flag = cancellation_flag | folder_cancellation_flag.as_ref();
-        let mut indexer = Indexer::new(&mut db, &mut loader, &reporter);
-        indexer.max_file_time = self.args.max_file_index_time;
-        let result = indexer.index_all(vec![path], None::<&Path>, &cancellation_flag);
-
-        handle.block_on(capture!([logger = &self.logger, path], async move {
-            match result {
-                Ok(_) => logger.info(format!("indexed {}", path.display())).await,
-                Err(err) => {
-                    logger
-                        .info(format!("indexing failed {}: {}", path.display(), err))
-                        .await
-                }
-            }
-        }));
-    }
+    /// Handle an LSP request.
+    ///
+    /// This method dispatches requests to the appropriate handler based on the method.
+    ///
+    /// # Arguments
+    ///
+    /// * `req` - The LSP request
+    fn handle_request(&mut self, req: Request) {
+        match req.method.as_str() {
+            Initialize::METHOD => {
+                let params: InitializeParams = match serde_json::from_value(req.params) {
+                    Ok(params) => params,
+                    Err(err) => {
+                        self.send_error_response(req.id, err);
+                        return;
+                    }
+                };
+
+                self.handle_initialize(req.id, params);
+            }
+            GotoDefinition::METHOD => {
+                let params: GotoDefinitionParams = match serde_json::from_value(req.params) {
+                    Ok(params) => params,
+                    Err(err) => {
+                        self.send_error_response(req.id, err);
+                        return;
+                    }
+                };
+
+                self.handle_goto_definition(req.id, params);
+            }
+            _ => {
+                log::warn!("Unhandled request: {}", req.method);
+                let result = serde_json::json!({
+                    "error": format!("Unhandled request: {}", req.method)
+                });
+                self.send_response(req.id, result);
+            }
+        }
+    }
 
-    fn clean(&self, path: &Path, handle: Handle, _cancellation_flag: &dyn CancellationFlag) {
-        handle.block_on(capture!([logger = &self.logger, path], async move {
-            logger.info(format!("cleaning {}", path.display())).await;
-        }));
-
-        let mut db = match SQLiteWriter::open(&self.db_path) {
-            Ok(db) => db,
-            Err(err) => {
-                handle.block_on(capture!(
-                    [logger = &self.logger, db_path = &self.db_path],
-                    async move {
-                        logger
-                            .error(format!(
-                                "failed to open database {}: {}",
-                                db_path.display(),
-                                err
-                            ))
-                            .await;
-                    }
-                ));
-                return;
-            }
-        };
-
-        match db.clean_file_or_directory(path) {
-            Ok(_) => handle.block_on(capture!([logger = &self.logger, path], async move {
-                logger.info(format!("cleaned {}", path.display())).await;
-            })),
-            Err(e) => handle.block_on(capture!([logger = &self.logger, path], async move {
-                logger
-                    .error(format!("error cleaning {}: {}", path.display(), e))
-                    .await;
-            })),
-        }
-    }
+    /// Handle an LSP notification.
+    ///
+    /// This method dispatches notifications to the appropriate handler based on the method.
+    ///
+    /// # Arguments
+    ///
+    /// * `not` - The LSP notification
+    fn handle_notification(&mut self, not: Notification) {
+        match not.method.as_str() {
+            Initialized::METHOD => {
+                log::info!("Initialized with database {}", self.db_path.display());
+            }
+            DidSaveTextDocument::METHOD => {
+                let params: DidSaveTextDocumentParams = match serde_json::from_value(not.params) {
+                    Ok(params) => params,
+                    Err(err) => {
+                        log::error!("Invalid params for didSave: {}", err);
+                        return;
+                    }
+                };
+
+                self.handle_did_save(params);
+            }
+            DidChangeWorkspaceFolders::METHOD => {
+                let params: DidChangeWorkspaceFoldersParams =
+                    match serde_json::from_value(not.params) {
+                        Ok(params) => params,
+                        Err(err) => {
+                            log::error!("Invalid params for didChangeWorkspaceFolders: {}", err);
+                            return;
+                        }
+                    };
+
+                self.handle_did_change_workspace_folders(params);
+            }
+            _ => {
+                log::warn!("Unhandled notification: {}", not.method);
+            }
+        }
+    }
 
-    async fn definitions(&self, reference: SourcePosition) -> Vec<SourceSpan> {
-        let mut db = match SQLiteReader::open(&self.db_path) {
-            Ok(db) => db,
-            Err(err) => {
-                self.logger
-                    .error(format!(
-                        "failed to open database {}: {}",
-                        self.db_path.display(),
-                        err
-                    ))
-                    .await;
-                return Vec::default();
-            }
-        };
-
-        let handle = Handle::current();
-        let reporter = LspReporter {
-            handle: handle.clone(),
-            logger: self.logger.clone(),
-        };
-        let result = {
-            let mut querier = Querier::new(&mut db, &reporter);
-            let cancellation_flag = CancelAfterDuration::from_option(self.args.max_query_time);
-            querier.definitions(reference, cancellation_flag.as_ref())
-        };
-        match result {
-            Ok(result) => result.into_iter().flat_map(|r| r.targets).collect(),
-            Err(QueryError::Cancelled(at)) => {
-                self.logger
-                    .error(format!("query timed out at {}", at,))
-                    .await;
-                return Vec::default();
-            }
-            Err(err) => {
-                self.logger.error(format!("query failed {}", err)).await;
-                return Vec::default();
-            }
-        }
-    }
-}
+    /// Send a successful response to the client.
+    ///
+    /// # Arguments
+    ///
+    /// * `id` - The request ID
+    /// * `result` - The result value
+    fn send_response(&self, id: RequestId, result: Value) {
+        let response = Response {
+            id,
+            result: Some(result),
+            error: None,
+        };
+        self.client.send(Message::Response(response)).unwrap();
+    }
 
-#[tower_lsp::async_trait]
-impl LanguageServer for Backend {
-    async fn initialize(&self, params: InitializeParams) -> Result<InitializeResult> {
-        self.logger.info(format!("Initialize:{}", self.args)).await;
+    /// Send an error response to the client.
+    ///
+    /// # Arguments
+    ///
+    /// * `id` - The request ID
+    /// * `err` - The error
+    fn send_error_response<E: std::fmt::Display>(&self, id: RequestId, err: E) {
+        let response = Response {
+            id,
+            result: None,
+            error: Some(ResponseError {
+                code: ErrorCode::InvalidParams as i32,
+                message: err.to_string(),
+                data: None,
+            }),
+        };
+        self.client.send(Message::Response(response)).unwrap();
+    }
 
-        self.ensure_compatible_database().await?;
+    fn handle_initialize(&mut self, id: RequestId, params: InitializeParams) {
+        log::info!("Initialize:{}", self.args);
 
-        let mut jobs = self.jobs.lock().await;
-        *jobs = Some(self.start_job_handler().await);
+        // Ensure the database is compatible
+        if let Err(err) = self.ensure_compatible_database() {
+            self.send_error_response(id, err);
+            return;
+        }
+
+        // Start the job handler
+        let mut jobs = self.jobs.lock().unwrap();
+        *jobs = Some(self.start_job_handler());
+
+        // Process workspace folders
         if let Some(folders) = params.workspace_folders {
             for folder in &folders {
-                self.logger
-                    .info(format!("Initial workspace folder {}", folder.uri))
-                    .await;
+                log::info!("Initial workspace folder {}", folder.uri);
                 if let Ok(path) = folder.uri.to_file_path() {
-                    jobs.as_ref()
-                        .unwrap()
-                        .0
-                        .send(Job::IndexPath(path))
-                        .from_error()?;
+                    if let Err(e) = jobs.as_ref().unwrap().0.send(Job::IndexPath(path)) {
+                        log::error!("Scheduling index job failed: {}", e);
+                    }
                 } else {
-                    self.logger
-                        .error(format!("No local path for workspace folder {}", folder.uri))
-                        .await;
+                    log::error!("No local path for workspace folder {}", folder.uri);
                 }
             }
         }
         drop(jobs);
 
-        let result = InitializeResult {
-            capabilities: ServerCapabilities {
+        // Send the server capabilities
+        let result = serde_json::json!({
+            "capabilities": ServerCapabilities {
                 definition_provider: Some(OneOf::Right(DefinitionOptions {
                     work_done_progress_options: WorkDoneProgressOptions {
-                        work_done_progress: true.into(),
+                        work_done_progress: Some(true),
                     },
                 })),
-                text_document_sync: Some(
+                text_document_sync: Some(TextDocumentSyncCapability::Options(
                     TextDocumentSyncOptions {
-                        save: Some(true.into()),
+                        save: Some(TextDocumentSyncSaveOptions::Supported(true)),
                         ..Default::default()
                     }
-                    .into(),
-                ),
+                )),
                 workspace: Some(WorkspaceServerCapabilities {
                     workspace_folders: Some(WorkspaceFoldersServerCapabilities {
                         supported: Some(true),
@@ -357,69 +440,41 @@ impl LanguageServer for Backend {
                         ..Default::default()
                     }),
                     ..Default::default()
                 }),
                 ..Default::default()
-            },
-            ..Default::default()
-        };
-        Ok(result)
-    }
-
-    async fn initialized(&self, _: InitializedParams) {
-        self.logger
-            .info(format!(
-                "Initialized with database {}",
-                self.db_path.display()
-            ))
-            .await;
-    }
-
-    async fn did_save(&self, params: DidSaveTextDocumentParams) {
-        let jobs = self.jobs.lock().await;
-        self.logger
-            .info(format!("Saved document {}", params.text_document.uri))
-            .await;
-        if let Ok(path) = params.text_document.uri.to_file_path() {
-            if let Err(e) = jobs.as_ref().unwrap().0.send(Job::IndexPath(path)) {
-                self.logger
-                    .error(format!("Scheduling index job failed: {}", e))
-                    .await;
-            }
-        } else {
-            self.logger
-                .error(format!(
-                    "No local path for document {}",
-                    params.text_document.uri
-                ))
-                .await;
-        }
-        drop(jobs);
+            }
+        });
+
+        self.send_response(id, result);
     }
 
-    async fn goto_definition(
-        &self,
-        params: GotoDefinitionParams,
-    ) -> Result<Option<GotoDefinitionResponse>> {
-        self.logger
-            .info(format!(
-                "Go to definition {}:{}:{}",
-                params.text_document_position_params.text_document.uri,
-                params.text_document_position_params.position.line + 1,
-                params.text_document_position_params.position.character + 1
-            ))
-            .await;
+    fn handle_goto_definition(&self, id: RequestId, params: GotoDefinitionParams) {
+        log::info!(
+            "Go to definition {}:{}:{}",
+            params.text_document_position_params.text_document.uri,
+            params.text_document_position_params.position.line + 1,
+            params.text_document_position_params.position.character + 1
+        );
 
+        // Send progress notification if requested
         if let Some(token) = &params.work_done_progress_params.work_done_token {
-            self._client
-                .send_notification::<Progress>(ProgressParams {
-                    token: token.clone(),
-                    value: ProgressParamsValue::WorkDone(WorkDoneProgress::Begin(
-                        WorkDoneProgressBegin {
-                            title: "Querying".to_string(),
-                            ..Default::default()
-                        },
-                    )),
-                })
-                .await;
+            let progress_params = ProgressParams {
+                token: token.clone(),
+                value: ProgressParamsValue::WorkDone(WorkDoneProgress::Begin(
+                    WorkDoneProgressBegin {
+                        title: "Querying".to_string(),
+                        ..Default::default()
+                    },
+                )),
+            };
+
+            let notification = Notification {
+                method: Progress::METHOD.to_string(),
+                params: serde_json::to_value(progress_params).unwrap(),
+            };
+
+            self.client.send(Message::Notification(notification)).unwrap();
         }
+
+        // Get the file path
         let path = match params
             .text_document_position_params
             .text_document
             .uri
@@ -428,194 +483,270 @@
            .to_file_path()
         {
             Ok(path) => path,
             Err(_) => {
-                self.logger
-                    .error(format!(
-                        "Not a supported file path: {}",
-                        params.text_document_position_params.text_document.uri,
-                    ))
-                    .await;
-                return Ok(None);
+                log::error!(
+                    "Not a supported file path: {}",
+                    params.text_document_position_params.text_document.uri,
+                );
+                self.send_response(id, serde_json::json!(null));
+                return;
             }
         };
+
+        // Create the source position
         let line = params.text_document_position_params.position.line as usize;
         let column = params.text_document_position_params.position.character as usize;
         let reference = SourcePosition { path, line, column };
+
+        // Find definitions
         let locations = self
             .definitions(reference)
-            .await
             .into_iter()
             .filter_map(|l| l.try_into_location().ok())
             .collect::<Vec<_>>();
 
-        self.logger
-            .info(format!(
-                "Found {} definitions for {}:{}:{}",
-                locations.len(),
-                params.text_document_position_params.text_document.uri,
-                params.text_document_position_params.position.line + 1,
-                params.text_document_position_params.position.character + 1
-            ))
-            .await;
+        log::info!(
+            "Found {} definitions for {}:{}:{}",
+            locations.len(),
+            params.text_document_position_params.text_document.uri,
+            params.text_document_position_params.position.line + 1,
+            params.text_document_position_params.position.character + 1
+        );
 
+        // Send progress completion if requested
         if let Some(token) = &params.work_done_progress_params.work_done_token {
-            self._client
-                .send_notification::<Progress>(ProgressParams {
-                    token: token.clone(),
-                    value: ProgressParamsValue::WorkDone(WorkDoneProgress::End(
-                        WorkDoneProgressEnd {
-                            ..Default::default()
-                        },
-                    )),
-                })
-                .await;
+            let progress_params = ProgressParams {
+                token: token.clone(),
+                value: ProgressParamsValue::WorkDone(WorkDoneProgress::End(
+                    WorkDoneProgressEnd {
+                        ..Default::default()
+                    },
+                )),
+            };
+
+            let notification = Notification {
+                method: Progress::METHOD.to_string(),
+                params: serde_json::to_value(progress_params).unwrap(),
+            };
+
+            self.client.send(Message::Notification(notification)).unwrap();
         }
 
-        match locations.len() {
-            0 => Ok(None),
-            1 => Ok(Some(locations[0].clone().into())),
-            _ => Ok(Some(locations.into())),
-        }
-    }
+        // Send the response
+        let result = match locations.len() {
+            0 => serde_json::json!(null),
+            1 => serde_json::to_value(locations[0].clone()).unwrap(),
+            _ => serde_json::to_value(locations).unwrap(),
+        };
+
+        self.send_response(id, result);
+    }
+
+    fn handle_did_save(&self, params: DidSaveTextDocumentParams) {
+        let jobs = self.jobs.lock().unwrap();
+        log::info!("Saved document {}", params.text_document.uri);
+
+        if let Ok(path) = params.text_document.uri.to_file_path() {
+            if let Err(e) = jobs.as_ref().unwrap().0.send(Job::IndexPath(path)) {
+                log::error!("Scheduling index job failed: {}", e);
+            }
+        } else {
+            log::error!("No local path for document {}", params.text_document.uri);
+        }
+    }
 
-    async fn did_change_workspace_folders(&self, params: DidChangeWorkspaceFoldersParams) {
-        let jobs = self.jobs.lock().await;
+    fn handle_did_change_workspace_folders(&self, params: DidChangeWorkspaceFoldersParams) {
+        let jobs = self.jobs.lock().unwrap();
+
         for folder in &params.event.removed {
-            self.logger
-                .info(format!("Removed workspace folder {}", folder.uri))
-                .await;
+            log::info!("Removed workspace folder {}", folder.uri);
             if let Ok(path) = folder.uri.to_file_path() {
                 if let Err(e) = jobs.as_ref().unwrap().0.send(Job::CleanPath(path)) {
-                    self.logger
-                        .error(format!("Scheduling clean job failed: {}", e))
-                        .await;
+                    log::error!("Scheduling clean job failed: {}", e);
                 }
             } else {
-                self.logger
-                    .error(format!("No local path for workspace folder {}", folder.uri))
-                    .await;
+                log::error!("No local path for workspace folder {}", folder.uri);
             }
         }
+
         for folder in &params.event.added {
-            self.logger
-                .info(format!("Added workspace folder {}", folder.uri))
-                .await;
+            log::info!("Added workspace folder {}", folder.uri);
             if let Ok(path) = folder.uri.to_file_path() {
                 if let Err(e) = jobs.as_ref().unwrap().0.send(Job::IndexPath(path)) {
-                    self.logger
-                        .error(format!("Scheduling index job failed: {}", e))
-                        .await;
+                    log::error!("Scheduling index job failed: {}", e);
                 }
             } else {
-                self.logger
-                    .error(format!("No local path for workspace folder {}", folder.uri))
-                    .await;
+                log::error!("No local path for workspace folder {}", folder.uri);
             }
         }
-        drop(jobs);
     }
 
-    async fn shutdown(&self) -> Result<()> {
-        self.logger.info("Shutting down").await;
-        let jobs = self.jobs.lock().await;
-        jobs.as_ref().unwrap().1.cancel();
-        drop(jobs);
-        Ok(())
+    fn definitions(&self, reference: SourcePosition) -> Vec<SourceSpan> {
+        let mut db = match SQLiteReader::open(&self.db_path) {
+            Ok(db) => db,
+            Err(err) => {
+                log::error!(
+                    "failed to open database {}: {}",
+                    self.db_path.display(),
+                    err
+                );
+                return Vec::default();
+            }
+        };
+
+        let reporter = LogReporter {};
+        let result = {
+            let mut querier = Querier::new(&mut db, &reporter);
+            let cancellation_flag = CancelAfterDuration::from_option(self.args.max_query_time);
+            querier.definitions(reference, cancellation_flag.as_ref())
+        };
+
+        match result {
+            Ok(result) => result.into_iter().flat_map(|r| r.targets).collect(),
+            Err(QueryError::Cancelled(at)) => {
+                log::error!("query timed out at {}", at);
+                Vec::default()
+            }
+            Err(err) => {
+                log::error!("query failed {}", err);
+                Vec::default()
+            }
+        }
     }
 }
 
-#[derive(Clone)]
-struct BackendLogger {
-    client: Client,
-}
+impl ThreadBackend {
+    /// Index a file or directory.
+    ///
+    /// This method indexes the given path and adds the results to the database.
+    ///
+    /// # Arguments
+    ///
+    /// * `path` - The path to index
+    /// * `cancellation_flag` - Flag to check for cancellation
+    fn index(&self, path: &Path, cancellation_flag: &dyn CancellationFlag) {
+        log::info!("Indexing {}", path.display());
 
-impl BackendLogger {
-    async fn log<M: std::fmt::Display>(&self, level: MessageType, message: M) {
-        self.client.log_message(level, message).await
-    }
+        let mut db = match SQLiteWriter::open(&self.db_path) {
+            Ok(db) => db,
+            Err(err) => {
+                log::error!("Failed to open database {}: {}", self.db_path.display(), err);
+                return;
+            }
+        };
 
-    async fn info<M: std::fmt::Display>(&self, message: M) {
-        self.log(MessageType::INFO, message).await
-    }
+        let mut loader = match self.loader.lock() {
+            Ok(l) => l,
+            Err(e) => {
+                log::error!("Failed to lock loader: {}", e);
+                return;
+            }
+        };
+
+        let reporter = LogReporter {};
+        let folder_cancellation_flag =
+            CancelAfterDuration::from_option(self.args.max_folder_index_time);
+        let cancellation_flag = cancellation_flag | folder_cancellation_flag.as_ref();
+        let mut indexer = Indexer::new(&mut db, &mut loader, &reporter);
+        indexer.max_file_time = self.args.max_file_index_time;
+        let result = indexer.index_all(vec![path], None::<&Path>, &cancellation_flag);
 
-    async fn error<M: std::fmt::Display>(&self, message: M) {
-        self.log(MessageType::ERROR, message).await
-    }
-}
+        match result {
+            Ok(_) => log::info!("Indexed {}", path.display()),
+            Err(err) => log::info!("Indexing failed {}: {}", path.display(), err),
+        }
+    }
 
-trait FromStdError<T> {
-    #[must_use]
-    fn from_error(self) -> Result<T>;
-}
+    /// Clean a file or directory from the database.
+    ///
+    /// This method removes all stack graphs for the given path from the database.
+    ///
+    /// # Arguments
+    ///
+    /// * `path` - The path to clean
+    /// * `_cancellation_flag` - Flag to check for cancellation (unused)
+    fn clean(&self, path: &Path, _cancellation_flag: &dyn CancellationFlag) {
+        log::info!("Cleaning {}", path.display());
 
-impl<T, E: std::error::Error> FromStdError<T> for std::result::Result<T, E> {
-    #[must_use]
-    fn from_error(self) -> Result<T> {
-        match self {
-            Ok(value) => Ok(value),
-            Err(err) => Err(Error {
-                code: ErrorCode::ServerError(-1),
-                message: err.to_string(),
-                data: None,
-            }),
-        }
-    }
-}
+        let mut db = match SQLiteWriter::open(&self.db_path) {
+            Ok(db) => db,
+            Err(err) => {
+                log::error!("Failed to open database {}: {}", self.db_path.display(), err);
+                return;
+            }
+        };
+
+        match db.clean_file_or_directory(path) {
+            Ok(_) => log::info!("Cleaned {}", path.display()),
+            Err(e) => log::error!("Error cleaning {}: {}", path.display(), e),
+        }
+    }
+}
 
+/// Background job types that can be executed by the job handler.
 #[derive(Debug)]
 pub enum Job {
+    /// Index a file or directory
     IndexPath(PathBuf),
+    /// Clean a file or directory from the database
     CleanPath(PathBuf),
 }
 
 impl Job {
-    fn run(self, backend: &Backend, handle: Handle, cancellation_flag: &dyn CancellationFlag) {
+    /// Run the job with the given backend and cancellation flag.
+    ///
+    /// # Arguments
+    ///
+    /// * `backend` - The backend to use for the job
+    /// * `cancellation_flag` - Flag to check for cancellation
+    fn run(self, backend: &ThreadBackend, cancellation_flag: &dyn CancellationFlag) {
         match self {
-            Self::IndexPath(path) => backend.index(&path, handle, cancellation_flag),
-            Self::CleanPath(path) => backend.clean(&path, handle, cancellation_flag),
+            Self::IndexPath(path) => backend.index(&path, cancellation_flag),
+            Self::CleanPath(path) => backend.clean(&path, cancellation_flag),
         }
     }
 }
 
-struct LspReporter {
-    handle: Handle,
-    logger: BackendLogger,
-}
-
-impl LspReporter {
-    fn report(&self, level: MessageType, path: &Path, status: &str) {
-        let logger = self.logger.clone();
-        let path = path.to_owned();
-        let status = status.to_owned();
-        self.handle.spawn(async move {
-            logger
-                .log(level, format!("{}: {}", path.display(), status))
-                .await;
-        });
-    }
-}
+/// Reporter implementation that logs messages using the log crate.
+///
+/// This reporter is used by the indexer and querier to report progress
+/// and errors.
+struct LogReporter {}
 
-impl Reporter for LspReporter {
+impl Reporter for LogReporter {
+    /// Report that a file was skipped.
     fn skipped(&self, path: &Path, summary: &str, _details: Option<&dyn std::fmt::Display>) {
-        self.report(MessageType::INFO, path, summary)
+        log::info!("{}: {}", path.display(), summary);
     }
 
+    /// Report that processing of a file has started.
     fn started(&self, path: &Path) {
-        self.report(MessageType::INFO, path, "started")
+        log::info!("{}: started", path.display());
     }
 
+    /// Report that processing of a file has succeeded.
     fn succeeded(&self, path: &Path, summary: &str, _details: Option<&dyn std::fmt::Display>) {
-        self.report(MessageType::INFO, path, summary)
+        log::info!("{}: {}", path.display(), summary);
     }
 
+    /// Report that processing of a file has failed.
     fn failed(&self, path: &Path, summary: &str, _details: Option<&dyn std::fmt::Display>) {
-        self.report(MessageType::ERROR, path, summary)
+        log::error!("{}: {}", path.display(), summary);
     }
 
+    /// Report that processing of a file was cancelled.
     fn cancelled(&self, path: &Path, summary: &str, _details: Option<&dyn std::fmt::Display>) {
-        self.report(MessageType::WARNING, path, summary)
+        log::warn!("{}: {}", path.display(), summary);
     }
 }
 
 impl SourceSpan {
+    /// Convert a SourceSpan to an LSP Location.
+    ///
+    /// This method converts a SourceSpan to an LSP Location, which can be
+    /// sent to the client for highlighting or navigation.
+    ///
+    /// # Returns
+    ///
+    /// * `std::result::Result<Location, ()>` - The converted Location or an error
     fn try_into_location(self) -> std::result::Result<Location, ()> {
         let uri = Url::from_file_path(self.path)?;
         let start = Position {
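
For review context, here is a minimal, self-contained sketch (not part of the patch) of the synchronous lsp-server main loop that LspArgs::run and Backend::run adopt above in place of tower-lsp's async trait callbacks. It uses Connection::initialize, the library's built-in handshake helper, where the patch instead answers the initialize request by hand in handle_initialize so it can schedule indexing jobs for the initial workspace folders; the stdio transport, handle_shutdown, and io_threads.join steps mirror the patched code.

// Sketch only: a skeletal lsp-server main loop, assuming the lsp-server,
// lsp-types, serde_json, and anyhow crates from the patched Cargo.toml.
use lsp_server::{Connection, Message, Response};

fn main() -> anyhow::Result<()> {
    // Stdio transport; lsp-server spawns its reader/writer threads internally,
    // so no tokio runtime is required.
    let (connection, io_threads) = Connection::stdio();

    // Built-in handshake: replies to `initialize` with the given capabilities.
    // The patch performs this step manually inside handle_initialize.
    let capabilities = serde_json::to_value(lsp_types::ServerCapabilities::default())?;
    connection.initialize(capabilities)?;

    // Single synchronous message loop over one crossbeam channel.
    for msg in &connection.receiver {
        match msg {
            Message::Request(req) => {
                // handle_shutdown replies to `shutdown` and returns true
                // once the client has asked the server to exit.
                if connection.handle_shutdown(&req)? {
                    break;
                }
                // A real server dispatches on req.method here (compare
                // Backend::handle_request); this sketch returns null.
                connection
                    .sender
                    .send(Message::Response(Response::new_ok(req.id, ())))?;
            }
            Message::Response(_) | Message::Notification(_) => {}
        }
    }

    // Drain and join the stdio threads before exiting.
    io_threads.join()?;
    Ok(())
}

The design trade-off this illustrates: tower-lsp routes each LSP method to an async trait method on a shared runtime, while lsp-server hands the server a plain receiver of messages, which is why the patch can move indexing onto an ordinary worker thread guarded by std::sync::Mutex instead of tokio::sync::Mutex.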