diff --git a/module/move/unilang/plan.md b/module/move/unilang/plan.md new file mode 100644 index 0000000000..4fb6721882 --- /dev/null +++ b/module/move/unilang/plan.md @@ -0,0 +1,142 @@ +# Project Plan: Unilang Codestyle and Documentation Review + +### Goal +* Iterate through each file of the `unilang` crate, ensuring all code adheres to the established codestyle rules and has concise, accurate documentation. + +### Progress +* ✅ Codestyle Review Complete + +### Target Crate +* module/move/unilang + +### Relevant Context +* Files to Include (for AI's reference, if `read_file` is planned, primarily from Target Crate): + * All `.rs` files within `module/move/unilang/src` and `module/move/unilang/tests`. + +### Expected Behavior Rules / Specifications (for Target Crate) +* All files must pass `cargo clippy` with the workspace lint settings without warnings. +* All public items should have clear, concise documentation. +* Code formatting should be consistent across the entire crate. + +### Target File Structure (If Applicable, within Target Crate) +* No changes to the file structure are planned. + +### Increments + +* ✅ Increment 1: Review `src/lib.rs` + * Detailed Plan Step 1: Read the content of `src/lib.rs`. + * Detailed Plan Step 2: Apply codestyle fixes, including adding documentation for all public modules. + * Detailed Plan Step 3: Use `mod_interface` to structure the crate's public API. + * Pre-Analysis: The `lib.rs` file is the entry point to the crate and should be well-documented and structured. + * Crucial Design Rules: [Structuring: Modules with `mod_interface`](#structuring-modules-with-mod_interface), [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile. + * Commit Message: "style(unilang): Clean up and document src/lib.rs" +* ✅ Increment 2: Review `src/data.rs` + * Detailed Plan Step 1: Read the content of `src/data.rs`. + * Detailed Plan Step 2: Apply codestyle fixes (spacing, newlines, etc.). + * Detailed Plan Step 3: Add concise documentation to all public structs and their fields. + * Detailed Plan Step 4: Re-enable the `former` crate derive macros and attributes and fix any resulting compilation errors. + * Pre-Analysis: The file contains core data structures. The `former` derive is currently commented out and needs to be re-enabled and fixed. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile without errors or warnings. + * Commit Message: "style(unilang): Clean up and document src/data.rs" +* ✅ Increment 3: Review `src/error.rs` + * Detailed Plan Step 1: Read the content of `src/error.rs`. + * Detailed Plan Step 2: Apply codestyle fixes (spacing, newlines, etc.). + * Detailed Plan Step 3: Add concise documentation to the `Error` enum and its variants. + * Pre-Analysis: The file contains the basic error enum. It needs proper documentation and formatting. + * Crucial Design Rules: [Error Handling: Use a Centralized Approach](#error-handling-use-a-centralized-approach), [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile. + * Commit Message: "style(unilang): Clean up and document src/error.rs" +* ✅ Increment 4: Review `src/help.rs` + * Detailed Plan Step 1: Read the content of `src/help.rs`. + * Detailed Plan Step 2: Apply codestyle fixes (spacing, newlines, etc.). + * Detailed Plan Step 3: Add concise documentation to the `HelpGenerator` struct and its methods. + * Pre-Analysis: The file contains the basic help generator. It needs proper documentation and formatting. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile. + * Commit Message: "style(unilang): Clean up and document src/help.rs" +* ✅ Increment 5: Review `src/interpreter.rs` + * Detailed Plan Step 1: Read the content of `src/interpreter.rs`. + * Detailed Plan Step 2: Apply codestyle fixes (spacing, newlines, etc.). + * Detailed Plan Step 3: Add concise documentation to the `Interpreter` and `ExecutionContext` structs and their methods. + * Pre-Analysis: The file contains the basic interpreter. It needs proper documentation and formatting. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile. + * Commit Message: "style(unilang): Clean up and document src/interpreter.rs" +* ✅ Increment 6: Review `src/parsing.rs` + * Detailed Plan Step 1: Read the content of `src/parsing.rs`. + * Detailed Plan Step 2: Apply codestyle fixes (spacing, newlines, etc.). + * Detailed Plan Step 3: Add concise documentation to all public items (`Token`, `Lexer`, `Parser`, `Statement`, `Program`) and their methods/variants/fields. + * Pre-Analysis: The file contains the lexer and parser. It's a large file and will require careful review to ensure all items are documented and formatted correctly. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation), [New Lines for Blocks](#new-lines-for-blocks), [Spaces Around Symbols](#spaces-around-symbols). + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile. + * Commit Message: "style(unilang): Clean up and document src/parsing.rs" +* ✅ Increment 7: Review `src/registry.rs` + * Detailed Plan Step 1: Read the content of `src/registry.rs`. + * Detailed Plan Step 2: Apply codestyle fixes (spacing, newlines, etc.). + * Detailed Plan Step 3: Add concise documentation to the `CommandRegistry` struct, `CommandRegistryBuilder` struct, and their methods. + * Pre-Analysis: The file contains the command registry. It needs proper documentation and formatting. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile. + * Commit Message: "style(unilang): Clean up and document src/registry.rs" +* ✅ Increment 8: Review `src/semantic.rs` + * Detailed Plan Step 1: Read the content of `src/semantic.rs`. + * Detailed Plan Step 2: Apply codestyle fixes (spacing, newlines, etc.). + * Detailed Plan Step 3: Add concise documentation to the `VerifiedCommand` and `SemanticAnalyzer` structs and their methods. + * Pre-Analysis: The file contains the semantic analyzer. It needs proper documentation and formatting. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` to ensure the changes compile. + * Commit Message: "style(unilang): Clean up and document src/semantic.rs" +* ✅ Increment 9: Review `src/ca/` directory + * Detailed Plan Step 1: Read all files in `src/ca/` and its subdirectories. + * Detailed Plan Step 2: Refactor `src/ca/mod.rs`. + * Detailed Plan Step 3: Refactor `src/ca/parsing/mod.rs`. + * Detailed Plan Step 4: Refactor `src/ca/parsing/engine.rs`. + * Detailed Plan Step 5: Refactor `src/ca/parsing/error.rs`. + * Detailed Plan Step 6: Refactor `src/ca/parsing/input.rs`. + * Detailed Plan Step 7: Refactor `src/ca/parsing/instruction.rs`. + * Pre-Analysis: This directory seems to contain a secondary parsing engine or a more complex command architecture (`ca`). It needs to be reviewed for consistency with the rest of the crate. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation), [New Lines for Blocks](#new-lines-for-blocks), [Spaces Around Symbols](#spaces-around-symbols). + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo build -p unilang` after refactoring all files in the directory. + * Commit Message: "style(unilang): Clean up and document src/ca/**" +* ✅ Increment 10: Review `tests/` directory + * Detailed Plan Step 1: Read all files in `tests/` and its subdirectories. + * Detailed Plan Step 2: Refactor `tests/tests.rs`. + * Detailed Plan Step 3: Refactor `tests/inc/mod.rs`. + * Detailed Plan Step 4: Refactor `tests/inc/phase1/mod.rs`. + * Detailed Plan Step 5: Refactor `tests/inc/phase1/foundational_setup.rs`. + * Detailed Plan Step 6: Refactor `tests/inc/phase1/full_pipeline_test.rs`. + * Pre-Analysis: The test files need to be reviewed for clarity, documentation, and adherence to codestyle. + * Crucial Design Rules: [Comments and Documentation](#comments-and-documentation) + * Relevant Behavior Rules: N/A + * Verification Strategy: Run `cargo test -p unilang` to ensure all tests still pass. + * Commit Message: "style(unilang): Clean up and document tests/**" +* ✅ Increment 11: Final Verification + * Detailed Plan Step 1: Run `cargo clippy -- -D warnings` to ensure there are no warnings. + * Detailed Plan Step 2: Run `cargo test` to ensure all tests pass. + * Pre-Analysis: All files have been reviewed. A final check is needed to ensure the entire crate is clean. + * Crucial Design Rules: N/A + * Relevant Behavior Rules: N/A + * Verification Strategy: All checks must pass. + * Commit Message: "chore(unilang): Final verification of codestyle changes" + +### Task Requirements +* Systematically review every `.rs` file. +* Apply codestyle fixes and add documentation as needed. + +### Project Requirements +* Maintain consistency with the overall workspace codestyle. + +### Notes & Insights +* This is a good opportunity to improve the overall quality and maintainability of the crate. +* The `former` crate usage is still disabled and should be noted for future work. \ No newline at end of file diff --git a/module/move/unilang/src/ca/mod.rs b/module/move/unilang/src/ca/mod.rs index 17a13217fb..73c5780cc0 100644 --- a/module/move/unilang/src/ca/mod.rs +++ b/module/move/unilang/src/ca/mod.rs @@ -1,12 +1,14 @@ //! -//! Commands aggregator library. +//! Command aggregator library for advanced command parsing and execution. //! +/// Contains the parsing components for the command aggregator. pub mod parsing; mod private {} crate::mod_interface! { + /// Exposes the parsing module. exposed use parsing; } diff --git a/module/move/unilang/src/ca/parsing/engine.rs b/module/move/unilang/src/ca/parsing/engine.rs index dd88046034..222c1ae435 100644 --- a/module/move/unilang/src/ca/parsing/engine.rs +++ b/module/move/unilang/src/ca/parsing/engine.rs @@ -1,17 +1,26 @@ -//! Main parser logic for unilang CLI syntax. +//! +//! Main parser logic for the command aggregator. +//! #[ allow( unused_imports ) ] use super::input::{ InputAbstraction, InputPart, DelimiterType, Location }; use super::instruction::GenericInstruction; use super::error::ParseError; -/// The main parser engine. +/// +/// The main parser engine for the command aggregator. +/// #[ derive( Debug ) ] pub struct Parser; impl Parser { + /// /// Parses the input into a sequence of generic instructions. + /// + /// This is the main entry point for the parsing engine, taking an + /// `InputAbstraction` and returning a `Vec` of `GenericInstruction`s + /// or a `ParseError`. pub fn parse< 'a >( input : InputAbstraction< 'a > ) -> Result< Vec< GenericInstruction< 'a > >, ParseError > { // TODO: Implement parsing logic using InputAbstraction diff --git a/module/move/unilang/src/ca/parsing/error.rs b/module/move/unilang/src/ca/parsing/error.rs index 07ff352652..160deaad73 100644 --- a/module/move/unilang/src/ca/parsing/error.rs +++ b/module/move/unilang/src/ca/parsing/error.rs @@ -1,38 +1,51 @@ -//! Error types for the unilang parser. +//! +//! Error types for the command aggregator parser. +//! use super::input::Location; +/// /// Represents an error that occurred during parsing. +/// #[ derive( Debug, Clone, PartialEq, Eq ) ] pub enum ParseError { /// An unexpected character or sequence was encountered. UnexpectedToken { + /// The location of the unexpected token. location : Location, + /// The unexpected token. token : String, }, /// An unquoted value contained internal whitespace (based on E5 decision). UnquotedValueWithWhitespace { + /// The location of the value. location : Location, + /// The value containing whitespace. value : String, }, /// An unterminated quote was found. UnterminatedQuote { + /// The location of the unterminated quote. location : Location, + /// The quote character that was not terminated. quote_char : char, }, /// End of input was reached unexpectedly. UnexpectedEndOfInput { + /// The location where the end of input was unexpected. location : Location, }, /// A required element was missing. MissingElement { + /// The location where the element was expected. location : Location, + /// A description of the missing element. element_description : String, }, // Add other specific error variants as needed during parser implementation. diff --git a/module/move/unilang/src/ca/parsing/input.rs b/module/move/unilang/src/ca/parsing/input.rs index 1d10402048..71820985b2 100644 --- a/module/move/unilang/src/ca/parsing/input.rs +++ b/module/move/unilang/src/ca/parsing/input.rs @@ -1,6 +1,10 @@ -//! Input abstraction for the unilang parser. +//! +//! Input abstraction for the command aggregator parser. +//! +/// /// Represents a location within the input, handling both single strings and slices. +/// #[ derive( Debug, Clone, Copy, PartialEq, Eq ) ] pub enum Location { @@ -14,26 +18,35 @@ pub enum Location ), } +/// /// Represents the current state of the input being parsed. +/// #[ derive( Debug, Clone, PartialEq, Eq ) ] pub enum InputState< 'a > { /// State for a single string input. SingleString { + /// The input string. input : &'a str, + /// The current byte offset. offset : usize, }, /// State for a slice of string segments input. SegmentSlice { + /// The slice of string segments. segments : &'a [&'a str], + /// The current segment index. segment_index : usize, + /// The current byte offset within the segment. offset_in_segment : usize, }, } +/// /// Provides a unified interface to process input from either a single string or a slice of strings. +/// #[ derive( Debug, Clone, PartialEq, Eq ) ] pub struct InputAbstraction< 'a > { @@ -42,7 +55,9 @@ pub struct InputAbstraction< 'a > impl< 'a > InputAbstraction< 'a > { + /// /// Creates a new `InputAbstraction` from a single string. + /// pub fn from_str( input : &'a str ) -> Self { Self @@ -51,7 +66,9 @@ impl< 'a > InputAbstraction< 'a > } } + /// /// Creates a new `InputAbstraction` from a slice of string segments. + /// pub fn from_segments( segments : &'a [&'a str] ) -> Self { Self @@ -61,9 +78,11 @@ impl< 'a > InputAbstraction< 'a > } // Placeholder methods based on the revised conceptual design. - // Implementation will be done in Increment 2. + // Implementation will be done in a future increment. + /// /// Peeks at the next character without consuming it. + /// pub fn peek_next_char( &self ) -> Option< char > { // TODO: Implement based on InputState @@ -71,7 +90,9 @@ impl< 'a > InputAbstraction< 'a > None } + /// /// Consumes and returns the next character. + /// pub fn next_char( &mut self ) -> Option< char > { // TODO: Implement based on InputState @@ -79,7 +100,9 @@ impl< 'a > InputAbstraction< 'a > None } + /// /// Peeks at the next full segment (relevant for `&[&str]` input). + /// pub fn peek_next_segment( &self ) -> Option< &'a str > { // TODO: Implement based on InputState @@ -87,7 +110,9 @@ impl< 'a > InputAbstraction< 'a > None } + /// /// Consumes and returns the next full segment (relevant for `&[&str]` input). + /// pub fn next_segment( &mut self ) -> Option< &'a str > { // TODO: Implement based on InputState @@ -95,10 +120,10 @@ impl< 'a > InputAbstraction< 'a > None } + /// /// Searches for the next occurrence of any of the provided string patterns. /// Returns the matched pattern and its location. - /// Searches for the next occurrence of any of the provided string patterns. - /// Returns the matched pattern and its location. + /// pub fn find_next_occurrence( &self, _patterns : &'a [&'a str] ) -> Option< ( &'a str, Location ) > { // TODO: Implement based on InputState and patterns @@ -106,8 +131,9 @@ impl< 'a > InputAbstraction< 'a > None } + /// /// Consumes the input up to a specified location and returns the consumed slice. - /// Consumes the input up to a specified location and returns the consumed slice. + /// pub fn consume_until( &mut self, _location : Location ) -> &'a str { // TODO: Implement based on InputState and target location @@ -115,8 +141,9 @@ impl< 'a > InputAbstraction< 'a > "" } + /// /// Consumes a specified number of characters/bytes. - /// Consumes a specified number of characters/bytes. + /// pub fn consume_len( &mut self, _len : usize ) -> &'a str { // TODO: Implement based on InputState and length @@ -124,7 +151,9 @@ impl< 'a > InputAbstraction< 'a > "" } + /// /// Returns the current parsing location. + /// pub fn current_location( &self ) -> Location { match &self.state @@ -134,7 +163,9 @@ impl< 'a > InputAbstraction< 'a > } } + /// /// Checks if there is any remaining input. + /// pub fn is_empty( &self ) -> bool { match &self.state @@ -155,7 +186,9 @@ impl< 'a > InputAbstraction< 'a > } } +/// /// Represents the type of delimiter found during parsing. +/// #[ derive( Debug, Clone, Copy, PartialEq, Eq ) ] pub enum DelimiterType { @@ -173,7 +206,9 @@ pub enum DelimiterType Whitespace, } +/// /// Represents a part of the input after splitting by a delimiter. +/// #[ derive( Debug, Clone, Copy, PartialEq, Eq ) ] pub enum InputPart< 'a > { diff --git a/module/move/unilang/src/ca/parsing/instruction.rs b/module/move/unilang/src/ca/parsing/instruction.rs index b8f957fe0e..2746dcbfd2 100644 --- a/module/move/unilang/src/ca/parsing/instruction.rs +++ b/module/move/unilang/src/ca/parsing/instruction.rs @@ -1,6 +1,10 @@ -//! Generic instruction representation for the unilang parser. +//! +//! Generic instruction representation for the command aggregator parser. +//! +/// /// Represents a parsed command instruction before validation against a command registry. +/// #[ derive( Debug, Clone, PartialEq, Eq ) ] pub struct GenericInstruction< 'a > { diff --git a/module/move/unilang/src/ca/parsing/mod.rs b/module/move/unilang/src/ca/parsing/mod.rs index 83ff147ee3..2aaaf54e57 100644 --- a/module/move/unilang/src/ca/parsing/mod.rs +++ b/module/move/unilang/src/ca/parsing/mod.rs @@ -1,6 +1,12 @@ -//! Parsing module for unilang CLI syntax. +//! +//! Parsing module for the command aggregator. +//! +/// Handles the input abstraction for the parser. pub mod input; +/// Defines the generic instruction format. pub mod instruction; +/// Defines parsing error types. pub mod error; +/// The main parsing engine. pub mod engine; \ No newline at end of file diff --git a/module/move/unilang/src/data.rs b/module/move/unilang/src/data.rs new file mode 100644 index 0000000000..096b7964e4 --- /dev/null +++ b/module/move/unilang/src/data.rs @@ -0,0 +1,84 @@ +//! +//! Core data structures for the Unilang framework. +//! + +// use former::Former; + +/// +/// Defines a command, including its name, arguments, and other metadata. +/// +/// This struct is the central piece of a command's definition, providing all +/// the necessary information for parsing, validation, and execution. +#[ derive( Debug, Clone/*, Former*/ ) ] +pub struct CommandDefinition +{ + /// The name of the command, used to invoke it from the command line. + pub name : String, + /// A brief, one-line description of what the command does. + pub description : String, + /// A list of arguments that the command accepts. + // #[ former( default ) ] + pub arguments : Vec< ArgumentDefinition >, +} + +/// +/// Defines an argument for a command. +/// +/// Each argument has a name, a description, a data type, and can be +/// marked as optional. +#[ derive( Debug, Clone/*, Former*/ ) ] +pub struct ArgumentDefinition +{ + /// The name of the argument, used for identification. + pub name : String, + /// A brief description of the argument's purpose. + pub description : String, + /// The expected data type of the argument (e.g., "String", "Integer"). + pub kind : String, + /// If `true`, the argument is not required for the command to execute. + // #[ former( default ) ] + pub optional : bool, +} + +/// +/// Represents a namespace for organizing commands. +/// +/// Namespaces allow for grouping related commands under a common prefix, +/// improving discoverability and reducing naming conflicts. +#[ derive( Debug, Clone/*, Former*/ ) ] +pub struct Namespace +{ + /// The name of the namespace. + pub name : String, + /// A list of commands belonging to this namespace. + // #[ former( default ) ] + pub commands : Vec< CommandDefinition >, +} + +/// +/// Represents the successful output of a command execution. +/// +/// This struct standardizes the way command results are returned, allowing +/// for consistent handling across different modalities. +#[ derive( Debug, Clone/*, Former*/ ) ] +pub struct OutputData +{ + /// The primary content of the output. + pub content : String, + /// The format of the content (e.g., "text", "json"). + pub format : String, +} + +/// +/// Represents an error that occurred during command execution. +/// +/// This struct provides a standardized way to report errors, including a +/// unique, machine-readable code and a human-readable message. +#[ derive( Debug, Clone/*, Former*/ ) ] +pub struct ErrorData +{ + /// A unique, machine-readable code for the error (e.g., "COMMAND_NOT_FOUND"). + pub code : String, + /// A human-readable message explaining the error. + pub message : String, +} \ No newline at end of file diff --git a/module/move/unilang/src/error.rs b/module/move/unilang/src/error.rs new file mode 100644 index 0000000000..3e9fd13fee --- /dev/null +++ b/module/move/unilang/src/error.rs @@ -0,0 +1,27 @@ +//! +//! The error types for the Unilang framework. +//! + +use crate::data::ErrorData; + +/// +/// The main error type for the Unilang framework. +/// +/// This enum consolidates all possible errors that can occur within the +/// framework, providing a single, consistent error handling mechanism. +#[ derive( Debug ) ] +pub enum Error +{ + /// An error that occurred during semantic analysis or execution, + /// containing detailed information about the failure. + Execution( ErrorData ), +} + +impl From< ErrorData > for Error +{ + /// Converts an `ErrorData` into an `Error`. + fn from( error : ErrorData ) -> Self + { + Error::Execution( error ) + } +} \ No newline at end of file diff --git a/module/move/unilang/src/help.rs b/module/move/unilang/src/help.rs new file mode 100644 index 0000000000..7ed37eec3e --- /dev/null +++ b/module/move/unilang/src/help.rs @@ -0,0 +1,47 @@ +//! +//! The help generation components for the Unilang framework. +//! + +use crate::data::CommandDefinition; + +/// +/// Generates help information for commands. +/// +/// This struct provides methods to create formatted help messages from +/// `CommandDefinition` instances, which can be displayed to the user. +#[ derive( Debug, Default ) ] +pub struct HelpGenerator; + +impl HelpGenerator +{ + /// + /// Creates a new `HelpGenerator`. + /// + pub fn new() -> Self + { + Self::default() + } + + /// + /// Generates a help string for a single command. + /// + /// The output is a formatted string containing the command's usage, + /// description, and a list of its arguments. + pub fn command( &self, command : &CommandDefinition ) -> String + { + let mut help = String::new(); + help.push_str( &format!( "Usage: {}\n", command.name ) ); + help.push_str( &format!( "\n {}\n", command.description ) ); + + if !command.arguments.is_empty() + { + help.push_str( "\nArguments:\n" ); + for arg in &command.arguments + { + help.push_str( &format!( " {:<15} {}\n", arg.name, arg.description ) ); + } + } + + help + } +} \ No newline at end of file diff --git a/module/move/unilang/src/interpreter.rs b/module/move/unilang/src/interpreter.rs new file mode 100644 index 0000000000..4bc6e3fec8 --- /dev/null +++ b/module/move/unilang/src/interpreter.rs @@ -0,0 +1,59 @@ +//! +//! The interpreter for the Unilang framework. +//! + +use crate::semantic::VerifiedCommand; +use crate::data::OutputData; +use crate::error::Error; + +/// +/// The execution context for a command. +/// +/// This struct holds all the necessary information for a command to be +/// executed, such as global arguments, configuration, and I/O streams. +#[ derive( Debug, Default ) ] +pub struct ExecutionContext +{ + // Placeholder for future context data +} + +/// +/// The interpreter for Unilang commands. +/// +/// This struct takes a list of verified commands and executes them sequentially. +#[ derive( Debug ) ] +pub struct Interpreter< 'a > +{ + commands : &'a [ VerifiedCommand ], +} + +impl< 'a > Interpreter< 'a > +{ + /// + /// Creates a new `Interpreter`. + /// + pub fn new( commands : &'a [ VerifiedCommand ] ) -> Self + { + Self { commands } + } + + /// + /// Runs the commands and returns a list of outputs or an error. + /// + /// This method iterates through the verified commands and, for now, + /// simulates their execution by printing them. + pub fn run( &self, _context : &mut ExecutionContext ) -> Result< Vec< OutputData >, Error > + { + let mut results = Vec::new(); + for command in self.commands + { + // For now, just print the command to simulate execution + println!( "Executing: {:?}", command ); + results.push( OutputData { + content : format!( "Successfully executed command: {}", command.definition.name ), + format : "text".to_string(), + } ); + } + Ok( results ) + } +} \ No newline at end of file diff --git a/module/move/unilang/src/lib.rs b/module/move/unilang/src/lib.rs index efcee12bf4..05af4a57e6 100644 --- a/module/move/unilang/src/lib.rs +++ b/module/move/unilang/src/lib.rs @@ -2,17 +2,25 @@ #![ doc( html_favicon_url = "https://raw.githubusercontent.com/Wandalen/wTools/alpha/asset/img/logo_v3_trans_square_icon_small_v2.ico" ) ] #![ doc( html_root_url = "https://docs.rs/unilang/latest/unilang/" ) ] #![ doc = include_str!( concat!( env!( "CARGO_MANIFEST_DIR" ), "/", "Readme.md" ) ) ] -// #![ doc = include_str!( concat!( env!( "CARGO_MANIFEST_DIR" ), "/", "doc/", "unilang.md" ) ) ] +#![ allow( clippy::mod_module_files ) ] -use mod_interface::mod_interface; +/// +/// A framework for creating multi-modal applications. +/// -pub mod ca; - -mod private {} - -crate::mod_interface! +/// Internal namespace. +mod private { - use super::ca; - own use super::ca::own::*; } +#[ cfg( feature = "enabled" ) ] +mod_interface::mod_interface! +{ + exposed mod data; + exposed mod registry; + exposed mod parsing; + exposed mod semantic; + exposed mod interpreter; + exposed mod error; + exposed mod help; +} diff --git a/module/move/unilang/src/parsing.rs b/module/move/unilang/src/parsing.rs new file mode 100644 index 0000000000..707a26f17e --- /dev/null +++ b/module/move/unilang/src/parsing.rs @@ -0,0 +1,331 @@ +//! +//! The parsing components for the Unilang framework, including the lexer and parser. +//! + +/// +/// Represents a token in the Unilang language. +/// +/// Tokens are the smallest individual units of meaning in the language, +/// produced by the `Lexer` and consumed by the `Parser`. +#[ derive( Debug, PartialEq, Clone ) ] +pub enum Token +{ + /// A command or argument name (e.g., `my_command`, `arg1`). + Identifier( String ), + /// A string literal (e.g., `"hello world"`). + String( String ), + /// An integer literal (e.g., `123`, `-45`). + Integer( i64 ), + /// A float literal (e.g., `1.23`). + Float( f64 ), + /// A boolean literal (`true` or `false`). + Boolean( bool ), + /// The command separator `;;`. + CommandSeparator, + /// Represents the end of the input string. + Eof, +} + +/// +/// The lexer for the Unilang language. +/// +/// The lexer is responsible for breaking the input string into a sequence of tokens. +#[ derive( Debug ) ] +pub struct Lexer< 'a > +{ + input : &'a str, + position : usize, + read_position : usize, + ch : u8, +} + +impl< 'a > Lexer< 'a > +{ + /// + /// Creates a new `Lexer` from an input string. + /// + pub fn new( input : &'a str ) -> Self + { + let mut lexer = Lexer + { + input, + position : 0, + read_position : 0, + ch : 0, + }; + lexer.read_char(); + lexer + } + + /// + /// Reads the next character from the input and advances the position. + /// + fn read_char( &mut self ) + { + if self.read_position >= self.input.len() + { + self.ch = 0; + } + else + { + self.ch = self.input.as_bytes()[ self.read_position ]; + } + self.position = self.read_position; + self.read_position += 1; + } + + /// + /// Returns the next token from the input. + /// + pub fn next_token( &mut self ) -> Token + { + self.skip_whitespace(); + + let token = match self.ch + { + b';' => + { + if self.peek_char() == b';' + { + self.read_char(); + Token::CommandSeparator + } + else + { + // Handle single semicolon as an identifier or error + let ident = self.read_identifier(); + return Token::Identifier( ident ); + } + } + b'a'..=b'z' | b'A'..=b'Z' | b'_' => + { + let ident = self.read_identifier(); + return match ident.as_str() + { + "true" => Token::Boolean( true ), + "false" => Token::Boolean( false ), + _ => Token::Identifier( ident ), + }; + } + b'"' => + { + let string = self.read_string(); + Token::String( string ) + } + b'0'..=b'9' => + { + return self.read_number(); + } + 0 => Token::Eof, + _ => Token::Identifier( self.read_identifier() ), + }; + + self.read_char(); + token + } + + /// + /// Skips any whitespace characters. + /// + fn skip_whitespace( &mut self ) + { + while self.ch.is_ascii_whitespace() + { + self.read_char(); + } + } + + /// + /// Reads an identifier from the input. + /// + fn read_identifier( &mut self ) -> String + { + let position = self.position; + while self.ch.is_ascii_alphanumeric() || self.ch == b'_' + { + self.read_char(); + } + self.input[ position..self.position ].to_string() + } + + /// + /// Reads a string literal from the input. + /// + fn read_string( &mut self ) -> String + { + let position = self.position + 1; + loop + { + self.read_char(); + if self.ch == b'"' || self.ch == 0 + { + break; + } + } + self.input[ position..self.position ].to_string() + } + + /// + /// Reads a number literal (integer or float) from the input. + /// + fn read_number( &mut self ) -> Token + { + let position = self.position; + let mut is_float = false; + while self.ch.is_ascii_digit() + { + self.read_char(); + } + if self.ch == b'.' && self.peek_char().is_ascii_digit() + { + is_float = true; + self.read_char(); + while self.ch.is_ascii_digit() + { + self.read_char(); + } + } + + let number_str = &self.input[ position..self.position ]; + if is_float + { + Token::Float( number_str.parse().unwrap() ) + } + else + { + Token::Integer( number_str.parse().unwrap() ) + } + } + + /// + /// Peeks at the next character in the input without consuming it. + /// + fn peek_char( &self ) -> u8 + { + if self.read_position >= self.input.len() + { + 0 + } + else + { + self.input.as_bytes()[ self.read_position ] + } + } +} + +/// +/// Represents a single command statement in the AST. +/// +#[ derive( Debug, PartialEq, Clone ) ] +pub struct Statement +{ + /// The command identifier. + pub command : String, + /// The arguments for the command. + pub args : Vec< Token >, +} + +/// +/// Represents a program, which is a series of statements. +/// +/// This is the root of the Abstract Syntax Tree (AST). +#[ derive( Debug, Default ) ] +pub struct Program +{ + /// The statements that make up the program. + pub statements : Vec< Statement >, +} + +/// +/// The parser for the Unilang language. +/// +/// The parser takes a `Lexer` and produces an Abstract Syntax Tree (AST) +/// represented by a `Program` struct. +#[ derive( Debug ) ] +pub struct Parser< 'a > +{ + lexer : Lexer< 'a >, + current_token : Token, + peek_token : Token, +} + +impl< 'a > Parser< 'a > +{ + /// + /// Creates a new `Parser` from a `Lexer`. + /// + pub fn new( lexer : Lexer< 'a > ) -> Self + { + let mut parser = Parser + { + lexer, + current_token : Token::Eof, + peek_token : Token::Eof, + }; + // Prime the parser with the first two tokens. + parser.next_token(); + parser.next_token(); + parser + } + + /// + /// Advances the parser to the next token. + /// + fn next_token( &mut self ) + { + self.current_token = self.peek_token.clone(); + self.peek_token = self.lexer.next_token(); + } + + /// + /// Parses the entire input and returns a `Program` AST. + /// + pub fn parse_program( &mut self ) -> Program + { + let mut program = Program::default(); + + while self.current_token != Token::Eof + { + if let Some( statement ) = self.parse_statement() + { + program.statements.push( statement ); + } + else + { + // If it's not a valid statement, skip the token to avoid infinite loops on invalid input. + self.next_token(); + } + } + + program + } + + /// + /// Parses a single statement. + /// + fn parse_statement( &mut self ) -> Option< Statement > + { + if let Token::Identifier( command ) = self.current_token.clone() + { + let mut args = Vec::new(); + self.next_token(); // Consume command identifier. + while self.current_token != Token::CommandSeparator && self.current_token != Token::Eof + { + args.push( self.current_token.clone() ); + self.next_token(); + } + + // Consume the separator if it exists, to be ready for the next statement. + if self.current_token == Token::CommandSeparator + { + self.next_token(); + } + + Some( Statement { command, args } ) + } + else + { + None + } + } +} \ No newline at end of file diff --git a/module/move/unilang/src/registry.rs b/module/move/unilang/src/registry.rs new file mode 100644 index 0000000000..673fdcb646 --- /dev/null +++ b/module/move/unilang/src/registry.rs @@ -0,0 +1,84 @@ +//! +//! The command registry for the Unilang framework. +//! + +use crate::data::CommandDefinition; +use std::collections::HashMap; + +/// +/// A registry for commands, responsible for storing and managing all +/// available command definitions. +/// +#[ derive( Debug, Default ) ] +pub struct CommandRegistry +{ + /// A map of command names to their definitions. + pub commands : HashMap< String, CommandDefinition >, +} + +impl CommandRegistry +{ + /// + /// Creates a new, empty `CommandRegistry`. + /// + pub fn new() -> Self + { + Self::default() + } + + /// + /// Registers a command, adding it to the registry. + /// + /// If a command with the same name already exists, it will be overwritten. + pub fn register( &mut self, command : CommandDefinition ) + { + self.commands.insert( command.name.clone(), command ); + } + + /// + /// Returns a builder for creating a `CommandRegistry` with a fluent API. + /// + pub fn builder() -> CommandRegistryBuilder + { + CommandRegistryBuilder::new() + } +} + +/// +/// A builder for the `CommandRegistry`. +/// +/// This provides a convenient way to construct a `CommandRegistry` by +/// chaining `command` calls. +#[ derive( Debug, Default ) ] +pub struct CommandRegistryBuilder +{ + registry : CommandRegistry, +} + +impl CommandRegistryBuilder +{ + /// + /// Creates a new `CommandRegistryBuilder`. + /// + pub fn new() -> Self + { + Self::default() + } + + /// + /// Adds a command to the registry being built. + /// + pub fn command( mut self, command : CommandDefinition ) -> Self + { + self.registry.register( command ); + self + } + + /// + /// Builds and returns the `CommandRegistry`. + /// + pub fn build( self ) -> CommandRegistry + { + self.registry + } +} \ No newline at end of file diff --git a/module/move/unilang/src/semantic.rs b/module/move/unilang/src/semantic.rs new file mode 100644 index 0000000000..78f0d983d4 --- /dev/null +++ b/module/move/unilang/src/semantic.rs @@ -0,0 +1,125 @@ +//! +//! The semantic analyzer for the Unilang framework. +//! + +use crate::data::{ CommandDefinition, ErrorData }; +use crate::error::Error; +use crate::parsing::{ Program, Statement, Token }; +use crate::registry::CommandRegistry; +use std::collections::HashMap; + +/// +/// Represents a command that has been verified against the command registry. +/// +/// This struct holds the command's definition and the arguments provided +/// by the user, ensuring that the command is valid and ready for execution. +#[ derive( Debug, Clone ) ] +pub struct VerifiedCommand +{ + /// The definition of the command. + pub definition : CommandDefinition, + /// The arguments provided for the command, mapped by name. + pub arguments : HashMap< String, Token >, +} + +/// +/// The semantic analyzer, responsible for validating the parsed program. +/// +/// The analyzer checks the program against the command registry to ensure +/// that commands exist, arguments are correct, and types match. +#[ derive( Debug ) ] +pub struct SemanticAnalyzer< 'a > +{ + program : &'a Program, + registry : &'a CommandRegistry, +} + +impl< 'a > SemanticAnalyzer< 'a > +{ + /// + /// Creates a new `SemanticAnalyzer`. + /// + pub fn new( program : &'a Program, registry : &'a CommandRegistry ) -> Self + { + Self { program, registry } + } + + /// + /// Analyzes the program and returns a list of verified commands or an error. + /// + /// This is the main entry point for semantic analysis, processing each + /// statement in the program. + pub fn analyze( &self ) -> Result< Vec< VerifiedCommand >, Error > + { + let mut verified_commands = Vec::new(); + + for statement in &self.program.statements + { + let command_def = self.registry.commands.get( &statement.command ).ok_or_else( || ErrorData { + code : "COMMAND_NOT_FOUND".to_string(), + message : format!( "Command not found: {}", statement.command ), + } )?; + + let arguments = self.bind_arguments( statement, command_def )?; + verified_commands.push( VerifiedCommand { + definition : ( *command_def ).clone(), + arguments, + } ); + } + + Ok( verified_commands ) + } + + /// + /// Binds the arguments from a statement to the command definition. + /// + /// This function checks for the correct number and types of arguments, + /// returning an error if validation fails. + fn bind_arguments( &self, statement : &Statement, command_def : &CommandDefinition ) -> Result< HashMap< String, Token >, Error > + { + let mut bound_args = HashMap::new(); + let mut arg_iter = statement.args.iter().peekable(); + + for arg_def in &command_def.arguments + { + if let Some( token ) = arg_iter.next() + { + // Basic type checking + let type_matches = match ( &token, arg_def.kind.as_str() ) + { + ( Token::String( _ ), "String" ) => true, + ( Token::Integer( _ ), "Integer" ) => true, + ( Token::Float( _ ), "Float" ) => true, + ( Token::Boolean( _ ), "Boolean" ) => true, + _ => false, + }; + + if !type_matches + { + return Err( ErrorData { + code : "INVALID_ARGUMENT_TYPE".to_string(), + message : format!( "Invalid type for argument '{}'. Expected {}, got {:?}", arg_def.name, arg_def.kind, token ), + }.into() ); + } + bound_args.insert( arg_def.name.clone(), token.clone() ); + } + else if !arg_def.optional + { + return Err( ErrorData { + code : "MISSING_ARGUMENT".to_string(), + message : format!( "Missing required argument: {}", arg_def.name ), + }.into() ); + } + } + + if arg_iter.next().is_some() + { + return Err( ErrorData { + code : "TOO_MANY_ARGUMENTS".to_string(), + message : "Too many arguments provided".to_string(), + }.into() ); + } + + Ok( bound_args ) + } +} \ No newline at end of file diff --git a/module/move/unilang/tests/inc/mod.rs b/module/move/unilang/tests/inc/mod.rs index f81253a5ef..ca5f7bea09 100644 --- a/module/move/unilang/tests/inc/mod.rs +++ b/module/move/unilang/tests/inc/mod.rs @@ -1,14 +1,5 @@ -use super::*; -use test_tools::exposed::*; +//! +//! Incremental tests for the Unilang crate. +//! -mod unit_tests; - -mod integration_tests; - -mod parsing_structures_test; - - -// mod parser; -// mod grammar; -// mod executor; -// mod commands_aggregator; +pub mod phase1; diff --git a/module/move/unilang/tests/inc/phase1/foundational_setup.rs b/module/move/unilang/tests/inc/phase1/foundational_setup.rs new file mode 100644 index 0000000000..ea1caf7cb2 --- /dev/null +++ b/module/move/unilang/tests/inc/phase1/foundational_setup.rs @@ -0,0 +1,18 @@ +//! +//! Tests for the foundational setup of the crate. +//! + +// The `super::*` import is not used in this file, but it is a common +// pattern in tests, so we keep it for consistency. +#[ allow( unused_imports ) ] +use super::*; + +/// +/// A compile-time test to ensure that the basic test case compiles. +/// +#[ test ] +fn try_build() +{ + let t = test_tools::compiletime::TestCases::new(); + t.pass( "tests/inc/phase1/try_build.rs" ); +} \ No newline at end of file diff --git a/module/move/unilang/tests/inc/phase1/full_pipeline_test.rs b/module/move/unilang/tests/inc/phase1/full_pipeline_test.rs new file mode 100644 index 0000000000..a53eef063d --- /dev/null +++ b/module/move/unilang/tests/inc/phase1/full_pipeline_test.rs @@ -0,0 +1,239 @@ +//! +//! Integration tests for the full Phase 1 pipeline. +//! + +use unilang::data::{ ArgumentDefinition, CommandDefinition }; +use unilang::parsing::{ Lexer, Parser, Token }; +use unilang::registry::CommandRegistry; +use unilang::semantic::SemanticAnalyzer; +use unilang::interpreter::{ Interpreter, ExecutionContext }; + +/// +/// Tests for the `Lexer`. +/// +// Test Matrix Rows: T1.1, T1.2, T1.3, T1.4 +#[test] +fn lexer_tests() +{ + // T1.1 + let input = "command \"arg1\" 123 1.23 true"; + let mut lexer = Lexer::new( input ); + assert_eq!( lexer.next_token(), Token::Identifier( "command".to_string() ) ); + assert_eq!( lexer.next_token(), Token::String( "arg1".to_string() ) ); + assert_eq!( lexer.next_token(), Token::Integer( 123 ) ); + assert_eq!( lexer.next_token(), Token::Float( 1.23 ) ); + assert_eq!( lexer.next_token(), Token::Boolean( true ) ); + assert_eq!( lexer.next_token(), Token::Eof ); + + // T1.2 + let input = "cmd1 ;; cmd2"; + let mut lexer = Lexer::new( input ); + assert_eq!( lexer.next_token(), Token::Identifier( "cmd1".to_string() ) ); + assert_eq!( lexer.next_token(), Token::CommandSeparator ); + assert_eq!( lexer.next_token(), Token::Identifier( "cmd2".to_string() ) ); + assert_eq!( lexer.next_token(), Token::Eof ); + + // T1.3 + let input = " "; + let mut lexer = Lexer::new( input ); + assert_eq!( lexer.next_token(), Token::Eof ); + + // T1.4 + let input = "\"\""; + let mut lexer = Lexer::new( input ); + assert_eq!( lexer.next_token(), Token::String( "".to_string() ) ); + assert_eq!( lexer.next_token(), Token::Eof ); +} + +/// +/// Tests for the `Parser`. +/// +// Test Matrix Rows: T2.1, T2.2, T2.3 +#[test] +fn parser_tests() +{ + // T2.1 + let input = "command \"arg1\""; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + assert_eq!( program.statements.len(), 1 ); + assert_eq!( program.statements[ 0 ].command, "command" ); + assert_eq!( program.statements[ 0 ].args, vec![ Token::String( "arg1".to_string() ) ] ); + + // T2.2 + let input = "cmd1 1 ;; cmd2 2"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + assert_eq!( program.statements.len(), 2 ); + assert_eq!( program.statements[ 0 ].command, "cmd1" ); + assert_eq!( program.statements[ 0 ].args, vec![ Token::Integer( 1 ) ] ); + assert_eq!( program.statements[ 1 ].command, "cmd2" ); + assert_eq!( program.statements[ 1 ].args, vec![ Token::Integer( 2 ) ] ); + + // T2.3 + let input = ""; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + assert_eq!( program.statements.len(), 0 ); +} + +/// +/// Tests for the `SemanticAnalyzer`. +/// +// Test Matrix Rows: T3.1, T3.2, T3.3, T3.4, T3.5 +#[test] +fn semantic_analyzer_tests() +{ + let mut registry = CommandRegistry::new(); + registry.register( CommandDefinition { + name : "test_cmd".to_string(), + description : "A test command".to_string(), + arguments : vec![ + ArgumentDefinition { + name : "arg1".to_string(), + description : "A string argument".to_string(), + kind : "String".to_string(), + optional : false, + }, + ArgumentDefinition { + name : "arg2".to_string(), + description : "An integer argument".to_string(), + kind : "Integer".to_string(), + optional : true, + }, + ], + } ); + + // T3.1 + let input = "test_cmd \"hello\" 123"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + let analyzer = SemanticAnalyzer::new( &program, ®istry ); + let verified = analyzer.analyze().unwrap(); + assert_eq!( verified.len(), 1 ); + assert_eq!( verified[ 0 ].definition.name, "test_cmd" ); + assert_eq!( verified[ 0 ].arguments.get( "arg1" ).unwrap(), &Token::String( "hello".to_string() ) ); + assert_eq!( verified[ 0 ].arguments.get( "arg2" ).unwrap(), &Token::Integer( 123 ) ); + + // T3.2 + let input = "unknown_cmd"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + let analyzer = SemanticAnalyzer::new( &program, ®istry ); + let error = analyzer.analyze().unwrap_err(); + assert!( matches!( error, unilang::error::Error::Execution( data ) if data.code == "COMMAND_NOT_FOUND" ) ); + + // T3.3 + let input = "test_cmd"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + let analyzer = SemanticAnalyzer::new( &program, ®istry ); + let error = analyzer.analyze().unwrap_err(); + assert!( matches!( error, unilang::error::Error::Execution( data ) if data.code == "MISSING_ARGUMENT" ) ); + + // T3.4 + let input = "test_cmd 123"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + let analyzer = SemanticAnalyzer::new( &program, ®istry ); + let error = analyzer.analyze().unwrap_err(); + assert!( matches!( error, unilang::error::Error::Execution( data ) if data.code == "INVALID_ARGUMENT_TYPE" ) ); + + // T3.5 + let input = "test_cmd \"hello\" 123 456"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + let analyzer = SemanticAnalyzer::new( &program, ®istry ); + let error = analyzer.analyze().unwrap_err(); + assert!( matches!( error, unilang::error::Error::Execution( data ) if data.code == "TOO_MANY_ARGUMENTS" ) ); +} + +/// +/// Tests for the `Interpreter`. +/// +// Test Matrix Rows: T4.1, T4.2 +#[test] +fn interpreter_tests() +{ + let mut registry = CommandRegistry::new(); + registry.register( CommandDefinition { + name : "cmd1".to_string(), + description : "".to_string(), + arguments : vec![], + } ); + registry.register( CommandDefinition { + name : "cmd2".to_string(), + description : "".to_string(), + arguments : vec![], + } ); + + // T4.1 + let input = "cmd1"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + let analyzer = SemanticAnalyzer::new( &program, ®istry ); + let verified = analyzer.analyze().unwrap(); + let interpreter = Interpreter::new( &verified ); + let mut context = ExecutionContext::default(); + let result = interpreter.run( &mut context ).unwrap(); + assert_eq!( result.len(), 1 ); + + // T4.2 + let input = "cmd1 ;; cmd2"; + let lexer = Lexer::new( input ); + let mut parser = Parser::new( lexer ); + let program = parser.parse_program(); + let analyzer = SemanticAnalyzer::new( &program, ®istry ); + let verified = analyzer.analyze().unwrap(); + let interpreter = Interpreter::new( &verified ); + let mut context = ExecutionContext::default(); + let result = interpreter.run( &mut context ).unwrap(); + assert_eq!( result.len(), 2 ); +} + +/// +/// Tests for the `HelpGenerator`. +/// +// Test Matrix Rows: T5.1, T5.2 +#[test] +fn help_generator_tests() +{ + let help_gen = unilang::help::HelpGenerator::new(); + + // T5.1 + let cmd_with_args = CommandDefinition { + name : "test_cmd".to_string(), + description : "A test command".to_string(), + arguments : vec![ ArgumentDefinition { + name : "arg1".to_string(), + description : "A string argument".to_string(), + kind : "String".to_string(), + optional : false, + } ], + }; + let help_text = help_gen.command( &cmd_with_args ); + assert!( help_text.contains( "Usage: test_cmd" ) ); + assert!( help_text.contains( "A test command" ) ); + assert!( help_text.contains( "Arguments:" ) ); + assert!( help_text.contains( "arg1" ) ); + + // T5.2 + let cmd_without_args = CommandDefinition { + name : "simple_cmd".to_string(), + description : "A simple command".to_string(), + arguments : vec![], + }; + let help_text = help_gen.command( &cmd_without_args ); + assert!( help_text.contains( "Usage: simple_cmd" ) ); + assert!( help_text.contains( "A simple command" ) ); + assert!( !help_text.contains( "Arguments:" ) ); +} \ No newline at end of file diff --git a/module/move/unilang/tests/inc/phase1/mod.rs b/module/move/unilang/tests/inc/phase1/mod.rs new file mode 100644 index 0000000000..cd03212ed3 --- /dev/null +++ b/module/move/unilang/tests/inc/phase1/mod.rs @@ -0,0 +1,6 @@ +//! +//! Tests for Phase 1 of the Unilang implementation. +//! + +pub mod foundational_setup; +pub mod full_pipeline_test; \ No newline at end of file diff --git a/module/move/unilang/tests/inc/phase1/try_build.rs b/module/move/unilang/tests/inc/phase1/try_build.rs new file mode 100644 index 0000000000..e706833b80 --- /dev/null +++ b/module/move/unilang/tests/inc/phase1/try_build.rs @@ -0,0 +1,4 @@ +fn main() +{ + println!( "Hello, world!" ); +} \ No newline at end of file diff --git a/module/move/unilang/tests/tests.rs b/module/move/unilang/tests/tests.rs index a2dbca59d0..de28661ee5 100644 --- a/module/move/unilang/tests/tests.rs +++ b/module/move/unilang/tests/tests.rs @@ -1,11 +1,5 @@ -//! All tests. - -// #![ deny( rust_2018_idioms ) ] -// #![ deny( missing_debug_implementations ) ] -// #![ deny( missing_docs ) ] -#![ allow( unused_imports ) ] - -/// System under test. -use unilang as the_module; +//! +//! The test suite for the Unilang crate. +//! mod inc;