Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 39 additions & 25 deletions src/assembler/lexer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -91,45 +91,57 @@
}
}

fn check_comma(
token: &Token,
instr: &mut MaybeUnresolvedInstr,

Check warning

Code scanning / clippy

unused variable: instr Warning

unused variable: instr
) -> Result<(), anyhow::Error> {
if let Token::COMMA = token {
Ok(())
} else {
bail!("NOT COMMA")
}

}

let (opcode, sequence) = match op {
Op::ADD => (
ADD_OPCODE,
[check_reg::<9>, check_reg::<6>, check_reg_or_offset::<0, 5>].as_slice(),
[check_reg::<9>, check_comma, check_reg::<6>, check_comma, check_reg_or_offset::<0, 5>].as_slice(),
),
Op::AND => (
AND_OPCODE,
[check_reg::<9>, check_reg::<6>, check_reg_or_offset::<0, 5>].as_slice()),
[check_reg::<9>, check_comma, check_reg::<6>, check_comma, check_reg_or_offset::<0, 5>].as_slice()),
Op::LD => (
ALL_LOAD_OPCODES[0],
[check_reg::<9>, check_offset::<0, 9>].as_slice()
[check_reg::<9>, check_comma, check_offset::<0, 9>].as_slice()
),
Op::LDI => (
ALL_LOAD_OPCODES[1],
[check_reg::<9>, check_offset::<0, 9>].as_slice()
[check_reg::<9>, check_comma, check_offset::<0, 9>].as_slice()
),
Op::LDR => (
ALL_LOAD_OPCODES[2],
[check_reg::<9>, check_reg::<6>, check_offset::<0, 6>].as_slice()
[check_reg::<9>, check_comma, check_reg::<6>, check_comma, check_offset::<0, 6>].as_slice()
),
Op::LEA => (
ALL_LOAD_OPCODES[3],
[check_reg::<9>, check_offset::<0, 9>].as_slice()
[check_reg::<9>, check_comma, check_offset::<0, 9>].as_slice()
),
Op::ST => (
ALL_STORE_OPCODES[0],
[check_reg::<9>, check_offset::<0, 9>].as_slice()
[check_reg::<9>, check_comma, check_offset::<0, 9>].as_slice()
),
Op::STI => (
ALL_STORE_OPCODES[1],
[check_reg::<9>, check_offset::<0, 9>].as_slice()
[check_reg::<9>, check_comma, check_offset::<0, 9>].as_slice()
),
Op::STR => (
ALL_STORE_OPCODES[2],
[check_reg::<9>, check_reg::<6>, check_offset::<0, 6>].as_slice()
[check_reg::<9>, check_comma, check_reg::<6>, check_comma, check_offset::<0, 6>].as_slice()
),
Op::NOT => (
NOT_OPCODE,
[check_reg::<9>, check_reg::<6>].as_slice()
[check_reg::<9>, check_comma, check_reg::<6>].as_slice()
),
_ => todo!(),
};
Expand Down Expand Up @@ -186,7 +198,9 @@
Token::STRING("LABEL1".to_string()),
Token::INSTR(Op::AND),
Token::REGISTER(RegAddr::Zero),
Token::COMMA,
Token::REGISTER(RegAddr::One),
Token::COMMA,
Token::REGISTER(RegAddr::Zero)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -197,7 +211,9 @@
let test_vec = vec![
Token::INSTR(Op::AND),
Token::REGISTER(RegAddr::Three),
Token::COMMA,
Token::REGISTER(RegAddr::One),
Token::COMMA,
Token::NUM(0b10011)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -212,7 +228,9 @@
Token::STRING("LABEL1".to_string()),
Token::INSTR(Op::ADD),
Token::REGISTER(RegAddr::Zero),
Token::COMMA,
Token::REGISTER(RegAddr::One),
Token::COMMA,
Token::REGISTER(RegAddr::Zero)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -223,7 +241,9 @@
let test_vec = vec![
Token::INSTR(Op::ADD),
Token::REGISTER(RegAddr::Three),
Token::COMMA,
Token::REGISTER(RegAddr::One),
Token::COMMA,
Token::NUM(0b10011)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -237,6 +257,7 @@
let test_vec = vec![
Token::INSTR(Op::LD),
Token::REGISTER(RegAddr::Five),
Token::COMMA,
Token::NUM(0b000111000)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -247,6 +268,7 @@
let test_vec = vec![
Token::INSTR(Op::LDI),
Token::REGISTER(RegAddr::Five),
Token::COMMA,
Token::NUM(0b000111000)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -257,7 +279,9 @@
let test_vec = vec![
Token::INSTR(Op::LDR),
Token::REGISTER(RegAddr::Five),
Token::COMMA,
Token::REGISTER(RegAddr::Two),
Token::COMMA,
Token::NUM(0b111000)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -268,6 +292,7 @@
let test_vec = vec![
Token::INSTR(Op::LEA),
Token::REGISTER(RegAddr::Five),
Token::COMMA,
Token::NUM(0b000111000)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -281,6 +306,7 @@
let test_vec = vec![
Token::INSTR(Op::ST),
Token::REGISTER(RegAddr::Five),
Token::COMMA,
Token::NUM(0b000111000)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -291,6 +317,7 @@
let test_vec = vec![
Token::INSTR(Op::STI),
Token::REGISTER(RegAddr::Five),
Token::COMMA,
Token::NUM(0b000111000)
];
let (label, instr) = lexer(&test_vec);
Expand All @@ -301,27 +328,14 @@
let test_vec = vec![
Token::INSTR(Op::STR),
Token::REGISTER(RegAddr::Five),
Token::COMMA,
Token::REGISTER(RegAddr::Two),
Token::COMMA,
Token::NUM(0b111000)
];
let (label, instr) = lexer(&test_vec);

assert_eq!(label, None);
assert_eq!(instr.unwrap().first().unwrap().value, 0b0111101010111000);
}

#[test]
fn lex_not_instr() {
let test_vec = vec![
Token::INSTR(Op::NOT),
Token::REGISTER(RegAddr::Five),
Token::REGISTER(RegAddr::Zero),
];
let (label, instr) = lexer(&test_vec);

assert_eq!(label, None);
// This is the value that should be produced. Currently this fails, as there is no way to
// insert arbitrary bits into instructions when forming them.
assert_eq!(instr.unwrap().first().unwrap().value, 0b1001101000111111);
}
}
43 changes: 41 additions & 2 deletions src/assembler/mod.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use crate::defs::{LC3Word, Op, PseudoOp, RegAddr};
use strum::EnumIs;
use strum_macros::EnumDiscriminants;
use anyhow::Result;

pub mod lexer;
pub mod tokenizer;
Expand All @@ -25,10 +26,48 @@
COMMA,
}

pub fn translate_line(line: &str) -> MaybeUnresolvedInstr {
todo!()
pub fn translate_line(line: &str) -> Result<Vec<MaybeUnresolvedInstr>> {
let (instruction, comment) = line.split_once(';').unwrap();

Check warning

Code scanning / clippy

unused variable: comment Warning

unused variable: comment
// The UNCA examples don't use spaces between instruction args, so we should always just add a
// space after a comma to make sure we're able to parse properly
let instruction = instruction.replace(',', ", ");
let splits = instruction.split_ascii_whitespace();
let mut token_chain: Vec<Token> = Vec::new();

for split in splits {
let mut tokens = tokenizer::tokenize(split)?;
token_chain.append(&mut tokens);
}

let (label, chain) = lexer::lexer(&token_chain);

Check warning

Code scanning / clippy

unused variable: label Warning

unused variable: label

//TODO: add label to symbol table
chain
}

/// Resolves a `MaybeUnresolvedInstr` into its final output form.
/// NOTE(review): not yet implemented — currently always panics via `todo!()`.
pub fn resolve_instr(instr: MaybeUnresolvedInstr) -> String {
    todo!()
}

#[cfg(test)]
mod test {
    use super::*;

    /// A conventionally spaced AND instruction assembles to a single word.
    #[test]
    fn translate_instr() {
        let line: &str = "AND R0, R1, R0;";
        let words = translate_line(line).unwrap();

        assert_eq!(words.len(), 1);
        assert_eq!(words[0].value, 0b0101000001000000);
    }

    /// The same instruction without spaces after the commas (UNCA example
    /// style) must assemble identically.
    #[test]
    fn translate_instr_no_space() {
        let line: &str = "AND R0,R1,R0;";
        let words = translate_line(line).unwrap();

        assert_eq!(words.len(), 1);
        assert_eq!(words[0].value, 0b0101000001000000);
    }
}
Loading