diff --git a/.vscode/settings.json b/.vscode/settings.json index e7f36e0bb4..1078f85852 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -16,4 +16,7 @@ // "rust-analyzer.check.allTargets": true, // we don't want the formatter to kick in while we're working on dioxus itself "dioxus.formatOnSave": "disabled", + "cSpell.words": [ + "subsecond" + ], } diff --git a/Cargo.lock b/Cargo.lock index bb482827a2..eb0c74f41b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,13 +4,19 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ - "gimli 0.31.1", + "gimli 0.28.1", ] +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + [[package]] name = "adler2" version = "2.0.0" @@ -75,7 +81,7 @@ dependencies = [ "once_cell", "serde", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -286,7 +292,7 @@ dependencies = [ "aws-smithy-types", "base64 0.21.7", "bcder", - "bitflags 2.6.0", + "bitflags 2.9.0", "bytes", "chrono", "clap", @@ -1498,17 +1504,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", + "cc", "cfg-if", "libc", - "miniz_oxide", - "object 0.36.5", + "miniz_oxide 0.7.4", + "object 0.32.2", "rustc-demangle", - "windows-targets 0.52.6", ] [[package]] @@ -1596,13 +1602,22 @@ dependencies = [ "num-traits", ] +[[package]] +name = "bincode" +version = 
"1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -1654,9 +1669,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" dependencies = [ "serde", ] @@ -1966,7 +1981,7 @@ version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ca26ef0159422fb77631dc9d17b102f253b876fe1586b03b803e63a309b4ee2" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cairo-sys-rs", "glib", "libc", @@ -2081,6 +2096,20 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "cargo_metadata" +version = "0.19.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" +dependencies = [ + "camino", + "cargo-platform", + "semver 1.0.23", + "serde", + "serde_json", + "thiserror 2.0.12", +] + [[package]] name = "cargo_toml" version = "0.20.5" @@ -2183,7 +2212,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ "smallvec", - "target-lexicon", + "target-lexicon 0.12.16", ] [[package]] @@ -2278,9 +2307,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" dependencies = [ "clap_builder", "clap_derive", @@ -2288,9 +2317,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" dependencies = [ "anstream", "anstyle", @@ -2301,9 +2330,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -2359,7 +2388,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f79398230a6e2c08f5c9760610eb6924b52aa9e7950a619602baba59dcbbdbb2" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block", "cocoa-foundation 0.2.0", "core-foundation 0.10.0", @@ -2389,7 +2418,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e14045fb83be07b5acf1c0884b2180461635b433455fa35d1cd6f17f1450679d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block", "core-foundation 0.10.0", "core-graphics-types 0.2.0", @@ -2413,6 +2442,33 @@ dependencies = [ "unicode-width 0.1.14", ] +[[package]] +name = "color-eyre" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55146f5e46f237f7423d74111267d4597b59b0dad0ffaf7303bce9945d843ad5" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors 
3.5.0", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" +dependencies = [ + "once_cell", + "owo-colors 3.5.0", + "tracing-core", + "tracing-error", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -2582,7 +2638,7 @@ version = "0.6.3" dependencies = [ "const-serialize", "const-serialize-macro", - "rand 0.8.5", + "rand 0.9.0", "serde", ] @@ -2725,7 +2781,7 @@ version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa95a34622365fa5bbf40b20b75dba8dfa8c94c734aea8ac9a5ca38af14316f1" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "core-foundation 0.10.0", "core-graphics-types 0.2.0", "foreign-types 0.5.0", @@ -2749,7 +2805,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d44a101f213f6c4cdc1853d4b78aef6db6bdfa3468798cc1d9912f4735013eb" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "core-foundation 0.10.0", "libc", ] @@ -2935,7 +2991,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "crossterm_winapi", "futures-core", "mio 1.0.3", @@ -3193,7 +3249,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e3d747f100290a1ca24b752186f61f6637e1deffe3bf6320de6fcb29510a307" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "libloading 0.8.6", "winapi", ] @@ -3261,6 +3317,15 @@ dependencies = [ "matches", ] +[[package]] +name = "dbg_breakpoint" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb333c8dc699012c0694fe7c7eca7d625f2bceeeef44fd1de39008d6a6abf777" +dependencies = [ + "libc", +] + [[package]] name = "dbl" 
version = "0.3.2" @@ -3467,7 +3532,7 @@ name = "dioxus-check" version = "0.6.3" dependencies = [ "indoc", - "owo-colors", + "owo-colors 4.1.0", "pretty_assertions", "proc-macro2", "quote", @@ -3488,7 +3553,7 @@ dependencies = [ "built", "cargo-config2", "cargo-generate", - "cargo_metadata", + "cargo_metadata 0.18.1", "cargo_toml", "chrono", "clap", @@ -3527,14 +3592,15 @@ dependencies = [ "hyper-util", "ignore", "include_dir", - "itertools 0.13.0", + "itertools 0.14.0", "krates", "local-ip-address", "log", "manganis", "manganis-core", + "memmap", "memoize", - "notify", + "notify 6.1.1", "object 0.36.5", "once_cell", "open", @@ -3549,9 +3615,12 @@ dependencies = [ "rustls 0.23.19", "serde", "serde_json", + "shell-words", "strum 0.26.3", + "subsecond-cli-support", "syn 2.0.90", "tar", + "target-lexicon 0.13.2", "tauri-bundler", "tauri-utils", "tempfile", @@ -3666,14 +3735,15 @@ dependencies = [ "generational-box", "longest-increasing-subsequence", "pretty_assertions", - "rand 0.8.5", + "rand 0.9.0", "reqwest 0.12.9", "rustc-hash 1.1.0", "rustversion", "serde", "slab", "slotmap", - "sysinfo", + "subsecond", + "sysinfo 0.33.1", "tokio", "tracing", "tracing-fluent-assertions", @@ -3768,8 +3838,11 @@ dependencies = [ "dioxus-core", "dioxus-devtools-types", "dioxus-signals", + "libc", + "libloading 0.8.6", "serde", "serde_json", + "subsecond", "tokio", "tracing", "tungstenite 0.23.0", @@ -3782,6 +3855,7 @@ version = "0.6.3" dependencies = [ "dioxus-core", "serde", + "subsecond-types", ] [[package]] @@ -3806,7 +3880,7 @@ dependencies = [ name = "dioxus-dx-wire-format" version = "0.6.3" dependencies = [ - "cargo_metadata", + "cargo_metadata 0.18.1", "serde", "serde_json", ] @@ -3826,7 +3900,7 @@ dependencies = [ "http-range", "openssl", "ouroboros", - "rand 0.8.5", + "rand 0.9.0", "reqwest 0.12.9", "separator", "serde", @@ -4197,7 +4271,7 @@ dependencies = [ "generational-box", "once_cell", "parking_lot", - "rand 0.8.5", + "rand 0.9.0", "reqwest 0.12.9", "rustc-hash 1.1.0", 
"serde", @@ -4255,6 +4329,7 @@ dependencies = [ "serde", "serde-wasm-bindgen", "serde_json", + "subsecond", "tracing", "tracing-wasm", "wasm-bindgen", @@ -4819,18 +4894,34 @@ dependencies = [ "bit_field", "half", "lebe", - "miniz_oxide", + "miniz_oxide 0.8.0", "rayon-core", "smallvec", "zune-inflate", ] +[[package]] +name = "eyre" +version = "0.6.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" +dependencies = [ + "indenter", + "once_cell", +] + [[package]] name = "fallible-iterator" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + [[package]] name = "faster-hex" version = "0.9.0" @@ -4935,7 +5026,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -5394,7 +5485,7 @@ version = "0.6.3" dependencies = [ "criterion", "parking_lot", - "rand 0.8.5", + "rand 0.9.0", "tracing", ] @@ -5442,6 +5533,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "ghash" version = "0.5.1" @@ -5468,16 +5571,27 @@ version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" dependencies = [ - 
"fallible-iterator", + "fallible-iterator 0.2.0", "indexmap 1.9.3", "stable_deref_trait", ] +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + [[package]] name = "gimli" version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +dependencies = [ + "fallible-iterator 0.3.0", + "indexmap 2.7.0", + "stable_deref_trait", +] [[package]] name = "gio" @@ -5517,7 +5631,7 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b903b73e45dc0c6c596f2d37eccece7c1c8bb6e4407b001096387c63d0d93724" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "libc", "libgit2-sys", "log", @@ -5567,11 +5681,11 @@ version = "0.14.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49aaeef5d98390a3bcf9dbc6440b520b793d1bf3ed99317dc407b02be995b28e" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "bstr", "gix-path", "libc", - "thiserror 2.0.6", + "thiserror 2.0.12", ] [[package]] @@ -5618,7 +5732,7 @@ version = "0.16.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74908b4bbc0a0a40852737e5d7889f676f081e340d5451a16e5b4c50d592f111" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "bstr", "gix-features", "gix-path", @@ -5674,7 +5788,7 @@ dependencies = [ "gix-trace", "home", "once_cell", - "thiserror 2.0.6", + "thiserror 2.0.12", ] [[package]] @@ -5705,7 +5819,7 @@ version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8b876ef997a955397809a2ec398d6a45b7a55b4918f2446344330f778d14fd6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "gix-path", "libc", "windows-sys 0.52.0", @@ -5767,7 +5881,7 @@ version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "233daaf6e83ae6a12a52055f568f9d7cf4671dabb78ff9560ab6da230ce00ee5" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "futures-channel", "futures-core", "futures-executor", @@ -5820,7 +5934,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b436093d1598b05e3b7fddc097b2bad32763f53a1beb25ab6f9718c6a60acd09" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cocoa 0.25.0", "crossbeam-channel", "keyboard-types", @@ -5959,7 +6073,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbcd2dba93594b227a1f57ee09b8b9da8892c34d55aa332e034a228d0fe6a171" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "gpu-alloc-types", ] @@ -5969,7 +6083,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98ff03b468aa837d70984d55f5d3f846f6ec31fe34bbb97c4f85219caeee1ca4" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", ] [[package]] @@ -5991,7 +6105,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc11df1ace8e7e564511f53af41f3e42ddc95b56fd07b3f4445d2a6048bc682c" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "gpu-descriptor-types", "hashbrown 0.14.5", ] @@ -6002,7 +6116,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6bf0b36e6f090b7e1d8a4b49c0cb81c1f8376f72198c65dd3ad9ff3556b8b78c" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", ] [[package]] @@ -6223,7 +6337,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af2a7e73e1f34c48da31fb668a907f250794837e08faa144fd24f0b8b741e890" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "com", "libc", "libloading 0.8.6", @@ -6882,6 +6996,12 @@ dependencies = [ "quote", ] +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + [[package]] name = "indexmap" version = "1.9.3" @@ -6958,6 +7078,17 @@ dependencies = [ "libc", ] +[[package]] +name = "inotify" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" +dependencies = [ + "bitflags 2.9.0", + "inotify-sys", + "libc", +] + [[package]] name = "inotify-sys" version = "0.1.5" @@ -7331,7 +7462,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b750dcadc39a09dbadd74e118f6dd6598df77fa01df0cfcdc52c28dece74528a" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "serde", "unicode-segmentation", ] @@ -7473,6 +7604,12 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "lebe" version = "0.5.2" @@ -7505,9 +7642,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.168" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libfuzzer-sys" @@ -7565,7 +7702,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "libc", "redox_syscall", ] @@ -7633,7 +7770,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20c9e1f991b3861d25bf872ecca2eb6a73f7a9fe671da047cd1f9b49c65cbc40" dependencies = [ 
"ahash 0.8.11", - "bitflags 2.6.0", + "bitflags 2.9.0", "browserslist-rs", "const-str", "cssparser 0.33.0", @@ -7851,6 +7988,36 @@ dependencies = [ "winapi", ] +[[package]] +name = "macext" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03e7c68cadc41a5342a826858a31b0bd656447b4306873628e93ffb807009599" +dependencies = [ + "mach2", + "process-memory", + "sudo", + "sysinfo 0.30.13", +] + +[[package]] +name = "mach" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +dependencies = [ + "libc", +] + +[[package]] +name = "mach2" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +dependencies = [ + "libc", +] + [[package]] name = "malloc_buf" version = "0.0.6" @@ -7960,6 +8127,16 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "memmap2" version = "0.9.5" @@ -8007,7 +8184,7 @@ version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c43f73953f8cbe511f021b58f18c3ce1c3d1ae13fe953293e13345bf83217f25" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block", "core-graphics-types 0.1.3", "foreign-types 0.5.0", @@ -8068,6 +8245,15 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "miniz_oxide" +version = "0.7.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +dependencies = [ + "adler", +] + [[package]] name = "miniz_oxide" version = "0.8.0" @@ -8189,7 +8375,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50e3524642f53d9af419ab5e8dd29d3ba155708267667c2f3f06c88c9e130843" dependencies = [ "bit-set", - "bitflags 2.6.0", + "bitflags 2.9.0", "codespan-reporting", "hexf-parse", "indexmap 2.7.0", @@ -8243,7 +8429,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3f42e7bbe13d351b6bead8286a43aac9534b82bd3cc43e47037f012ebfd62d4" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "jni-sys", "log", "ndk-sys 0.6.0+11769913", @@ -8322,7 +8508,7 @@ version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cfg-if", "cfg_aliases 0.1.1", "libc", @@ -8335,7 +8521,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cfg-if", "cfg_aliases 0.2.1", "libc", @@ -8394,11 +8580,11 @@ version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "crossbeam-channel", "filetime", "fsevent-sys", - "inotify", + "inotify 0.9.6", "kqueue", "libc", "log", @@ -8408,6 +8594,31 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "notify" +version = "8.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fee8403b3d66ac7b26aee6e40a897d85dc5ce26f44da36b8b73e987cc52e943" +dependencies = [ + "bitflags 2.9.0", + 
"filetime", + "fsevent-sys", + "inotify 0.11.0", + "kqueue", + "libc", + "log", + "mio 1.0.3", + "notify-types", + "walkdir", + "windows-sys 0.59.0", +] + +[[package]] +name = "notify-types" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d" + [[package]] name = "ntapi" version = "0.4.1" @@ -8641,7 +8852,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e4e89ad9e3d7d297152b17d39ed92cd50ca8063a89a9fa569046d41568891eff" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block2", "libc", "objc2", @@ -8657,7 +8868,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "617fbf49e071c178c0b24c080767db52958f716d9eabdf0890523aeae54773ef" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block2", "objc2", "objc2-foundation", @@ -8687,7 +8898,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ee638a5da3799329310ad4cfa62fbf045d5f56e3ef5ba4149e7452dcf89d5a8" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block2", "libc", "objc2", @@ -8699,7 +8910,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd0cba1276f6023976a406a14ffa85e1fdd19df6b0f737b063b95f6c8c7aadd6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block2", "objc2", "objc2-foundation", @@ -8711,7 +8922,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e42bee7bff906b14b167da2bac5efe6b6a07e6f7c0a21a7308d40c960242dc7a" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "block2", "objc2", "objc2-foundation", @@ -8756,7 +8967,10 @@ version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ + "crc32fast", "flate2", 
+ "hashbrown 0.15.2", + "indexmap 2.7.0", "memchr", "ruzstd 0.7.3", "wasmparser 0.218.0", @@ -8828,7 +9042,7 @@ version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cfg-if", "foreign-types 0.3.2", "libc", @@ -8904,9 +9118,9 @@ dependencies = [ [[package]] name = "ouroboros" -version = "0.18.4" +version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "944fa20996a25aded6b4795c6d63f10014a7a83f8be9828a11860b08c5fc4a67" +checksum = "1e0f050db9c44b97a94723127e6be766ac5c340c48f2c4bb3ffa11713744be59" dependencies = [ "aliasable", "ouroboros_macro", @@ -8915,12 +9129,11 @@ dependencies = [ [[package]] name = "ouroboros_macro" -version = "0.18.4" +version = "0.18.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39b0deead1528fd0e5947a8546a9642a9777c25f6e1e26f34c97b204bbb465bd" +checksum = "3c7028bdd3d43083f6d8d4d5187680d0d3560d54df4cc9d752005268b41e64d0" dependencies = [ "heck 0.4.1", - "itertools 0.12.1", "proc-macro2", "proc-macro2-diagnostics", "quote", @@ -8945,6 +9158,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + [[package]] name = "owo-colors" version = "4.1.0" @@ -9021,6 +9240,16 @@ dependencies = [ "sha2", ] +[[package]] +name = "page_size" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "pango" version = "0.18.3" @@ -9052,7 +9281,7 @@ 
version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7645c578d3a5c4cdf667af1ad39765f5f751c4883d251e050d5e1204b5cad0a9" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cssparser 0.33.0", "log", "phf 0.11.2", @@ -9208,7 +9437,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.6", + "thiserror 2.0.12", "ucd-trie", ] @@ -9601,7 +9830,7 @@ dependencies = [ "crc32fast", "fdeflate", "flate2", - "miniz_oxide", + "miniz_oxide 0.8.0", ] [[package]] @@ -9655,7 +9884,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -9680,6 +9909,12 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "pretty-hex" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbc83ee4a840062f368f9096d80077a9841ec117e17e7f700df81958f1451254" + [[package]] name = "pretty_assertions" version = "1.4.1" @@ -9789,6 +10024,17 @@ dependencies = [ "yansi", ] +[[package]] +name = "process-memory" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae9599c34fcc8067c3105dc746c0ce85e3ea61784568b8234179fad490b1dcc1" +dependencies = [ + "libc", + "mach", + "winapi", +] + [[package]] name = "prodash" version = "28.0.0" @@ -9932,7 +10178,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.19", "socket2", - "thiserror 2.0.6", + "thiserror 2.0.12", "tokio", "tracing", ] @@ -9951,7 +10197,7 @@ dependencies = [ "rustls 0.23.19", "rustls-pki-types", "slab", - "thiserror 2.0.6", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time", @@ -10017,6 +10263,17 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy 0.8.23", +] + [[package]] name = "rand_chacha" version = "0.2.2" @@ -10037,6 +10294,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + [[package]] name = "rand_core" version = "0.5.1" @@ -10055,6 +10322,15 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.1", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -10123,7 +10399,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdef7f9be5c0122f890d58bdf4d964349ba6a6161f705907526d891efabba57d" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cassowary", "compact_str", "crossterm", @@ -10235,7 +10511,7 @@ version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", ] [[package]] @@ -10488,7 +10764,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61797318be89b1a268a018a92a7657096d83f3ecb31418b9e9c16dcbb043b702" dependencies = [ "ahash 0.8.11", - "bitflags 2.6.0", + "bitflags 2.9.0", "instant", "num-traits", "once_cell", @@ -10568,7 +10844,7 @@ version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"95c49b6baaa0e8fa864b2069d1e94e7a132471da3ac26a132f3fa7e71416772c" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "bzip2", "chrono", "cpio", @@ -10679,7 +10955,7 @@ version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys", @@ -10968,7 +11244,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -10981,7 +11257,7 @@ version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1415a607e92bec364ea2cf9264646dcce0f91e6d65281bd6f2819cca3bf39c8" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -11642,7 +11918,7 @@ version = "0.3.0+sdk-1.3.268.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eda41003dc44290527a59b13432d4a0379379fa074b70174882adfbdfd917844" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", ] [[package]] @@ -11787,7 +12063,7 @@ dependencies = [ "atoi", "base64 0.21.7", "bigdecimal", - "bitflags 2.6.0", + "bitflags 2.9.0", "byteorder", "bytes", "chrono", @@ -11835,7 +12111,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bit-vec", - "bitflags 2.6.0", + "bitflags 2.9.0", "byteorder", "chrono", "crc 3.2.1", @@ -12040,12 +12316,147 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "subsecond" +version = "0.6.3" +dependencies = [ + "dbg_breakpoint", + "js-sys", + "libc", + "libloading 0.8.6", + "memmap", + "serde", + "serde-wasm-bindgen", + "subsecond-macro", + "subsecond-types", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "subsecond-cli" +version 
= "0.6.3" +dependencies = [ + "anyhow", + "axum 0.7.9", + "axum-extra", + "axum-server", + "bincode", + "cargo_metadata 0.19.2", + "clap", + "crossterm", + "futures", + "futures-channel", + "futures-util", + "gimli 0.31.1", + "include_dir", + "itertools 0.14.0", + "libc", + "libloading 0.8.6", + "macext", + "memmap", + "notify 8.0.0", + "object 0.36.5", + "ouroboros", + "page_size", + "pretty-hex", + "pretty_assertions", + "rustc-demangle", + "serde", + "serde_json", + "shell-words", + "subsecond-cli-support", + "sysinfo 0.33.1", + "target-lexicon 0.13.2", + "tokio", + "tokio-tungstenite 0.23.1", + "tower-http", + "tracing", + "tracing-subscriber", + "urlencoding", + "walkdir", +] + +[[package]] +name = "subsecond-cli-support" +version = "0.6.3" +dependencies = [ + "anyhow", + "bincode", + "clap", + "id-arena", + "itertools 0.14.0", + "libc", + "memmap", + "object 0.36.5", + "pretty-hex", + "pretty_assertions", + "rayon", + "rustc-demangle", + "serde", + "subsecond-types", + "target-lexicon 0.13.2", + "tokio", + "tracing", + "tracing-subscriber", + "walkdir", + "walrus", + "wasm-encoder 0.227.1", + "wasmparser 0.225.0", +] + +[[package]] +name = "subsecond-harness" +version = "0.6.3" +dependencies = [ + "anyhow", + "bincode", + "color-eyre", + "dioxus", + "rand 0.9.0", + "ratatui", + "serde_json", + "subsecond", + "tokio", + "tungstenite 0.23.0", +] + +[[package]] +name = "subsecond-macro" +version = "0.6.3" +dependencies = [ + "base16", + "digest", + "proc-macro2", + "quote", + "sha2", + "syn 2.0.90", +] + +[[package]] +name = "subsecond-types" +version = "0.6.3" +dependencies = [ + "serde", +] + [[package]] name = "subtle" version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "sudo" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"88bd84d4c082e18e37fef52c0088e4407dabcef19d23a607fb4b5ee03b7d5b83" +dependencies = [ + "libc", + "log", +] + [[package]] name = "supports-color" version = "2.1.0" @@ -12203,7 +12614,7 @@ version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82f448db2d1c52ffd2bd3788d89cafd8b5a75b97f0dc8aae00874dda2647f6b6" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "is-macro", "num-bigint", "phf 0.11.2", @@ -12334,7 +12745,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09fdc36d220bcd51f70b1d78bdd8c1e1a172b4e594c385bdd9614b84a7c0e112" dependencies = [ "better_scoped_tls", - "bitflags 2.6.0", + "bitflags 2.9.0", "indexmap 2.7.0", "once_cell", "phf 0.11.2", @@ -12587,6 +12998,21 @@ dependencies = [ "walkdir", ] +[[package]] +name = "sysinfo" +version = "0.30.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a5b4ddaee55fb2bea2bf0e5000747e5f5c0de765e5a5ff87f4cd106439f4bb3" +dependencies = [ + "cfg-if", + "core-foundation-sys", + "libc", + "ntapi", + "once_cell", + "rayon", + "windows 0.52.0", +] + [[package]] name = "sysinfo" version = "0.33.1" @@ -12618,7 +13044,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "core-foundation 0.9.4", "system-configuration-sys 0.6.0", ] @@ -12662,7 +13088,7 @@ version = "0.30.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6682a07cf5bab0b8a2bd20d0a542917ab928b5edb75ebd4eda6b05cbaab872da" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "cocoa 0.26.0", "core-foundation 0.10.0", "core-graphics 0.24.0", @@ -12730,6 +13156,15 @@ version = "0.12.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" +[[package]] +name = "target-lexicon" 
+version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" +dependencies = [ + "serde", +] + [[package]] name = "target-triple" version = "0.1.3" @@ -12918,11 +13353,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.12", ] [[package]] @@ -12938,9 +13373,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", @@ -13057,9 +13492,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "9975ea0f48b5aa3972bf2d888c238182458437cc2a19374b81b25cdf1023fb3a" dependencies = [ "backtrace", "bytes", @@ -13086,9 +13521,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", @@ -13309,7 +13744,7 @@ checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "async-compression", 
"base64 0.21.7", - "bitflags 2.6.0", + "bitflags 2.9.0", "bytes", "futures-core", "futures-util", @@ -13377,6 +13812,16 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-error" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b1581020d7a273442f5b45074a6a57d5757ad0a47dac0e9f0bd57b81936f3db" +dependencies = [ + "tracing", + "tracing-subscriber", +] + [[package]] name = "tracing-fluent-assertions" version = "0.3.0" @@ -13967,7 +14412,7 @@ dependencies = [ "log", "rayon", "walrus-macro", - "wasm-encoder", + "wasm-encoder 0.214.0", "wasmparser 0.214.0", ] @@ -14026,6 +14471,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasite" version = "0.1.0" @@ -14136,6 +14590,16 @@ dependencies = [ "leb128", ] +[[package]] +name = "wasm-encoder" +version = "0.227.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80bb72f02e7fbf07183443b27b0f3d4144abf8c114189f2e088ed95b696a7822" +dependencies = [ + "leb128fmt", + "wasmparser 0.227.1", +] + [[package]] name = "wasm-opt" version = "0.116.1" @@ -14259,7 +14723,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5309c1090e3e84dad0d382f42064e9933fdaedb87e468cc239f0eabea73ddcb6" dependencies = [ "ahash 0.8.11", - "bitflags 2.6.0", + "bitflags 2.9.0", "hashbrown 0.14.5", "indexmap 2.7.0", "semver 1.0.23", @@ -14272,7 +14736,7 @@ version = "0.218.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b09e46c7fceceaa72b2dd1a8a137ea7fd8f93dfaa69806010a709918e496c5dc" dependencies = [ - 
"bitflags 2.6.0", + "bitflags 2.9.0", ] [[package]] @@ -14281,13 +14745,24 @@ version = "0.225.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36e5456165f81e64cb9908a0fe9b9d852c2c74582aa3fe2be3c2da57f937d3ae" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "hashbrown 0.15.2", "indexmap 2.7.0", "semver 1.0.23", "serde", ] +[[package]] +name = "wasmparser" +version = "0.227.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f51cad774fb3c9461ab9bccc9c62dfb7388397b5deda31bf40e8108ccd678b2" +dependencies = [ + "bitflags 2.9.0", + "indexmap 2.7.0", + "semver 1.0.23", +] + [[package]] name = "web-sys" version = "0.3.77" @@ -14459,7 +14934,7 @@ checksum = "28b94525fc99ba9e5c9a9e24764f2bc29bad0911a7446c12f446a8277369bf3a" dependencies = [ "arrayvec", "bit-vec", - "bitflags 2.6.0", + "bitflags 2.9.0", "cfg_aliases 0.1.1", "codespan-reporting", "indexmap 2.7.0", @@ -14487,7 +14962,7 @@ dependencies = [ "arrayvec", "ash", "bit-set", - "bitflags 2.6.0", + "bitflags 2.9.0", "block", "cfg_aliases 0.1.1", "core-graphics-types 0.1.3", @@ -14528,7 +15003,7 @@ version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b671ff9fb03f78b46ff176494ee1ebe7d603393f42664be55b64dc8d53969805" dependencies = [ - "bitflags 2.6.0", + "bitflags 2.9.0", "js-sys", "web-sys", ] @@ -15050,6 +15525,15 @@ version = "0.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.9.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -15321,7 +15805,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" +dependencies = [ + "zerocopy-derive 0.8.23", ] [[package]] @@ -15335,6 +15828,17 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "zerocopy-derive" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.90", +] + [[package]] name = "zerofrom" version = "0.1.5" @@ -15423,7 +15927,7 @@ dependencies = [ "flate2", "indexmap 2.7.0", "memchr", - "thiserror 2.0.6", + "thiserror 2.0.12", "zopfli", ] diff --git a/Cargo.toml b/Cargo.toml index a3aed4b2e6..6ca90400fc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,6 +78,14 @@ members = [ "packages/wasm-split/wasm-split-cli", "packages/wasm-split/wasm-used", + # subsecond + "packages/subsecond/subsecond", + "packages/subsecond/subsecond-macro", + "packages/subsecond/subsecond-cli-support", + "packages/subsecond/subsecond-types", + "packages/subsecond/subsecond-cli", + "packages/subsecond/subsecond-harness", + # Full project examples "example-projects/fullstack-hackernews", "example-projects/ecommerce-site", @@ -102,6 +110,8 @@ members = [ "packages/playwright-tests/nested-suspense", "packages/playwright-tests/cli-optimization", "packages/playwright-tests/wasm-split-harness", + + ] [workspace.package] @@ -149,11 +159,18 @@ const-serialize-macro = { path = "packages/const-serialize-macro", version = "0. 
generational-box = { path = "packages/generational-box", version = "0.6.2" } lazy-js-bundle = { path = "packages/lazy-js-bundle", version = "0.6.2" } +# subsecond +subsecond-cli-support = { path = "packages/subsecond/subsecond-cli-support", version = "0.6.3" } +subsecond-types = { path = "packages/subsecond/subsecond-types", version = "0.6.3" } +subsecond-macro = { path = "packages/subsecond/subsecond-macro", version = "0.6.3" } +subsecond = { path = "packages/subsecond/subsecond", version = "0.6.3" } +# manganis manganis = { path = "packages/manganis/manganis", version = "0.6.2" } manganis-core = { path = "packages/manganis/manganis-core", version = "0.6.2" } manganis-macro = { path = "packages/manganis/manganis-macro", version = "0.6.2" } +# wasm-split wasm-split = { path = "packages/wasm-split/wasm-split", version = "0.1.0" } wasm-split-macro = { path = "packages/wasm-split/wasm-split-macro", version = "0.1.0" } wasm-split-cli = { path = "packages/wasm-split/wasm-split-cli", version = "0.1.0" } @@ -224,7 +241,7 @@ chrono = { version = "0.4.34" } gloo = { version = "0.8.0" } gloo-utils = { version = "0.1.6" } rustversion = "1.0.17" -rand = "0.8.5" +rand = "0.9.0" longest-increasing-subsequence = "0.1.0" trybuild = "1.0" dirs = "5.0.1" @@ -245,6 +262,8 @@ async-once-cell = { version = "0.5.3" } rayon = "1.2.0" wasmparser = "0.225.0" itertools = "0.14.0" +object = { version = "0.36.0" } +bincode = "1.3.3" # desktop wry = { version = "0.45.0", default-features = false } @@ -262,9 +281,14 @@ objc = { version = "0.2.7", features = ["exception"] } objc_id = "0.1.1" tray-icon = "0.19" -# disable debug symbols in dev builds - shouldn't matter for downstream crates but makes our binaries (examples, cli, etc) build faster -[profile.dev] -debug = 0 +# tui stuff +ansi-to-tui = "6.0" +ansi-to-html = "0.2.1" +path-absolutize = "3.1" +crossterm = { version = "0.28.0" } +ratatui = { version = "0.28.0" } +shell-words = "1.1.0" +color-eyre = "0.6.3" # our release profile should be 
fast to compile and fast to run # when we ship our CI builds, we turn on LTO which improves perf leftover by turning on incremental @@ -279,6 +303,11 @@ opt-level = 'z' lto = true debug=true +[profile.subsecond-dev] +inherits = "dev" +debug = 0 +strip = "debuginfo" + # a profile for running the CLI that's also incremental [profile.cli-release-dev] inherits = "release" @@ -338,11 +367,11 @@ wasm-split = { workspace = true } [dev-dependencies] dioxus = { workspace = true, features = ["router"] } dioxus-ssr = { workspace = true } -futures-util = "0.3.31" +futures-util = { workspace = true } separator = "0.4.1" -serde = { version = "1.0.136", features = ["derive"] } -serde_json = "1.0.79" -rand = { version = "0.8.4", features = ["small_rng"] } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +rand = { workspace = true, features = ["small_rng"] } form_urlencoded = "1.2.0" async-std = "1.12.0" web-time = "1.1.0" diff --git a/packages/cli-opt/Cargo.toml b/packages/cli-opt/Cargo.toml index 3587e3c6ee..1d650b56fd 100644 --- a/packages/cli-opt/Cargo.toml +++ b/packages/cli-opt/Cargo.toml @@ -13,7 +13,7 @@ keywords = ["dom", "ui", "gui", "react"] anyhow = { workspace = true } manganis = { workspace = true } manganis-core = { workspace = true } -object = {version="0.36.0", features=["wasm"]} +object = {workspace = true, features = ["wasm"] } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } const-serialize = { workspace = true, features = ["serde"] } diff --git a/packages/cli/Cargo.toml b/packages/cli/Cargo.toml index 5549e53066..3275cd2062 100644 --- a/packages/cli/Cargo.toml +++ b/packages/cli/Cargo.toml @@ -23,6 +23,7 @@ dioxus-cli-config = { workspace = true } dioxus-cli-opt = { workspace = true } dioxus-fullstack = { workspace = true } dioxus-dx-wire-format = { workspace = true } +subsecond-cli-support = { workspace = true } wasm-split-cli = { workspace = true } clap = { workspace = true, features 
= ["derive", "cargo"] } @@ -50,8 +51,9 @@ console = "0.15.8" ctrlc = "3.2.3" futures-channel = { workspace = true } krates = { version = "0.17.0" } -cargo-config2 = { workspace = true, optional = true } +cargo-config2 = { workspace = true } regex = "1.10.6" +target-lexicon = { version = "0.13.2", features = ["serde", "serde_support"] } axum = { workspace = true, features = ["ws"] } axum-server = { workspace = true, features = ["tls-rustls"] } @@ -96,11 +98,12 @@ tracing-subscriber = { version = "0.3.18", features = ["std", "env-filter", "jso console-subscriber = { version = "0.3.0", optional = true } tracing = { workspace = true } wasm-opt = { version = "0.116.1", optional = true } -crossterm = { version = "0.28.0", features = ["event-stream"] } -ansi-to-tui = "6.0" -ansi-to-html = "0.2.1" -path-absolutize = "3.1" -ratatui = { version = "0.28.0", features = ["crossterm", "unstable"] } +ansi-to-tui = { workspace = true } +ansi-to-html = { workspace = true } +path-absolutize = { workspace = true } +crossterm = { workspace = true, features = ["event-stream"] } +ratatui = { workspace = true, features = ["crossterm", "unstable"] } +shell-words = { workspace = true } # disable `log` entirely since `walrus` uses it and is *much* slower with it enableda log = { version = "0.4", features = ["max_level_off", "release_max_level_off"] } @@ -111,9 +114,9 @@ manganis = { workspace = true } manganis-core = { workspace = true } # Extracting data from an executable -object = {version="0.36.0", features=["wasm"]} +object = { workspace = true, features = ["all"] } tokio-util = { version = "0.7.11", features = ["full"] } -itertools = "0.13.0" +itertools = "0.14.0" throbber-widgets-tui = "=0.7.0" unicode-segmentation = "1.12.0" handlebars = "6.1.0" @@ -129,6 +132,8 @@ dircpy = "0.3.19" plist = "1.7.0" memoize = "0.5.1" +memmap = "0.7.0" + [build-dependencies] built = { version = "=0.7.4", features = ["git2"] } diff --git 
a/packages/cli/assets/android/gen/app/src/main/AndroidManifest.xml.hbs b/packages/cli/assets/android/gen/app/src/main/AndroidManifest.xml.hbs index 3164e10e9d..c3d984afe2 100644 --- a/packages/cli/assets/android/gen/app/src/main/AndroidManifest.xml.hbs +++ b/packages/cli/assets/android/gen/app/src/main/AndroidManifest.xml.hbs @@ -3,6 +3,8 @@ diff --git a/packages/cli/assets/web/dev.index.html b/packages/cli/assets/web/dev.index.html new file mode 100644 index 0000000000..2e08b4a250 --- /dev/null +++ b/packages/cli/assets/web/dev.index.html @@ -0,0 +1,339 @@ + + + + {app_title} + + + + + + + + + + + +
+ + diff --git a/packages/cli/assets/web/loading.html b/packages/cli/assets/web/dev.loading.html similarity index 100% rename from packages/cli/assets/web/loading.html rename to packages/cli/assets/web/dev.loading.html diff --git a/packages/cli/assets/web/index.html b/packages/cli/assets/web/index.html deleted file mode 100644 index e07ae83045..0000000000 --- a/packages/cli/assets/web/index.html +++ /dev/null @@ -1,12 +0,0 @@ - - - - {app_title} - - - - - -
- - diff --git a/packages/cli/assets/web/prod.index.html b/packages/cli/assets/web/prod.index.html new file mode 100644 index 0000000000..937c428f23 --- /dev/null +++ b/packages/cli/assets/web/prod.index.html @@ -0,0 +1,27 @@ + + + {app_title} + + + + + +
+ + + diff --git a/packages/cli/assets/web/toast.html b/packages/cli/assets/web/toast.html deleted file mode 100644 index c7018c485f..0000000000 --- a/packages/cli/assets/web/toast.html +++ /dev/null @@ -1,206 +0,0 @@ - - -
-
- -
-
-
- - -
- -
- - - - - - - -

Your app is being rebuilt.

-
- - -

A non-hot-reloadable change occurred and we must rebuild.

-
-
-
- - \ No newline at end of file diff --git a/packages/cli/src/cli/autoformat.rs b/packages/cli/src/args/autoformat.rs similarity index 93% rename from packages/cli/src/cli/autoformat.rs rename to packages/cli/src/args/autoformat.rs index 367eecbf19..e13313e4fd 100644 --- a/packages/cli/src/cli/autoformat.rs +++ b/packages/cli/src/args/autoformat.rs @@ -1,5 +1,4 @@ use super::*; -use crate::DioxusCrate; use anyhow::Context; use dioxus_autofmt::{IndentOptions, IndentType}; use rayon::prelude::*; @@ -38,7 +37,7 @@ pub(crate) struct Autoformat { } impl Autoformat { - pub(crate) fn autoformat(self) -> Result { + pub(crate) async fn autoformat(self) -> Result { let Autoformat { check, raw, @@ -62,15 +61,17 @@ impl Autoformat { } else { // Default to formatting the project. let crate_dir = if let Some(package) = self.package { - // TODO (matt): Do we need to use the entire `DioxusCrate` here? - let target_args = TargetArgs { - package: Some(package), - ..Default::default() - }; - let dx_crate = - DioxusCrate::new(&target_args).context("failed to parse crate graph")?; - - Cow::Owned(dx_crate.crate_dir()) + todo!() + // // TODO (matt): Do we need to use the entire `DioxusCrate` here? + // let target_args = TargetArgs { + // package: Some(package), + // ..Default::default() + // }; + // let dx_crate = DioxusCrate::new(&target_args) + // .await + // .context("failed to parse crate graph")?; + + // Cow::Owned(dx_crate.crate_dir()) } else { Cow::Borrowed(Path::new(".")) }; @@ -311,5 +312,5 @@ async fn test_auto_fmt() { package: None, }; - fmt.autoformat().unwrap(); + fmt.autoformat().await.unwrap(); } diff --git a/packages/cli/src/args/build.rs b/packages/cli/src/args/build.rs new file mode 100644 index 0000000000..e86a72cbcb --- /dev/null +++ b/packages/cli/src/args/build.rs @@ -0,0 +1,123 @@ +use crate::Platform; +use crate::{args::*, BuildRequest, AppBuilder}; +use target_lexicon::Triple; + +/// Build the Rust Dioxus app and all of its assets. 
+/// +/// Produces a final output build. For fullstack builds you need to build the server and client separately. +/// +/// ``` +/// dx build --platform web +/// dx build --platform server +/// ``` +#[derive(Clone, Debug, Default, Deserialize, Parser)] +pub(crate) struct BuildArgs { + #[clap(long)] + pub(crate) name: Option, + + /// Build for nightly [default: false] + #[clap(long)] + pub(crate) nightly: bool, + + /// Build platform: support Web & Desktop [default: "default_platform"] + #[clap(long, value_enum)] + pub(crate) platform: Option, + + /// Build in release mode [default: false] + #[clap(long, short)] + #[serde(default)] + pub(crate) release: bool, + + /// The package to build + #[clap(short, long)] + pub(crate) package: Option, + + /// Build a specific binary [default: ""] + #[clap(long)] + pub(crate) bin: Option, + + /// Build a specific example [default: ""] + #[clap(long)] + pub(crate) example: Option, + + /// Build the app with custom a profile + #[clap(long)] + pub(crate) profile: Option, + + /// Space separated list of features to activate + #[clap(long)] + pub(crate) features: Vec, + + /// Don't include the default features in the build + #[clap(long)] + pub(crate) no_default_features: bool, + + /// Include all features in the build + #[clap(long)] + pub(crate) all_features: bool, + + /// Rustc platform triple + #[clap(long)] + pub(crate) target: Option, + + // todo -- make a subcommand called "--" that takes all the remaining args + /// Extra arguments passed to `rustc` + /// + /// cargo rustc -- -Clinker + #[clap(value_delimiter = ',')] + pub(crate) cargo_args: Vec, + + /// This flag only applies to fullstack builds. By default fullstack builds will run the server and client builds in parallel. This flag will force the build to run the server build first, then the client build. 
[default: false] + #[clap(long)] + #[serde(default)] + pub(crate) force_sequential: bool, + + /// Skip collecting assets from dependencies [default: false] + #[clap(long)] + #[serde(default)] + pub(crate) skip_assets: bool, + + /// Inject scripts to load the wasm and js files for your dioxus app if they are not already present [default: true] + #[clap(long, default_value_t = true)] + pub(crate) inject_loading_scripts: bool, + + /// Experimental: Bundle split the wasm binary into multiple chunks based on `#[wasm_split]` annotations [default: false] + #[clap(long, default_value_t = false)] + pub(crate) wasm_split: bool, + + /// Generate debug symbols for the wasm binary [default: true] + /// + /// This will make the binary larger and take longer to compile, but will allow you to debug the + /// wasm binary + #[clap(long, default_value_t = true)] + pub(crate) debug_symbols: bool, + + /// Use the cranelift backend to compile the app [default: false] + /// + /// This can speed up compile times by up to 100% but is experimental within the compiler. + #[clap(long)] + pub(crate) cranelift: bool, + + /// Are we building for a device or just the simulator. + /// If device is false, then we'll build for the simulator + #[clap(long)] + pub(crate) device: Option, +} + +impl BuildArgs { + pub async fn build(self) -> Result { + tracing::info!("Building project..."); + + let build = BuildRequest::new(&self) + .await + .context("Failed to load Dioxus workspace")?; + + AppBuilder::start(&build)?.finish().await?; + + tracing::info!(path = ?build.root_dir(), "Build completed successfully! 
🚀"); + + Ok(StructuredOutput::BuildFinished { + path: build.root_dir(), + }) + } +} diff --git a/packages/cli/src/cli/bundle.rs b/packages/cli/src/args/bundle.rs similarity index 73% rename from packages/cli/src/cli/bundle.rs rename to packages/cli/src/args/bundle.rs index c742ac48ee..568dfd382d 100644 --- a/packages/cli/src/cli/bundle.rs +++ b/packages/cli/src/args/bundle.rs @@ -1,4 +1,4 @@ -use crate::{AppBundle, BuildArgs, Builder, DioxusCrate, Platform}; +use crate::{BuildArgs, BuildRequest, AppBuilder, Platform}; use anyhow::{anyhow, Context}; use path_absolutize::Absolutize; use std::collections::HashMap; @@ -6,7 +6,22 @@ use tauri_bundler::{BundleBinary, BundleSettings, PackageSettings, SettingsBuild use super::*; -/// Bundle the Rust desktop app and all of its assets +/// Bundle an app and its assets. +/// +/// This only takes a single build into account. To build multiple targets, use multiple calls to bundle. +/// +/// ``` +/// dioxus bundle --target +/// dioxus bundle --target +/// ``` +/// +/// Note that building the server will perform a client build as well: +/// +/// ``` +/// dioxus bundle --platform server +/// ``` +/// +/// This will produce a client `public` folder and the associated server executable in the output folder. #[derive(Clone, Debug, Parser)] pub struct Bundle { /// The package types to bundle @@ -23,61 +38,70 @@ pub struct Bundle { #[clap(long)] pub out_dir: Option, + /// Build the fullstack variant of this app, using that as the fileserver and backend + /// + /// This defaults to `false` but will be overridden to true if the `fullstack` feature is enabled. 
+ #[clap(long)] + pub(crate) fullstack: bool, + + /// Run the ssg config of the app and generate the files + #[clap(long)] + pub(crate) ssg: bool, + /// The arguments for the dioxus build #[clap(flatten)] - pub(crate) build_arguments: BuildArgs, + pub(crate) args: BuildArgs, } impl Bundle { pub(crate) async fn bundle(mut self) -> Result { tracing::info!("Bundling project..."); - let krate = DioxusCrate::new(&self.build_arguments.target_args) - .context("Failed to load Dioxus workspace")?; - // We always use `release` mode for bundling - self.build_arguments.release = true; - self.build_arguments.resolve(&krate).await?; + // todo - maybe not? what if you want a devmode bundle? + self.args.release = true; + + let build = BuildRequest::new(&self.args) + .await + .context("Failed to load Dioxus workspace")?; tracing::info!("Building app..."); - let bundle = Builder::start(&krate, self.build_arguments.clone())? - .finish() - .await?; + let bundle = AppBuilder::start(&build)?.finish().await?; // If we're building for iOS, we need to bundle the iOS bundle - if self.build_arguments.platform() == Platform::Ios && self.package_types.is_none() { + if build.platform == Platform::Ios && self.package_types.is_none() { self.package_types = Some(vec![crate::PackageType::IosBundle]); } let mut bundles = vec![]; - // Copy the server over if it exists - if bundle.build.build.fullstack { - bundles.push(bundle.server_exe().unwrap()); - } + // // Copy the server over if it exists + // if build.fullstack { + // bundles.push(build.server_exe().unwrap()); + // } // Create a list of bundles that we might need to copy - match self.build_arguments.platform() { + match build.platform { // By default, mac/win/linux work with tauri bundle Platform::MacOS | Platform::Linux | Platform::Windows => { tracing::info!("Running desktop bundler..."); - for bundle in self.bundle_desktop(&krate, &bundle)? { + for bundle in Self::bundle_desktop(&build, &self.package_types)? 
{ bundles.extend(bundle.bundle_paths); } } // Web/ios can just use their root_dir - Platform::Web => bundles.push(bundle.build.root_dir()), + Platform::Web => bundles.push(build.root_dir()), Platform::Ios => { tracing::warn!("iOS bundles are not currently codesigned! You will need to codesign the app before distributing."); - bundles.push(bundle.build.root_dir()) + bundles.push(build.root_dir()) } - Platform::Server => bundles.push(bundle.build.root_dir()), - Platform::Liveview => bundles.push(bundle.build.root_dir()), + Platform::Server => bundles.push(build.root_dir()), + Platform::Liveview => bundles.push(build.root_dir()), Platform::Android => { - let aab = bundle + let aab = build .android_gradle_bundle() .await .context("Failed to run gradle bundleRelease")?; @@ -86,7 +110,7 @@ impl Bundle { }; // Copy the bundles to the output directory if one was specified - let crate_outdir = bundle.build.krate.crate_out_dir(); + let crate_outdir = build.crate_out_dir(); if let Some(outdir) = self.out_dir.clone().or(crate_outdir) { let outdir = outdir .absolutize() @@ -130,31 +154,28 @@ impl Bundle { } fn bundle_desktop( - &self, - krate: &DioxusCrate, - bundle: &AppBundle, + build: &BuildRequest, + package_types: &Option>, ) -> Result, Error> { - _ = std::fs::remove_dir_all(krate.bundle_dir(self.build_arguments.platform())); + let krate = &build; + let exe = build.main_exe(); + + _ = std::fs::remove_dir_all(krate.bundle_dir(build.platform)); let package = krate.package(); let mut name: PathBuf = krate.executable_name().into(); if cfg!(windows) { name.set_extension("exe"); } - std::fs::create_dir_all(krate.bundle_dir(self.build_arguments.platform())) + std::fs::create_dir_all(krate.bundle_dir(build.platform)) .context("Failed to create bundle directory")?; - std::fs::copy( - &bundle.app.exe, - krate - .bundle_dir(self.build_arguments.platform()) - .join(&name), - ) - .with_context(|| "Failed to copy the output executable into the bundle directory")?; + std::fs::copy(&exe, 
krate.bundle_dir(build.platform).join(&name)) + .with_context(|| "Failed to copy the output executable into the bundle directory")?; let binaries = vec![ // We use the name of the exe but it has to be in the same directory BundleBinary::new(krate.executable_name().to_string(), true) - .set_src_path(Some(bundle.app.exe.display().to_string())), + .set_src_path(Some(exe.display().to_string())), ]; let mut bundle_settings: BundleSettings = krate.config.bundle.clone().into(); @@ -185,7 +206,7 @@ impl Bundle { bundle_settings.resources_map = Some(HashMap::new()); } - let asset_dir = bundle.build.asset_dir(); + let asset_dir = build.asset_dir(); if asset_dir.exists() { let asset_dir_entries = std::fs::read_dir(&asset_dir) .with_context(|| format!("failed to read asset directory {:?}", asset_dir))?; @@ -214,7 +235,7 @@ impl Bundle { } let mut settings = SettingsBuilder::new() - .project_out_directory(krate.bundle_dir(self.build_arguments.platform())) + .project_out_directory(krate.bundle_dir(build.platform)) .package_settings(PackageSettings { product_name: krate.bundled_app_name(), version: package.version.to_string(), @@ -227,17 +248,11 @@ impl Bundle { .binaries(binaries) .bundle_settings(bundle_settings); - if let Some(packages) = &self.package_types { + if let Some(packages) = &package_types { settings = settings.package_types(packages.iter().map(|p| (*p).into()).collect()); } - if let Some(target) = self.build_arguments.target_args.target.as_ref() { - settings = settings.target(target.to_string()); - } - - if self.build_arguments.platform() == Platform::Ios { - settings = settings.target("aarch64-apple-ios".to_string()); - } + settings = settings.target(build.target.to_string()); let settings = settings .build() @@ -257,3 +272,18 @@ impl Bundle { Ok(bundles) } } + +// async fn pre_render_ssg_routes(&self) -> Result<()> { +// // Run SSG and cache static routes +// if !self.ssg { +// return Ok(()); +// } +// self.status_prerendering_routes(); +// 
pre_render_static_routes( +// &self +// .server_exe() +// .context("Failed to find server executable")?, +// ) +// .await?; +// Ok(()) +// } diff --git a/packages/cli/src/args/chained.rs b/packages/cli/src/args/chained.rs new file mode 100644 index 0000000000..148d61adcc --- /dev/null +++ b/packages/cli/src/args/chained.rs @@ -0,0 +1,191 @@ +use clap::{ArgMatches, Args, FromArgMatches, Parser, Subcommand}; +use serde::{de::DeserializeOwned, Deserialize}; + +// https://github.com/clap-rs/clap/issues/2222#issuecomment-2524152894 +// +// +/// `[Args]` wrapper to match `T` variants recursively in `U`. +#[derive(Debug, Clone)] +pub struct ChainedCommand { + /// Specific Variant. + inner: T, + + /// Enum containing `Self` variants, in other words possible follow-up commands. + next: Option>, +} + +impl ChainedCommand +where + T: Args, + U: Subcommand, +{ + fn commands(self) -> Vec { + let mut commands = vec![]; + commands + } +} + +impl Args for ChainedCommand +where + T: Args, + U: Subcommand, +{ + fn augment_args(cmd: clap::Command) -> clap::Command { + // We use the special `defer` method whcih lets us recursively call `augment_args` on the inner command + // and thus `from_arg_matches` + T::augment_args(cmd).defer(|cmd| U::augment_subcommands(cmd.disable_help_subcommand(true))) + } + + fn augment_args_for_update(_cmd: clap::Command) -> clap::Command { + unimplemented!() + } +} + +impl FromArgMatches for ChainedCommand +where + T: Args, + U: Subcommand, +{ + fn from_arg_matches(matches: &ArgMatches) -> Result { + // Parse the first command before we try to parse the next one. + let inner = T::from_arg_matches(matches)?; + + // Try to parse the remainder of the command as a subcommand. 
+ let next = match matches.subcommand() { + // Subcommand skips into the matched .subcommand, hence we need to pass *outer* matches, ignoring the inner matches + // (which in the average case should only match enumerated T) + // + // Here, we might want to eventually enable arbitrary names of subcommands if they're prefixed + // with a prefix like "@" ie `dx serve @dog-app/backend --args @dog-app/frontend --args` + // + // we are done, since sub-sub commmands are matched in U:: + Some(_) => Some(Box::new(U::from_arg_matches(matches)?)), + + // no subcommand matched, we are done + None => None, + }; + + Ok(Self { inner, next }) + } + + fn update_from_arg_matches(&mut self, _matches: &ArgMatches) -> Result<(), clap::Error> { + unimplemented!() + } +} + +impl<'de, T: Deserialize<'de>, U: Deserialize<'de>> Deserialize<'de> for ChainedCommand { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + todo!() + } +} + +// #[cfg(test)] +// mod tests { +// use super::*; + +// #[derive(Debug, Parser)] +// struct TestCli { +// #[clap(long)] +// top: Option, + +// #[command(subcommand)] +// cmd: TopCmd, +// } + +// /// Launch a specific target +// /// +// /// You can specify multiple targets using `@client --args` syntax. 
+// #[derive(Debug, Parser)] +// struct ServeCommand { +// #[clap(flatten)] +// args: Target, + +// #[command(subcommand)] +// targets: TopCmd, +// } + +// #[derive(Debug, Subcommand, Clone)] +// enum TopCmd { +// Serve { +// #[clap(subcommand)] +// cmd: Cmd, +// }, +// } + +// /// Launch a specific target +// #[derive(Debug, Subcommand, Clone)] +// #[command(subcommand_precedence_over_arg = true)] +// enum Cmd { +// /// Specify the arguments for the client build +// #[clap(name = "client")] +// Client(ReClap), + +// /// Specify the arguments for the server build +// #[clap(name = "server")] +// Server(ReClap), + +// /// Specify the arguments for any number of additional targets +// #[clap(name = "target")] +// Target(ReClap), +// } + +// #[derive(Clone, Args, Debug)] +// struct Target { +// #[arg(short, long)] +// profile: Option, + +// #[arg(short, long)] +// target: Option, + +// #[arg(short, long)] +// bin: Option, +// } + +// #[test] +// fn test_parse_args() { +// let args = r#" +// dx serve +// @client --release +// @server --target wasm32 +// @target --bin mybin +// @target --bin mybin +// @target --bin mybin +// @target --bin mybin +// "# +// .trim() +// .split_ascii_whitespace(); + +// let cli = TestCli::parse_from(args); + +// dbg!(&cli); + +// match cli.cmd { +// TopCmd::Serve { cmd } => { +// let mut next = Some(cmd); + +// // let mut next = cmd.cmd; +// while let Some(cmd) = next { +// // println!("{cmd:?}"); +// // could use enum_dispatch +// next = match cmd { +// Cmd::Client(rec) => { +// // +// (rec.next).map(|d| *d) +// } +// Cmd::Server(rec) => { +// // +// (rec.next).map(|d| *d) +// } +// Cmd::Target(rec) => { +// // +// (rec.next).map(|d| *d) +// } +// } +// } +// } +// } +// } +// } diff --git a/packages/cli/src/cli/check.rs b/packages/cli/src/args/check.rs similarity index 88% rename from packages/cli/src/cli/check.rs rename to packages/cli/src/args/check.rs index 3c28df0126..e578d3da71 100644 --- a/packages/cli/src/cli/check.rs +++ 
b/packages/cli/src/args/check.rs @@ -4,7 +4,6 @@ //! https://github.com/rust-lang/rustfmt/blob/master/src/bin/main.rs use super::*; -use crate::DioxusCrate; use anyhow::Context; use futures_util::{stream::FuturesUnordered, StreamExt}; use std::path::Path; @@ -18,7 +17,7 @@ pub(crate) struct Check { /// Information about the target to check #[clap(flatten)] - pub(crate) target_args: TargetArgs, + pub(crate) build_args: BuildArgs, } impl Check { @@ -27,8 +26,7 @@ impl Check { match self.file { // Default to checking the project None => { - let dioxus_crate = DioxusCrate::new(&self.target_args)?; - check_project_and_report(dioxus_crate) + check_project_and_report(&self.build_args) .await .context("error checking project")?; } @@ -52,10 +50,11 @@ async fn check_file_and_report(path: PathBuf) -> Result<()> { /// Runs using Tokio for multithreading, so it should be really really fast /// /// Doesn't do mod-descending, so it will still try to check unreachable files. TODO. -async fn check_project_and_report(dioxus_crate: DioxusCrate) -> Result<()> { - let mut files_to_check = vec![dioxus_crate.main_source_file()]; - collect_rs_files(&dioxus_crate.crate_dir(), &mut files_to_check); - check_files_and_report(files_to_check).await +async fn check_project_and_report(krate: &BuildArgs) -> Result<()> { + todo!("check_project_and_report"); + // let mut files_to_check = vec![dioxus_crate.main_source_file()]; + // collect_rs_files(&dioxus_crate.crate_dir(), &mut files_to_check); + // check_files_and_report(files_to_check).await } /// Check a list of files and report the issues. 
diff --git a/packages/cli/src/cli/clean.rs b/packages/cli/src/args/clean.rs similarity index 100% rename from packages/cli/src/cli/clean.rs rename to packages/cli/src/args/clean.rs diff --git a/packages/cli/src/cli/config.rs b/packages/cli/src/args/config.rs similarity index 91% rename from packages/cli/src/cli/config.rs rename to packages/cli/src/args/config.rs index a31321a2f8..30b5e220c7 100644 --- a/packages/cli/src/cli/config.rs +++ b/packages/cli/src/args/config.rs @@ -75,7 +75,7 @@ impl From for bool { } impl Config { - pub(crate) fn config(self) -> Result { + pub(crate) async fn config(self) -> Result { let crate_root = crate_root()?; match self { Config::Init { @@ -98,15 +98,18 @@ impl Config { tracing::info!(dx_src = ?TraceSrc::Dev, "🚩 Init config file completed."); } Config::FormatPrint {} => { - tracing::info!( - "{:#?}", - crate::dioxus_crate::DioxusCrate::new(&TargetArgs::default())?.config - ); + todo!("Load workspace and print its config?") + // tracing::info!( + // "{:#?}", + // crate::dioxus_crate::DioxusCrate::new(&TargetArgs::default()) + // .await? 
+ // .config + // ); } Config::CustomHtml {} => { let html_path = crate_root.join("index.html"); let mut file = File::create(html_path)?; - let content = include_str!("../../assets/web/index.html"); + let content = include_str!("../../assets/web/dev.index.html"); file.write_all(content.as_bytes())?; tracing::info!(dx_src = ?TraceSrc::Dev, "🚩 Create custom html file done."); } diff --git a/packages/cli/src/cli/create.rs b/packages/cli/src/args/create.rs similarity index 100% rename from packages/cli/src/cli/create.rs rename to packages/cli/src/args/create.rs diff --git a/packages/cli/src/cli/init.rs b/packages/cli/src/args/init.rs similarity index 100% rename from packages/cli/src/cli/init.rs rename to packages/cli/src/args/init.rs diff --git a/packages/cli/src/args/link.rs b/packages/cli/src/args/link.rs new file mode 100644 index 0000000000..6bfc88147f --- /dev/null +++ b/packages/cli/src/args/link.rs @@ -0,0 +1,159 @@ +use crate::{Platform, Result}; +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use target_lexicon::Triple; +use tokio::process::Command; + +#[derive(Debug, Serialize, Deserialize)] +pub enum LinkAction { + BaseLink { + linker: PathBuf, + extra_flags: Vec, + }, + ThinLink { + save_link_args: PathBuf, + triple: Triple, + }, +} + +impl LinkAction { + pub(crate) const ENV_VAR_NAME: &'static str = "dx_magic_link_file"; + + /// Should we write the input arguments to a file (aka act as a linker subprocess)? + /// + /// Just check if the magic env var is set + pub(crate) fn from_env() -> Option { + std::env::var(Self::ENV_VAR_NAME) + .ok() + .map(|var| serde_json::from_str(&var).expect("Failed to parse magic env var")) + } + + pub(crate) fn to_json(&self) -> String { + serde_json::to_string(self).unwrap() + } + + /// Write the incoming linker args to a file + /// + /// The file will be given by the dx-magic-link-arg env var itself, so we use + /// it both for determining if we should act as a linker and the for the file name itself. 
+ pub(crate) async fn run(self) -> Result<()> { + let args = std::env::args().collect::>(); + + match self { + // Run the system linker but (maybe) keep any unused sections. + LinkAction::BaseLink { + linker, + extra_flags, + } => { + let mut cmd = std::process::Command::new(linker); + cmd.args(args.iter().skip(1)); + cmd.args(extra_flags); + let res = cmd + .stderr(std::process::Stdio::piped()) + .stdout(std::process::Stdio::piped()) + .output() + .expect("Failed to run android linker"); + + let err = String::from_utf8_lossy(&res.stderr); + std::fs::write( + "/Users/jonkelley/Development/dioxus/packages/subsecond/data/link-err.txt", + format!("err: {err}"), + ) + .unwrap(); + + // Make sure we *don't* dead-strip the binary so every library symbol still exists. + // This is required for thin linking to work correctly. + // let args = args.into_iter().skip(1).collect::>(); + // let res = Command::new(linker).args(args).output().await?; + // let err = String::from_utf8_lossy(&res.stderr); + + // .filter(|arg| arg != "-Wl,-dead_strip" && !strip) + + // this is ld64 only, we need --whole-archive for gnu/ld + // args.push("-Wl,-all_load".to_string()); + + // // Persist the cache of incremental files + // cache_incrementals( + // &incremental_dir.join("old"), + // &incremental_dir.join("new"), + // args.iter() + // .filter(|arg| arg.ends_with(".o")) + // .collect::>() + // .as_ref(), + // ); + + // Run ld with the args + } + + // Run the linker but without rlibs + LinkAction::ThinLink { + save_link_args, + triple, + } => { + // Write the linker args to a file for the main process to read + std::fs::write(save_link_args, args.join("\n"))?; + + // Extract the out + let out = args.iter().position(|arg| arg == "-o").unwrap(); + let out_file: PathBuf = args[out + 1].clone().into(); + + // Write a dummy object file to satisfy rust/linker since it'll run llvm-objcopy + // ... 
I wish it *didn't* do that but I can't tell how to disable the linker without + // using --emit=obj which is not exactly what we want since that will still pull in + // the dependencies. + std::fs::create_dir_all(out_file.parent().unwrap())?; + std::fs::write(out_file, make_dummy_object_file(triple))?; + } + } + + Ok(()) + } +} + +/// This creates an object file that satisfies rust's use of llvm-objcopy +/// +/// I'd rather we *not* do this and instead generate a truly linked file (and then delete it) but +/// this at least lets us delay linking until the host compiler is ready. +/// +/// This is because our host compiler is a stateful server and not a stateless linker. +fn make_dummy_object_file(triple: Triple) -> Vec { + let triple = Triple::host(); + + let format = match triple.binary_format { + target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf, + target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff, + target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO, + target_lexicon::BinaryFormat::Wasm => object::BinaryFormat::Wasm, + target_lexicon::BinaryFormat::Xcoff => object::BinaryFormat::Xcoff, + target_lexicon::BinaryFormat::Unknown => todo!(), + _ => todo!("Binary format not supported"), + }; + + let arch = match triple.architecture { + target_lexicon::Architecture::Wasm32 => object::Architecture::Wasm32, + target_lexicon::Architecture::Wasm64 => object::Architecture::Wasm64, + target_lexicon::Architecture::X86_64 => object::Architecture::X86_64, + target_lexicon::Architecture::Arm(_) => object::Architecture::Arm, + target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64, + target_lexicon::Architecture::LoongArch64 => object::Architecture::LoongArch64, + target_lexicon::Architecture::Unknown => object::Architecture::Unknown, + _ => todo!("Architecture not supported"), + }; + + let endian = match triple.endianness() { + Ok(target_lexicon::Endianness::Little) => object::Endianness::Little, + 
Ok(target_lexicon::Endianness::Big) => object::Endianness::Big, + Err(_) => todo!("Endianness not supported"), + }; + + object::write::Object::new(format, arch, endian) + .write() + .unwrap() +} + +#[test] +fn test_make_dummy_object_file() { + let triple: Triple = "wasm32-unknown-unknown".parse().unwrap(); + let obj = make_dummy_object_file(triple); + assert!(!obj.is_empty()); +} diff --git a/packages/cli/src/cli/mod.rs b/packages/cli/src/args/mod.rs similarity index 98% rename from packages/cli/src/cli/mod.rs rename to packages/cli/src/args/mod.rs index 02d362b71d..731537f39b 100644 --- a/packages/cli/src/cli/mod.rs +++ b/packages/cli/src/args/mod.rs @@ -1,6 +1,7 @@ pub(crate) mod autoformat; pub(crate) mod build; pub(crate) mod bundle; +pub(crate) mod chained; pub(crate) mod check; pub(crate) mod clean; pub(crate) mod config; @@ -9,13 +10,11 @@ pub(crate) mod init; pub(crate) mod link; pub(crate) mod run; pub(crate) mod serve; -pub(crate) mod target; pub(crate) mod translate; pub(crate) mod verbosity; pub(crate) use build::*; pub(crate) use serve::*; -pub(crate) use target::*; pub(crate) use verbosity::*; use crate::{error::Result, Error, StructuredOutput}; diff --git a/packages/cli/src/args/run.rs b/packages/cli/src/args/run.rs new file mode 100644 index 0000000000..4f8f40963e --- /dev/null +++ b/packages/cli/src/args/run.rs @@ -0,0 +1,51 @@ +use super::*; +use crate::{BuildArgs, BuildRequest, AppBuilder, Platform, Result}; + +/// Run the project with the given arguments +#[derive(Clone, Debug, Parser)] +pub(crate) struct RunArgs { + /// Information about the target to build + #[clap(flatten)] + pub(crate) build_args: BuildArgs, +} + +impl RunArgs { + pub(crate) async fn run(self) -> Result { + let build = BuildRequest::new(&self.build_args) + .await + .context("error building project")?; + + let mut builder = AppBuilder::start(&build)?; + let artifacts = builder.finish().await?; + + let devserver_ip = "127.0.0.1:8081".parse().unwrap(); + let fullstack_ip = 
"127.0.0.1:8080".parse().unwrap(); + + if build.platform == Platform::Web || build.fullstack { + tracing::info!("Serving at: {}", fullstack_ip); + } + + builder.open(devserver_ip, Some(fullstack_ip), true).await?; + + todo!(); + // // Run the app, but mostly ignore all the other messages + // // They won't generally be emitted + // loop { + // match builder.wait().await { + // HandleUpdate::StderrReceived { platform, msg } => { + // tracing::info!("[{platform}]: {msg}") + // } + // HandleUpdate::StdoutReceived { platform, msg } => { + // tracing::info!("[{platform}]: {msg}") + // } + // HandleUpdate::ProcessExited { platform, status } => { + // builder.cleanup().await; + // tracing::info!("[{platform}]: process exited with status: {status:?}"); + // break; + // } + // } + // } + + Ok(StructuredOutput::Success) + } +} diff --git a/packages/cli/src/args/serve.rs b/packages/cli/src/args/serve.rs new file mode 100644 index 0000000000..98a190ea8f --- /dev/null +++ b/packages/cli/src/args/serve.rs @@ -0,0 +1,159 @@ +use super::{chained::ChainedCommand, *}; +use crate::{AddressArguments, BuildArgs, Platform, PROFILE_SERVER}; +use target_lexicon::Triple; + +/// Serve the project +/// +/// `dx serve` takes cargo args by default, except with a required `--platform` arg: +/// +/// ``` +/// dx serve --example blah --target blah --platform android +/// ``` +/// +/// As of dioxus 0.7, `dx serve` allows multiple builds at the same type by chaining the `crate` subcommand: +/// ``` +/// dx serve +/// @crate --blah +/// @crate --blah +/// @crate --blah +/// @crate --blah +/// ``` +/// +#[derive(Clone, Debug, Default, Parser)] +#[command(group = clap::ArgGroup::new("release-incompatible").multiple(true).conflicts_with("release"))] +pub(crate) struct ServeArgs { + /// The arguments for the address the server will run on + #[clap(flatten)] + pub(crate) address: AddressArguments, + + /// Open the app in the default browser [default: true - unless cli settings are set] + #[arg(long, 
default_missing_value="true", num_args=0..=1)] + pub(crate) open: Option, + + /// Enable full hot reloading for the app [default: true - unless cli settings are set] + #[clap(long, group = "release-incompatible")] + pub(crate) hot_reload: Option, + + /// Configure always-on-top for desktop apps [default: true - unless cli settings are set] + #[clap(long, default_missing_value = "true")] + pub(crate) always_on_top: Option, + + /// Set cross-origin-policy to same-origin [default: false] + #[clap(name = "cross-origin-policy")] + #[clap(long)] + pub(crate) cross_origin_policy: bool, + + /// Additional arguments to pass to the executable + #[clap(long)] + pub(crate) args: Vec, + + /// Sets the interval in seconds that the CLI will poll for file changes on WSL. + #[clap(long, default_missing_value = "2")] + pub(crate) wsl_file_poll_interval: Option, + + /// Run the server in interactive mode + #[arg(long, default_missing_value="true", num_args=0..=1, short = 'i')] + pub(crate) interactive: Option, + + /// Build this binary using binary patching instead of a full rebuild [default: false] + #[arg(long, default_value_t = false)] + pub(crate) hot_patch: bool, + + /// The feature to use for the client in a fullstack app [default: "web"] + #[clap(long)] + pub(crate) client_features: Vec, + + /// The feature to use for the server in a fullstack app [default: "server"] + #[clap(long)] + pub(crate) server_features: Vec, + + /// Build with custom profile for the fullstack server + #[clap(long, default_value_t = PROFILE_SERVER.to_string())] + pub(crate) server_profile: String, + + /// The target to build for the server. + /// + /// This can be different than the host allowing cross-compilation of the server. This is useful for + /// platforms like Cloudflare Workers where the server is compiled to wasm and then uploaded to the edge. 
+ #[clap(long)] + pub(crate) server_target: Option, + + /// Arguments for the build itself + #[clap(flatten)] + pub(crate) build_arguments: BuildArgs, + + /// A list of additional targets to build. + /// + /// Server and Client are special targets that receive features from the top-level command. + /// + /// + /// ``` + /// dx serve \ + /// client --target aarch64-apple-darwin \ + /// server --target wasm32-unknown-unknown \ + /// crate --target aarch64-unknown-linux-gnu + /// crate --target x86_64-unknown-linux-gnu + /// ``` + #[command(subcommand)] + pub(crate) targets: Option, +} + +/// Launch a specific target +#[derive(Debug, Subcommand, Clone, Deserialize)] +#[command(subcommand_precedence_over_arg = true)] +pub(crate) enum TargetCmd { + /// Specify the arguments for the client build + #[clap(name = "client")] + Client(ChainedCommand), + + /// Specify the arguments for the server build + #[clap(name = "server")] + Server(ChainedCommand), + + /// Specify the arguments for any number of additional targets + #[clap(name = "crate")] + Target(ChainedCommand), +} + +impl ServeArgs { + /// Start the tui, builder, etc by resolving the arguments and then running the actual top-level serve function + /// + /// Make sure not to do any intermediate logging since our tracing infra has now enabled much + /// higher log levels + pub(crate) async fn serve(self) -> Result { + crate::serve::serve_all(self).await?; + Ok(StructuredOutput::Success) + } + + pub(crate) fn should_hotreload(&self) -> bool { + self.hot_reload.unwrap_or(true) + } + + pub(crate) fn build_args(&self) -> &BuildArgs { + &self.build_arguments + } + + pub(crate) fn is_interactive_tty(&self) -> bool { + use std::io::IsTerminal; + std::io::stdout().is_terminal() && self.interactive.unwrap_or(true) + } + + pub(crate) fn should_proxy_build(&self) -> bool { + tracing::error!("todo: should_proxy_build is not implemented"); + false + + // match self.build_arguments.platform() { + // Platform::Server => true, + // 
// During SSG, just serve the static files instead of running the server + // _ => self.build_arguments.fullstack && !self.build_arguments.ssg, + // } + } +} + +impl std::ops::Deref for ServeArgs { + type Target = BuildArgs; + + fn deref(&self) -> &Self::Target { + &self.build_arguments + } +} diff --git a/packages/cli/src/cli/translate.rs b/packages/cli/src/args/translate.rs similarity index 100% rename from packages/cli/src/cli/translate.rs rename to packages/cli/src/args/translate.rs diff --git a/packages/cli/src/cli/verbosity.rs b/packages/cli/src/args/verbosity.rs similarity index 100% rename from packages/cli/src/cli/verbosity.rs rename to packages/cli/src/args/verbosity.rs diff --git a/packages/cli/src/build/builder.rs b/packages/cli/src/build/builder.rs index 68e90c9548..7a2501c205 100644 --- a/packages/cli/src/build/builder.rs +++ b/packages/cli/src/build/builder.rs @@ -1,10 +1,25 @@ use crate::{ - AppBundle, BuildArgs, BuildRequest, BuildStage, BuildUpdate, DioxusCrate, ProgressRx, + BuildArgs, BuildArtifacts, BuildRequest, BuildStage, BuildUpdate, Platform, ProgressRx, ProgressTx, Result, StructuredOutput, }; +use anyhow::Context; +use dioxus_cli_opt::process_file_to; +use futures_util::future::OptionFuture; use std::time::{Duration, Instant}; +use std::{ + net::SocketAddr, + path::{Path, PathBuf}, + process::{ExitStatus, Stdio}, +}; +use tokio::{ + io::{AsyncBufReadExt, BufReader, Lines}, + process::{Child, ChildStderr, ChildStdout, Command}, + task::JoinHandle, +}; -/// The component of the serve engine that watches ongoing builds and manages their state, handle, +use super::BuildMode; + +/// The component of the serve engine that watches ongoing builds and manages their state, open handle, /// and progress. 
/// /// Previously, the builder allowed multiple apps to be built simultaneously, but this newer design @@ -12,58 +27,111 @@ use std::time::{Duration, Instant}; /// /// Here, we track the number of crates being compiled, assets copied, the times of these events, and /// other metadata that gives us useful indicators for the UI. -pub(crate) struct Builder { - // Components of the build - pub krate: DioxusCrate, - pub request: BuildRequest, - pub build: tokio::task::JoinHandle>, +/// +/// A handle to a running app. +/// +/// Also includes a handle to its server if it exists. +/// The actual child processes might not be present (web) or running (died/killed). +/// +/// The purpose of this struct is to accumulate state about the running app and its server, like +/// any runtime information needed to hotreload the app or send it messages. +/// +/// We might want to bring in websockets here too, so we know the exact channels the app is using to +/// communicate with the devserver. Currently that's a broadcast-type system, so this struct isn't super +/// duper useful. +/// +/// todo: restructure this such that "open" is a running task instead of blocking the main thread +pub(crate) struct AppBuilder { pub tx: ProgressTx, pub rx: ProgressRx, + // The original request with access to its build directory + pub app: BuildRequest, + + // Ongoing build task, if any + pub build: JoinHandle>, + + // If a build has already finished, we'll have its artifacts (rustc, link args, etc) to work witha + pub artifacts: Option, + + // These might be None if the app died or the user did not specify a server + pub app_child: Option, + + // stdio for the app so we can read its stdout/stderr + // we don't map stdin today (todo) but most apps don't need it + pub app_stdout: Option>>, + pub app_stderr: Option>>, + + /// The executables but with some extra entropy in their name so we can run two instances of the + /// same app without causing collisions on the filesystem. 
+ pub entropy_app_exe: Option, + + /// The virtual directory that assets will be served from + /// Used mostly for apk/ipa builds since they live in simulator + pub runtime_asst_dir: Option, + // Metadata about the build that needs to be managed by watching build updates // used to render the TUI pub stage: BuildStage, pub compiled_crates: usize, - pub compiled_crates_server: usize, pub expected_crates: usize, - pub expected_crates_server: usize, pub bundling_progress: f64, pub compile_start: Option, pub compile_end: Option, - pub compile_end_server: Option, pub bundle_start: Option, pub bundle_end: Option, } -impl Builder { +pub enum HandleUpdate { + /// A running process has received a stdout. + /// May or may not be a complete line - do not treat it as a line. It will include a line if it is a complete line. + /// + /// We will poll lines and any content in a 50ms interval + StdoutReceived { + msg: String, + }, + + /// A running process has received a stderr. + /// May or may not be a complete line - do not treat it as a line. It will include a line if it is a complete line. 
+ /// + /// We will poll lines and any content in a 50ms interval + StderrReceived { + msg: String, + }, + + ProcessExited { + status: ExitStatus, + }, +} + +impl AppBuilder { /// Create a new builder and immediately start a build - pub(crate) fn start(krate: &DioxusCrate, args: BuildArgs) -> Result { + pub(crate) fn start(request: &BuildRequest) -> Result { let (tx, rx) = futures_channel::mpsc::unbounded(); - let request = BuildRequest::new(krate.clone(), args, tx.clone()); + // let request = BuildRequest::new(args.clone(), krate.clone(), tx.clone(), BuildMode::Fat)?; Ok(Self { - krate: krate.clone(), - request: request.clone(), + app: request.clone(), stage: BuildStage::Initializing, build: tokio::spawn(async move { - // On the first build, we want to verify the tooling - // We wont bother verifying on subsequent builds - request.verify_tooling().await?; - - request.build_all().await + // request.build_all().await + todo!() }), tx, rx, compiled_crates: 0, expected_crates: 1, - expected_crates_server: 1, - compiled_crates_server: 0, bundling_progress: 0.0, compile_start: Some(Instant::now()), compile_end: None, - compile_end_server: None, bundle_start: None, bundle_end: None, + runtime_asst_dir: None, + app_child: None, + app_stderr: None, + app_stdout: None, + entropy_app_exe: None, + artifacts: None, }) } @@ -85,6 +153,42 @@ impl Builder { }, }; + // let platform = self.app.platform; + // use HandleUpdate::*; + // tokio::select! 
{ + // Some(Ok(Some(msg))) = OptionFuture::from(self.app_stdout.as_mut().map(|f| f.next_line())) => { + // StdoutReceived { platform, msg } + // }, + // Some(Ok(Some(msg))) = OptionFuture::from(self.app_stderr.as_mut().map(|f| f.next_line())) => { + // StderrReceived { platform, msg } + // }, + // Some(status) = OptionFuture::from(self.app_child.as_mut().map(|f| f.wait())) => { + // match status { + // Ok(status) => { + // self.app_child = None; + // ProcessExited { status, platform } + // }, + // Err(_err) => todo!("handle error in process joining?"), + // } + // } + // Some(Ok(Some(msg))) = OptionFuture::from(self.server_stdout.as_mut().map(|f| f.next_line())) => { + // StdoutReceived { platform: Platform::Server, msg } + // }, + // Some(Ok(Some(msg))) = OptionFuture::from(self.server_stderr.as_mut().map(|f| f.next_line())) => { + // StderrReceived { platform: Platform::Server, msg } + // }, + // Some(status) = OptionFuture::from(self.server_child.as_mut().map(|f| f.wait())) => { + // match status { + // Ok(status) => { + // self.server_child = None; + // ProcessExited { status, platform } + // }, + // Err(_err) => todo!("handle error in process joining?"), + // } + // } + // else => futures_util::future::pending().await + // } + // Update the internal stage of the build so the UI can render it match &update { BuildUpdate::Progress { stage } => { @@ -95,33 +199,24 @@ impl Builder { match stage { BuildStage::Initializing => { self.compiled_crates = 0; - self.compiled_crates_server = 0; self.bundling_progress = 0.0; } - BuildStage::Starting { - crate_count, - is_server, - } => { - if *is_server { - self.expected_crates_server = *crate_count; - } else { - self.expected_crates = *crate_count; - } + BuildStage::Starting { crate_count, .. 
} => { + // if *is_server { + // self.expected_crates_server = *crate_count; + // } else { + self.expected_crates = *crate_count; + // } } BuildStage::InstallingTooling {} => {} - BuildStage::Compiling { - current, - total, - is_server, - .. - } => { - if *is_server { - self.compiled_crates_server = *current; - self.expected_crates_server = *total; - } else { - self.compiled_crates = *current; - self.expected_crates = *total; - } + BuildStage::Compiling { current, total, .. } => { + // if *is_server { + // self.compiled_crates_server = *current; + // self.expected_crates_server = *total; + // } else { + self.compiled_crates = *current; + self.expected_crates = *total; + // } if self.compile_start.is_none() { self.compile_start = Some(Instant::now()); @@ -138,18 +233,18 @@ impl Builder { } BuildStage::Success => { self.compiled_crates = self.expected_crates; - self.compiled_crates_server = self.expected_crates_server; + // self.compiled_crates_server = self.expected_crates_server; self.bundling_progress = 1.0; } BuildStage::Failed => { self.compiled_crates = self.expected_crates; - self.compiled_crates_server = self.expected_crates_server; + // self.compiled_crates_server = self.expected_crates_server; self.bundling_progress = 1.0; } BuildStage::Aborted => {} BuildStage::Restarting => { self.compiled_crates = 0; - self.compiled_crates_server = 0; + // self.compiled_crates_server = 0; self.expected_crates = 1; self.bundling_progress = 0.0; } @@ -161,7 +256,6 @@ impl Builder { BuildUpdate::CompilerMessage { .. } => {} BuildUpdate::BuildReady { .. 
} => { self.compiled_crates = self.expected_crates; - self.compiled_crates_server = self.expected_crates_server; self.bundling_progress = 1.0; self.stage = BuildStage::Success; @@ -177,18 +271,52 @@ impl Builder { update } + pub(crate) fn patch_rebuild( + &mut self, + args: BuildArgs, + direct_rustc: Vec, + changed_files: Vec, + aslr_offset: u64, + ) -> Result<()> { + todo!() + // // Initialize a new build, resetting our progress/stage to the beginning and replacing the old tokio task + // let request = BuildRequest::new( + // args, + // self.krate.clone(), + // self.tx.clone(), + // BuildMode::Thin { + // direct_rustc, + // changed_files, + // aslr_reference: aslr_offset, + // }, + // )?; + + // // Abort all the ongoing builds, cleaning up any loose artifacts and waiting to cleanly exit + // self.abort_all(); + // self.request = request.clone(); + // self.stage = BuildStage::Restarting; + + // // This build doesn't have any extra special logging - rebuilds would get pretty noisy + // self.build = tokio::spawn(async move { request.build_all().await }); + + // Ok(()) + } + /// Restart this builder with new build arguments. 
- pub(crate) fn rebuild(&mut self, args: BuildArgs) { - // Abort all the ongoing builds, cleaning up any loose artifacts and waiting to cleanly exit - self.abort_all(); + pub(crate) fn rebuild(&mut self, args: BuildArgs) -> Result<()> { + todo!() + // let request = BuildRequest::new(args, self.krate.clone(), self.tx.clone(), BuildMode::Fat)?; + + // // Abort all the ongoing builds, cleaning up any loose artifacts and waiting to cleanly exit + // // And then start a new build, resetting our progress/stage to the beginning and replacing the old tokio task + // self.abort_all(); + // self.request = request.clone(); + // self.stage = BuildStage::Restarting; - // And then start a new build, resetting our progress/stage to the beginning and replacing the old tokio task - let request = BuildRequest::new(self.krate.clone(), args, self.tx.clone()); - self.request = request.clone(); - self.stage = BuildStage::Restarting; + // // This build doesn't have any extra special logging - rebuilds would get pretty noisy + // self.build = tokio::spawn(async move { request.build_all().await }); - // This build doesn't have any extra special logging - rebuilds would get pretty noisy - self.build = tokio::spawn(async move { request.build_all().await }); + // Ok(()) } /// Shutdown the current build process @@ -198,7 +326,6 @@ impl Builder { self.build.abort(); self.stage = BuildStage::Aborted; self.compiled_crates = 0; - self.compiled_crates_server = 0; self.expected_crates = 1; self.bundling_progress = 0.0; self.compile_start = None; @@ -212,7 +339,7 @@ impl Builder { /// /// todo(jon): maybe we want to do some logging here? The build/bundle/run screens could be made to /// use the TUI output for prettier outputs. 
- pub(crate) async fn finish(&mut self) -> Result { + pub(crate) async fn finish(&mut self) -> Result { loop { match self.wait().await { BuildUpdate::Progress { stage } => { @@ -244,7 +371,7 @@ impl Builder { } BuildUpdate::BuildReady { bundle } => { tracing::debug!(json = ?StructuredOutput::BuildFinished { - path: bundle.build.root_dir(), + path: self.app.root_dir(), }); return Ok(bundle); } @@ -263,11 +390,801 @@ impl Builder { } } + pub(crate) async fn open( + &mut self, + devserver_ip: SocketAddr, + start_fullstack_on_address: Option, + open_browser: bool, + ) -> Result<()> { + let krate = &self.app; + + // Set the env vars that the clients will expect + // These need to be stable within a release version (ie 0.6.0) + let mut envs = vec![ + (dioxus_cli_config::CLI_ENABLED_ENV, "true".to_string()), + ( + dioxus_cli_config::ALWAYS_ON_TOP_ENV, + krate + .workspace + .settings + .always_on_top + .unwrap_or(true) + .to_string(), + ), + ( + dioxus_cli_config::APP_TITLE_ENV, + krate.config.web.app.title.clone(), + ), + ("RUST_BACKTRACE", "1".to_string()), + ( + dioxus_cli_config::DEVSERVER_IP_ENV, + devserver_ip.ip().to_string(), + ), + ( + dioxus_cli_config::DEVSERVER_PORT_ENV, + devserver_ip.port().to_string(), + ), + // unset the cargo dirs in the event we're running `dx` locally + // since the child process will inherit the env vars, we don't want to confuse the downstream process + ("CARGO_MANIFEST_DIR", "".to_string()), + ( + dioxus_cli_config::SESSION_CACHE_DIR, + self.app.session_cache_dir().display().to_string(), + ), + ]; + + if let Some(base_path) = &krate.config.web.app.base_path { + envs.push((dioxus_cli_config::ASSET_ROOT_ENV, base_path.clone())); + } + + // // Launch the server if we were given an address to start it on, and the build includes a server. After we + // // start the server, consume its stdout/stderr. 
+ // if let (Some(addr), Some(server)) = (start_fullstack_on_address, self.server_exe()) { + // tracing::debug!("Proxying fullstack server from port {:?}", addr); + // envs.push((dioxus_cli_config::SERVER_IP_ENV, addr.ip().to_string())); + // envs.push((dioxus_cli_config::SERVER_PORT_ENV, addr.port().to_string())); + // tracing::debug!("Launching server from path: {server:?}"); + // let mut child = Command::new(server) + // .envs(envs.clone()) + // .stderr(Stdio::piped()) + // .stdout(Stdio::piped()) + // .kill_on_drop(true) + // .spawn()?; + // let stdout = BufReader::new(child.stdout.take().unwrap()); + // let stderr = BufReader::new(child.stderr.take().unwrap()); + // self.server_stdout = Some(stdout.lines()); + // self.server_stderr = Some(stderr.lines()); + // self.server_child = Some(child); + // } + + // We try to use stdin/stdout to communicate with the app + let running_process = match self.app.platform { + // Unfortunately web won't let us get a proc handle to it (to read its stdout/stderr) so instead + // use use the websocket to communicate with it. I wish we could merge the concepts here, + // like say, opening the socket as a subprocess, but alas, it's simpler to do that somewhere else. 
+ Platform::Web => { + // Only the first build we open the web app, after that the user knows it's running + if open_browser { + self.open_web(devserver_ip); + } + + None + } + + Platform::Ios => Some(self.open_ios_sim(envs).await?), + + // https://developer.android.com/studio/run/emulator-commandline + Platform::Android => { + self.open_android_sim(devserver_ip, envs).await; + None + } + + // These are all just basically running the main exe, but with slightly different resource dir paths + Platform::Server + | Platform::MacOS + | Platform::Windows + | Platform::Linux + | Platform::Liveview => Some(self.open_with_main_exe(envs)?), + }; + + // If we have a running process, we need to attach to it and wait for its outputs + if let Some(mut child) = running_process { + let stdout = BufReader::new(child.stdout.take().unwrap()); + let stderr = BufReader::new(child.stderr.take().unwrap()); + self.app_stdout = Some(stdout.lines()); + self.app_stderr = Some(stderr.lines()); + self.app_child = Some(child); + } + + Ok(()) + } + + /// Gracefully kill the process and all of its children + /// + /// Uses the `SIGTERM` signal on unix and `taskkill` on windows. + /// This complex logic is necessary for things like window state preservation to work properly. + /// + /// Also wipes away the entropy executables if they exist. 
+ pub(crate) async fn cleanup(&mut self) { + // Soft-kill the process by sending a sigkill, allowing the process to clean up + self.soft_kill().await; + + // Wipe out the entropy executables if they exist + if let Some(entropy_app_exe) = self.entropy_app_exe.take() { + _ = std::fs::remove_file(entropy_app_exe); + } + + // if let Some(entropy_server_exe) = self.entropy_server_exe.take() { + // _ = std::fs::remove_file(entropy_server_exe); + // } + } + + /// Kill the app and server exes + pub(crate) async fn soft_kill(&mut self) { + use futures_util::FutureExt; + + // Kill any running executables on Windows + let Some(mut process) = self.app_child.take() else { + return; + }; + + let Some(pid) = process.id() else { + _ = process.kill().await; + return; + }; + + // on unix, we can send a signal to the process to shut down + #[cfg(unix)] + { + _ = Command::new("kill") + .args(["-s", "TERM", &pid.to_string()]) + .spawn(); + } + + // on windows, use the `taskkill` command + #[cfg(windows)] + { + _ = Command::new("taskkill") + .args(["/F", "/PID", &pid.to_string()]) + .spawn(); + } + + // join the wait with a 100ms timeout + futures_util::select! { + _ = process.wait().fuse() => {} + _ = tokio::time::sleep(std::time::Duration::from_millis(1000)).fuse() => {} + }; + } + + /// Hotreload an asset in the running app. + /// + /// This will modify the build dir in place! Be careful! We generally assume you want all bundles + /// to reflect the latest changes, so we will modify the bundle. + /// + /// However, not all platforms work like this, so we might also need to update a separate asset + /// dir that the system simulator might be providing. We know this is the case for ios simulators + /// and haven't yet checked for android. + /// + /// This will return the bundled name of the asset such that we can send it to the clients letting + /// them know what to reload. 
It's not super important that this is robust since most clients will + /// kick all stylsheets without necessarily checking the name. + pub(crate) async fn hotreload_bundled_asset(&self, changed_file: &PathBuf) -> Option { + let mut bundled_name = None; + + let Some(artifacts) = self.artifacts.as_ref() else { + tracing::debug!("No artifacts to hotreload asset"); + return None; + }; + + // Use the build dir if there's no runtime asset dir as the override. For the case of ios apps, + // we won't actually be using the build dir. + let asset_dir = match self.runtime_asst_dir.as_ref() { + Some(dir) => dir.to_path_buf().join("assets/"), + None => self.app.asset_dir(), + }; + + tracing::debug!("Hotreloading asset {changed_file:?} in target {asset_dir:?}"); + + // If the asset shares the same name in the bundle, reload that + if let Some(legacy_asset_dir) = self.app.legacy_asset_dir() { + if changed_file.starts_with(&legacy_asset_dir) { + tracing::debug!("Hotreloading legacy asset {changed_file:?}"); + let trimmed = changed_file.strip_prefix(legacy_asset_dir).unwrap(); + let res = std::fs::copy(changed_file, asset_dir.join(trimmed)); + bundled_name = Some(trimmed.to_path_buf()); + if let Err(e) = res { + tracing::debug!("Failed to hotreload legacy asset {e}"); + } + } + } + + // Canonicalize the path as Windows may use long-form paths "\\\\?\\C:\\". + let changed_file = dunce::canonicalize(changed_file) + .inspect_err(|e| tracing::debug!("Failed to canonicalize hotreloaded asset: {e}")) + .ok()?; + + // The asset might've been renamed thanks to the manifest, let's attempt to reload that too + if let Some(resource) = artifacts.assets.assets.get(&changed_file).as_ref() { + let output_path = asset_dir.join(resource.bundled_path()); + // Remove the old asset if it exists + _ = std::fs::remove_file(&output_path); + // And then process the asset with the options into the **old** asset location. 
If we recompiled, + // the asset would be in a new location because the contents and hash have changed. Since we are + // hotreloading, we need to use the old asset location it was originally written to. + let options = *resource.options(); + let res = process_file_to(&options, &changed_file, &output_path); + bundled_name = Some(PathBuf::from(resource.bundled_path())); + if let Err(e) = res { + tracing::debug!("Failed to hotreload asset {e}"); + } + } + + // If the emulator is android, we need to copy the asset to the device with `adb push asset /data/local/tmp/dx/assets/filename.ext` + if self.app.platform == Platform::Android { + if let Some(bundled_name) = bundled_name.as_ref() { + _ = self + .copy_file_to_android_tmp(&changed_file, &bundled_name) + .await; + } + } + + // Now we can return the bundled asset name to send to the hotreload engine + bundled_name + } + + /// Copy this file to the tmp folder on the android device, returning the path to the copied file + pub(crate) async fn copy_file_to_android_tmp( + &self, + changed_file: &Path, + bundled_name: &Path, + ) -> Result { + let target = PathBuf::from("/data/app/~~OE9KIaCNz0l5pwJue6zY8Q==/com.example.SubsecondHarness-pilWFhddpEHdzmzy-khHRA==/lib/arm64/").join(bundled_name); + // let target = dioxus_cli_config::android_session_cache_dir().join(bundled_name); + tracing::debug!("Pushing asset to device: {target:?}"); + let res = tokio::process::Command::new(crate::build::android_tools().unwrap().adb) + .arg("push") + .arg(&changed_file) + .arg(&target) + .output() + .await + .context("Failed to push asset to device"); + + if let Err(e) = res { + tracing::debug!("Failed to push asset to device: {e}"); + } + + Ok(target) + } + + /// Open the native app simply by running its main exe + /// + /// Eventually, for mac, we want to run the `.app` with `open` to fix issues with `dylib` paths, + /// but for now, we just run the exe directly. 
Very few users should be caring about `dylib` search + /// paths right now, but they will when we start to enable things like swift integration. + /// + /// Server/liveview/desktop are all basically the same, though + fn open_with_main_exe(&mut self, envs: Vec<(&str, String)>) -> Result { + // Create a new entropy app exe if we need to + let main_exe = self.app_exe(); + let child = Command::new(main_exe) + .envs(envs) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .kill_on_drop(true) + .spawn()?; + + Ok(child) + } + + /// Open the web app by opening the browser to the given address. + /// Check if we need to use https or not, and if so, add the protocol. + /// Go to the basepath if that's set too. + fn open_web(&self, address: SocketAddr) { + let base_path = self.app.config.web.app.base_path.clone(); + let https = self.app.config.web.https.enabled.unwrap_or_default(); + let protocol = if https { "https" } else { "http" }; + let base_path = match base_path.as_deref() { + Some(base_path) => format!("/{}", base_path.trim_matches('/')), + None => "".to_owned(), + }; + _ = open::that(format!("{protocol}://{address}{base_path}")); + } + + /// Use `xcrun` to install the app to the simulator + /// With simulators, we're free to basically do anything, so we don't need to do any fancy codesigning + /// or entitlements, or anything like that. + /// + /// However, if there's no simulator running, this *might* fail. + /// + /// TODO(jon): we should probably check if there's a simulator running before trying to install, + /// and open the simulator if we have to. 
+ async fn open_ios_sim(&mut self, envs: Vec<(&str, String)>) -> Result { + tracing::debug!("Installing app to simulator {:?}", self.app.root_dir()); + + let res = Command::new("xcrun") + .arg("simctl") + .arg("install") + .arg("booted") + .arg(self.app.root_dir()) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .output() + .await?; + + tracing::debug!("Installed app to simulator with exit code: {res:?}"); + + // Remap the envs to the correct simctl env vars + // iOS sim lets you pass env vars but they need to be in the format "SIMCTL_CHILD_XXX=XXX" + let ios_envs = envs + .iter() + .map(|(k, v)| (format!("SIMCTL_CHILD_{k}"), v.clone())); + + let child = Command::new("xcrun") + .arg("simctl") + .arg("launch") + .arg("--console") + .arg("booted") + .arg(self.app.bundle_identifier()) + .envs(ios_envs) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .kill_on_drop(true) + .spawn()?; + + Ok(child) + } + + /// We have this whole thing figured out, but we don't actually use it yet. + /// + /// Launching on devices is more complicated and requires us to codesign the app, which we don't + /// currently do. + /// + /// Converting these commands shouldn't be too hard, but device support would imply we need + /// better support for codesigning and entitlements. + #[allow(unused)] + async fn open_ios_device(&self) -> Result<()> { + use serde_json::Value; + let app_path = self.app.root_dir(); + + install_app(&app_path).await?; + + // 2. Determine which device the app was installed to + let device_uuid = get_device_uuid().await?; + + // 3. Get the installation URL of the app + let installation_url = get_installation_url(&device_uuid, &app_path).await?; + + // 4. Launch the app into the background, paused + launch_app_paused(&device_uuid, &installation_url).await?; + + // 5. 
Pick up the paused app and resume it + resume_app(&device_uuid).await?; + + async fn install_app(app_path: &PathBuf) -> Result<()> { + let output = Command::new("xcrun") + .args(["simctl", "install", "booted"]) + .arg(app_path) + .output() + .await?; + + if !output.status.success() { + return Err(format!("Failed to install app: {:?}", output).into()); + } + + Ok(()) + } + + async fn get_device_uuid() -> Result { + let output = Command::new("xcrun") + .args([ + "devicectl", + "list", + "devices", + "--json-output", + "target/deviceid.json", + ]) + .output() + .await?; + + let json: Value = + serde_json::from_str(&std::fs::read_to_string("target/deviceid.json")?) + .context("Failed to parse xcrun output")?; + let device_uuid = json["result"]["devices"][0]["identifier"] + .as_str() + .ok_or("Failed to extract device UUID")? + .to_string(); + + Ok(device_uuid) + } + + async fn get_installation_url(device_uuid: &str, app_path: &Path) -> Result { + // xcrun devicectl device install app --device --path --json-output + let output = Command::new("xcrun") + .args([ + "devicectl", + "device", + "install", + "app", + "--device", + device_uuid, + &app_path.display().to_string(), + "--json-output", + "target/xcrun.json", + ]) + .output() + .await?; + + if !output.status.success() { + return Err(format!("Failed to install app: {:?}", output).into()); + } + + let json: Value = serde_json::from_str(&std::fs::read_to_string("target/xcrun.json")?) + .context("Failed to parse xcrun output")?; + let installation_url = json["result"]["installedApplications"][0]["installationURL"] + .as_str() + .ok_or("Failed to extract installation URL")? 
+ .to_string(); + + Ok(installation_url) + } + + async fn launch_app_paused(device_uuid: &str, installation_url: &str) -> Result<()> { + let output = Command::new("xcrun") + .args([ + "devicectl", + "device", + "process", + "launch", + "--no-activate", + "--verbose", + "--device", + device_uuid, + installation_url, + "--json-output", + "target/launch.json", + ]) + .output() + .await?; + + if !output.status.success() { + return Err(format!("Failed to launch app: {:?}", output).into()); + } + + Ok(()) + } + + async fn resume_app(device_uuid: &str) -> Result<()> { + let json: Value = serde_json::from_str(&std::fs::read_to_string("target/launch.json")?) + .context("Failed to parse xcrun output")?; + + let status_pid = json["result"]["process"]["processIdentifier"] + .as_u64() + .ok_or("Failed to extract process identifier")?; + + let output = Command::new("xcrun") + .args([ + "devicectl", + "device", + "process", + "resume", + "--device", + device_uuid, + "--pid", + &status_pid.to_string(), + ]) + .output() + .await?; + + if !output.status.success() { + return Err(format!("Failed to resume app: {:?}", output).into()); + } + + Ok(()) + } + + unimplemented!("dioxus-cli doesn't support ios devices yet.") + } + + #[allow(unused)] + async fn codesign_ios(&self) -> Result<()> { + const CODESIGN_ERROR: &str = r#"This is likely because you haven't +- Created a provisioning profile before +- Accepted the Apple Developer Program License Agreement + +The agreement changes frequently and might need to be accepted again. +To accept the agreement, go to https://developer.apple.com/account + +To create a provisioning profile, follow the instructions here: +https://developer.apple.com/documentation/xcode/sharing-your-teams-signing-certificates"#; + + let profiles_folder = dirs::home_dir() + .context("Your machine has no home-dir")? 
+ .join("Library/MobileDevice/Provisioning Profiles"); + + if !profiles_folder.exists() || profiles_folder.read_dir()?.next().is_none() { + tracing::error!( + r#"No provisioning profiles found when trying to codesign the app. +We checked the folder: {} + +{CODESIGN_ERROR} +"#, + profiles_folder.display() + ) + } + + let identities = Command::new("security") + .args(["find-identity", "-v", "-p", "codesigning"]) + .output() + .await + .context("Failed to run `security find-identity -v -p codesigning`") + .map(|e| { + String::from_utf8(e.stdout) + .context("Failed to parse `security find-identity -v -p codesigning`") + })??; + + // Parsing this: + // 51ADE4986E0033A5DB1C794E0D1473D74FD6F871 "Apple Development: jkelleyrtp@gmail.com (XYZYZY)" + let app_dev_name = regex::Regex::new(r#""Apple Development: (.+)""#) + .unwrap() + .captures(&identities) + .and_then(|caps| caps.get(1)) + .map(|m| m.as_str()) + .context( + "Failed to find Apple Development in `security find-identity -v -p codesigning`", + )?; + + // Acquire the provision file + let provision_file = profiles_folder + .read_dir()? + .flatten() + .find(|entry| { + entry + .file_name() + .to_str() + .map(|s| s.contains("mobileprovision")) + .unwrap_or_default() + }) + .context("Failed to find a provisioning profile. 
\n\n{CODESIGN_ERROR}")?; + + // The .mobileprovision file has some random binary thrown into into, but it's still basically a plist + // Let's use the plist markers to find the start and end of the plist + fn cut_plist(bytes: &[u8], byte_match: &[u8]) -> Option { + bytes + .windows(byte_match.len()) + .enumerate() + .rev() + .find(|(_, slice)| *slice == byte_match) + .map(|(i, _)| i + byte_match.len()) + } + let bytes = std::fs::read(provision_file.path())?; + let cut1 = cut_plist(&bytes, b""#.as_bytes()) + .context("Failed to parse .mobileprovision file")?; + let sub_bytes = &bytes[(cut1 - 6)..cut2]; + let mbfile: ProvisioningProfile = + plist::from_bytes(sub_bytes).context("Failed to parse .mobileprovision file")?; + + #[derive(serde::Deserialize, Debug)] + struct ProvisioningProfile { + #[serde(rename = "TeamIdentifier")] + team_identifier: Vec, + #[serde(rename = "ApplicationIdentifierPrefix")] + application_identifier_prefix: Vec, + #[serde(rename = "Entitlements")] + entitlements: Entitlements, + } + + #[derive(serde::Deserialize, Debug)] + struct Entitlements { + #[serde(rename = "application-identifier")] + application_identifier: String, + #[serde(rename = "keychain-access-groups")] + keychain_access_groups: Vec, + } + + let entielements_xml = format!( + r#" + + + + application-identifier + {APPLICATION_IDENTIFIER} + keychain-access-groups + + {APP_ID_ACCESS_GROUP}.* + + get-task-allow + + com.apple.developer.team-identifier + {TEAM_IDENTIFIER} + + "#, + APPLICATION_IDENTIFIER = mbfile.entitlements.application_identifier, + APP_ID_ACCESS_GROUP = mbfile.entitlements.keychain_access_groups[0], + TEAM_IDENTIFIER = mbfile.team_identifier[0], + ); + + // write to a temp file + let temp_file = tempfile::NamedTempFile::new()?; + std::fs::write(temp_file.path(), entielements_xml)?; + + // codesign the app + let output = Command::new("codesign") + .args([ + "--force", + "--entitlements", + temp_file.path().to_str().unwrap(), + "--sign", + app_dev_name, + ]) + 
.arg(self.app.root_dir()) + .output() + .await + .context("Failed to codesign the app")?; + + if !output.status.success() { + let stderr = String::from_utf8(output.stderr).unwrap_or_default(); + return Err(format!("Failed to codesign the app: {stderr}").into()); + } + + Ok(()) + } + + async fn open_android_sim( + &self, + devserver_socket: SocketAddr, + envs: Vec<(&'static str, String)>, + ) { + let apk_path = self.app.apk_path(); + let session_cache = self.app.session_cache_dir(); + let full_mobile_app_name = self.app.full_mobile_app_name(); + + // Start backgrounded since .open() is called while in the arm of the top-level match + tokio::task::spawn(async move { + let adb = crate::build::android_tools().unwrap().adb; + + let port = devserver_socket.port(); + if let Err(e) = Command::new("adb") + .arg("reverse") + .arg(format!("tcp:{}", port)) + .arg(format!("tcp:{}", port)) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .output() + .await + { + tracing::error!("failed to forward port {port}: {e}"); + } + + // Install + // adb install -r app-debug.apk + if let Err(e) = Command::new(&adb) + .arg("install") + .arg("-r") + .arg(apk_path) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .output() + .await + { + tracing::error!("Failed to install apk with `adb`: {e}"); + }; + + // Write the env vars to a .env file in our session cache + let env_file = session_cache.join(".env"); + let contents: String = envs + .iter() + .map(|(key, value)| format!("{key}={value}")) + .collect::>() + .join("\n"); + _ = std::fs::write(&env_file, contents); + + // Push the env file to the device + if let Err(e) = tokio::process::Command::new(&adb) + .arg("push") + .arg(env_file) + .arg(dioxus_cli_config::android_session_cache_dir().join(".env")) + .output() + .await + .context("Failed to push asset to device") + { + tracing::error!("Failed to push .env file to device: {e}"); + } + + // eventually, use the user's MainActivity, not our MainActivity + // adb shell am start -n 
dev.dioxus.main/dev.dioxus.main.MainActivity + let activity_name = format!("{}/dev.dioxus.main.MainActivity", full_mobile_app_name,); + + if let Err(e) = Command::new(&adb) + .arg("shell") + .arg("am") + .arg("start") + .arg("-n") + .arg(activity_name) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) + .output() + .await + { + tracing::error!("Failed to start app with `adb`: {e}"); + }; + }); + } + + fn make_entropy_path(exe: &PathBuf) -> PathBuf { + let id = uuid::Uuid::new_v4(); + let name = id.to_string(); + let some_entropy = name.split('-').next().unwrap(); + + // Make a copy of the server exe with a new name + let entropy_server_exe = exe.with_file_name(format!( + "{}-{}", + exe.file_name().unwrap().to_str().unwrap(), + some_entropy + )); + + std::fs::copy(exe, &entropy_server_exe).unwrap(); + + entropy_server_exe + } + + fn server_exe(&mut self) -> Option { + todo!() + // let mut server = self.app.server_exe()?; + + // // Create a new entropy server exe if we need to + // if cfg!(target_os = "windows") || cfg!(target_os = "linux") { + // // If we already have an entropy server exe, return it - this is useful for re-opening the same app + // if let Some(existing_server) = self.entropy_server_exe.clone() { + // return Some(existing_server); + // } + + // // Otherwise, create a new entropy server exe and save it for re-opning + // let entropy_server_exe = Self::make_entropy_path(&server); + // self.entropy_server_exe = Some(entropy_server_exe.clone()); + // server = entropy_server_exe; + // } + + // Some(server) + } + + fn app_exe(&mut self) -> PathBuf { + let mut main_exe = self.app.main_exe(); + + // The requirement here is based on the platform, not necessarily our current architecture. 
+ let requires_entropy = match self.app.platform { + // When running "bundled", we don't need entropy + Platform::Web => false, + Platform::MacOS => false, + Platform::Ios => false, + Platform::Android => false, + + // But on platforms that aren't running as "bundled", we do. + Platform::Windows => true, + Platform::Linux => true, + Platform::Server => true, + Platform::Liveview => true, + }; + + if requires_entropy || std::env::var("DIOXUS_ENTROPY").is_ok() { + // If we already have an entropy app exe, return it - this is useful for re-opening the same app + if let Some(existing_app_exe) = self.entropy_app_exe.clone() { + return existing_app_exe; + } + + let entropy_app_exe = Self::make_entropy_path(&main_exe); + self.entropy_app_exe = Some(entropy_app_exe.clone()); + main_exe = entropy_app_exe; + } + + main_exe + } + fn complete_compile(&mut self) { if self.compile_end.is_none() { self.compiled_crates = self.expected_crates; self.compile_end = Some(Instant::now()); - self.compile_end_server = Some(Instant::now()); + // self.compile_end_server = Some(Instant::now()); } } @@ -299,7 +1216,8 @@ impl Builder { /// Return a number between 0 and 1 representing the progress of the server build pub(crate) fn server_compile_progress(&self) -> f64 { - self.compiled_crates_server as f64 / self.expected_crates_server as f64 + todo!() + // self.compiled_crates_server as f64 / self.expected_crates_server as f64 } pub(crate) fn bundle_progress(&self) -> f64 { diff --git a/packages/cli/src/build/bundle.rs b/packages/cli/src/build/bundle.rs index e2b769f9d2..2676309bf5 100644 --- a/packages/cli/src/build/bundle.rs +++ b/packages/cli/src/build/bundle.rs @@ -1,955 +1,260 @@ +//! ## Web: +//! Create a folder that is somewhat similar to an app-image (exe + asset) +//! The server is dropped into the `web` folder, even if there's no `public` folder. +//! If there's no server (SPA), we still use the `web` folder, but it only contains the +//! public folder. +//! ``` +//! web/ +//! 
server +//! assets/ +//! public/ +//! index.html +//! wasm/ +//! app.wasm +//! glue.js +//! snippets/ +//! ... +//! assets/ +//! logo.png +//! ``` +//! +//! ## Linux: +//! https://docs.appimage.org/reference/appdir.html#ref-appdir +//! current_exe.join("Assets") +//! ``` +//! app.appimage/ +//! AppRun +//! app.desktop +//! package.json +//! assets/ +//! logo.png +//! ``` +//! +//! ## Macos +//! We simply use the macos format where binaries are in `Contents/MacOS` and assets are in `Contents/Resources` +//! We put assets in an assets dir such that it generally matches every other platform and we can +//! output `/assets/blah` from manganis. +//! ``` +//! App.app/ +//! Contents/ +//! Info.plist +//! MacOS/ +//! Frameworks/ +//! Resources/ +//! assets/ +//! blah.icns +//! blah.png +//! CodeResources +//! _CodeSignature/ +//! ``` +//! +//! ## iOS +//! Not the same as mac! ios apps are a bit "flattened" in comparison. simpler format, presumably +//! since most ios apps don't ship frameworks/plugins and such. +//! +//! todo(jon): include the signing and entitlements in this format diagram. +//! ``` +//! App.app/ +//! main +//! assets/ +//! ``` +//! +//! ## Android: +//! +//! Currently we need to generate a `src` type structure, not a pre-packaged apk structure, since +//! we need to compile kotlin and java. This pushes us into using gradle and following a structure +//! similar to that of cargo mobile2. Eventually I'd like to slim this down (drop buildSrc) and +//! drive the kotlin build ourselves. This would let us drop gradle (yay! no plugins!) but requires +//! us to manage dependencies (like kotlinc) ourselves (yuck!). +//! +//! https://github.com/WanghongLin/miscellaneous/blob/master/tools/build-apk-manually.sh +//! +//! Unfortunately, it seems that while we can drop the `android` build plugin, we still will need +//! gradle since kotlin is basically gradle-only. +//! +//! Pre-build: +//! ``` +//! app.apk/ +//! .gradle +//! app/ +//! src/ +//! main/ +//! 
assets/ +//! jniLibs/ +//! java/ +//! kotlin/ +//! res/ +//! AndroidManifest.xml +//! build.gradle.kts +//! proguard-rules.pro +//! buildSrc/ +//! build.gradle.kts +//! src/ +//! main/ +//! kotlin/ +//! BuildTask.kt +//! build.gradle.kts +//! gradle.properties +//! gradlew +//! gradlew.bat +//! settings.gradle +//! ``` +//! +//! Final build: +//! ``` +//! app.apk/ +//! AndroidManifest.xml +//! classes.dex +//! assets/ +//! logo.png +//! lib/ +//! armeabi-v7a/ +//! libmyapp.so +//! arm64-v8a/ +//! libmyapp.so +//! ``` +//! Notice that we *could* feasibly build this ourselves :) +//! +//! ## Windows: +//! https://superuser.com/questions/749447/creating-a-single-file-executable-from-a-directory-in-windows +//! Windows does not provide an AppImage format, so instead we're going build the same folder +//! structure as an AppImage, but when distributing, we'll create a .exe that embeds the resources +//! as an embedded .zip file. When the app runs, it will implicitly unzip its resources into the +//! Program Files folder. Any subsequent launches of the parent .exe will simply call the AppRun.exe +//! entrypoint in the associated Program Files folder. +//! +//! This is, in essence, the same as an installer, so we might eventually just support something like msi/msix +//! which functionally do the same thing but with a sleeker UI. +//! +//! This means no installers are required and we can bake an updater into the host exe. +//! +//! ## Handling asset lookups: +//! current_exe.join("assets") +//! ``` +//! app.appimage/ +//! main.exe +//! main.desktop +//! package.json +//! assets/ +//! logo.png +//! ``` +//! +//! Since we support just a few locations, we could just search for the first that exists +//! - usr +//! - ../Resources +//! - assets +//! - Assets +//! - $cwd/assets +//! +//! ``` +//! assets::root() -> +//! mac -> ../Resources/ +//! ios -> ../Resources/ +//! android -> assets/ +//! server -> assets/ +//! liveview -> assets/ +//! web -> /assets/ +//! 
root().join(bundled) +//! ``` +// / The end result of a build. +// / +// / Contains the final asset manifest, the executables, and the workdir. +// / +// / Every dioxus app can have an optional server executable which will influence the final bundle. +// / This is built in parallel with the app executable during the `build` phase and the progres/status +// / of the build is aggregated. +// / +// / The server will *always* be dropped into the `web` folder since it is considered "web" in nature, +// / and will likely need to be combined with the public dir to be useful. +// / +// / We do our best to assemble read-to-go bundles here, such that the "bundle" step for each platform +// / can just use the build dir +// / +// / When we write the AppBundle to a folder, it'll contain each bundle for each platform under the app's name: +// / ``` +// / dog-app/ +// / build/ +// / web/ +// / server.exe +// / assets/ +// / some-secret-asset.txt (a server-side asset) +// / public/ +// / index.html +// / assets/ +// / logo.png +// / desktop/ +// / App.app +// / App.appimage +// / App.exe +// / server/ +// / server +// / assets/ +// / some-secret-asset.txt (a server-side asset) +// / ios/ +// / App.app +// / App.ipa +// / android/ +// / App.apk +// / bundle/ +// / build.json +// / Desktop.app +// / Mobile_x64.ipa +// / Mobile_arm64.ipa +// / Mobile_rosetta.ipa +// / web.appimage +// / web/ +// / server.exe +// / assets/ +// / some-secret-asset.txt +// / public/ +// / index.html +// / assets/ +// / logo.png +// / style.css +// / ``` +// / +// / When deploying, the build.json file will provide all the metadata that dx-deploy will use to +// / push the app to stores, set up infra, manage versions, etc. +// / +// / The format of each build will follow the name plus some metadata such that when distributing you +// / can easily trim off the metadata. +// / +// / The idea here is that we can run any of the programs in the same way that they're deployed. 
+// / +// / +// / ## Bundle structure links +// / - apple: https://developer.apple.com/documentation/bundleresources/placing_content_in_a_bundle +// / - appimage: https://docs.appimage.org/packaging-guide/manual.html#ref-manual +// / +// / ## Extra links +// / - xbuild: https://github.com/rust-mobile/xbuild/blob/master/xbuild/src/command/build.rs +// pub(crate) struct BuildArtifacts { +// pub(crate) build: BuildRequest, +// pub(crate) exe: PathBuf, +// pub(crate) direct_rustc: Vec, +// pub(crate) time_start: SystemTime, +// pub(crate) time_end: SystemTime, +// pub(crate) assets: AssetManifest, +// } + +// impl AppBundle {} + use super::prerender::pre_render_static_routes; -use super::templates::InfoPlistData; -use crate::{BuildRequest, Platform, WasmOptConfig}; +use crate::{BuildMode, BuildRequest, Platform, WasmOptConfig}; use crate::{Result, TraceSrc}; -use anyhow::Context; +use anyhow::{bail, Context}; use dioxus_cli_opt::{process_file_to, AssetManifest}; +use itertools::Itertools; use manganis::{AssetOptions, JsAssetOptions}; use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; -use std::future::Future; -use std::path::{Path, PathBuf}; -use std::pin::Pin; -use std::sync::atomic::Ordering; use std::{collections::HashSet, io::Write}; +use std::{future::Future, time::Instant}; +use std::{ + path::{Path, PathBuf}, + time::UNIX_EPOCH, +}; +use std::{pin::Pin, time::SystemTime}; +use std::{process::Stdio, sync::atomic::Ordering}; use std::{sync::atomic::AtomicUsize, time::Duration}; +use target_lexicon::{Environment, OperatingSystem}; use tokio::process::Command; - -/// The end result of a build. -/// -/// Contains the final asset manifest, the executables, and the workdir. -/// -/// Every dioxus app can have an optional server executable which will influence the final bundle. -/// This is built in parallel with the app executable during the `build` phase and the progres/status -/// of the build is aggregated. 
-/// -/// The server will *always* be dropped into the `web` folder since it is considered "web" in nature, -/// and will likely need to be combined with the public dir to be useful. -/// -/// We do our best to assemble read-to-go bundles here, such that the "bundle" step for each platform -/// can just use the build dir -/// -/// When we write the AppBundle to a folder, it'll contain each bundle for each platform under the app's name: -/// ``` -/// dog-app/ -/// build/ -/// web/ -/// server.exe -/// assets/ -/// some-secret-asset.txt (a server-side asset) -/// public/ -/// index.html -/// assets/ -/// logo.png -/// desktop/ -/// App.app -/// App.appimage -/// App.exe -/// server/ -/// server -/// assets/ -/// some-secret-asset.txt (a server-side asset) -/// ios/ -/// App.app -/// App.ipa -/// android/ -/// App.apk -/// bundle/ -/// build.json -/// Desktop.app -/// Mobile_x64.ipa -/// Mobile_arm64.ipa -/// Mobile_rosetta.ipa -/// web.appimage -/// web/ -/// server.exe -/// assets/ -/// some-secret-asset.txt -/// public/ -/// index.html -/// assets/ -/// logo.png -/// style.css -/// ``` -/// -/// When deploying, the build.json file will provide all the metadata that dx-deploy will use to -/// push the app to stores, set up infra, manage versions, etc. -/// -/// The format of each build will follow the name plus some metadata such that when distributing you -/// can easily trim off the metadata. -/// -/// The idea here is that we can run any of the programs in the same way that they're deployed. 
-/// -/// -/// ## Bundle structure links -/// - apple: https://developer.apple.com/documentation/bundleresources/placing_content_in_a_bundle -/// - appimage: https://docs.appimage.org/packaging-guide/manual.html#ref-manual -/// -/// ## Extra links -/// - xbuild: https://github.com/rust-mobile/xbuild/blob/master/xbuild/src/command/build.rs -#[derive(Debug)] -pub(crate) struct AppBundle { - pub(crate) build: BuildRequest, - pub(crate) app: BuildArtifacts, - pub(crate) server: Option, -} - -#[derive(Debug)] -pub struct BuildArtifacts { - pub(crate) exe: PathBuf, - pub(crate) assets: AssetManifest, - pub(crate) time_taken: Duration, -} - -impl AppBundle { - /// ## Web: - /// Create a folder that is somewhat similar to an app-image (exe + asset) - /// The server is dropped into the `web` folder, even if there's no `public` folder. - /// If there's no server (SPA), we still use the `web` folder, but it only contains the - /// public folder. - /// ``` - /// web/ - /// server - /// assets/ - /// public/ - /// index.html - /// wasm/ - /// app.wasm - /// glue.js - /// snippets/ - /// ... - /// assets/ - /// logo.png - /// ``` - /// - /// ## Linux: - /// https://docs.appimage.org/reference/appdir.html#ref-appdir - /// current_exe.join("Assets") - /// ``` - /// app.appimage/ - /// AppRun - /// app.desktop - /// package.json - /// assets/ - /// logo.png - /// ``` - /// - /// ## Macos - /// We simply use the macos format where binaries are in `Contents/MacOS` and assets are in `Contents/Resources` - /// We put assets in an assets dir such that it generally matches every other platform and we can - /// output `/assets/blah` from manganis. - /// ``` - /// App.app/ - /// Contents/ - /// Info.plist - /// MacOS/ - /// Frameworks/ - /// Resources/ - /// assets/ - /// blah.icns - /// blah.png - /// CodeResources - /// _CodeSignature/ - /// ``` - /// - /// ## iOS - /// Not the same as mac! ios apps are a bit "flattened" in comparison. 
simpler format, presumably - /// since most ios apps don't ship frameworks/plugins and such. - /// - /// todo(jon): include the signing and entitlements in this format diagram. - /// ``` - /// App.app/ - /// main - /// assets/ - /// ``` - /// - /// ## Android: - /// - /// Currently we need to generate a `src` type structure, not a pre-packaged apk structure, since - /// we need to compile kotlin and java. This pushes us into using gradle and following a structure - /// similar to that of cargo mobile2. Eventually I'd like to slim this down (drop buildSrc) and - /// drive the kotlin build ourselves. This would let us drop gradle (yay! no plugins!) but requires - /// us to manage dependencies (like kotlinc) ourselves (yuck!). - /// - /// https://github.com/WanghongLin/miscellaneous/blob/master/tools/build-apk-manually.sh - /// - /// Unfortunately, it seems that while we can drop the `android` build plugin, we still will need - /// gradle since kotlin is basically gradle-only. - /// - /// Pre-build: - /// ``` - /// app.apk/ - /// .gradle - /// app/ - /// src/ - /// main/ - /// assets/ - /// jniLibs/ - /// java/ - /// kotlin/ - /// res/ - /// AndroidManifest.xml - /// build.gradle.kts - /// proguard-rules.pro - /// buildSrc/ - /// build.gradle.kts - /// src/ - /// main/ - /// kotlin/ - /// BuildTask.kt - /// build.gradle.kts - /// gradle.properties - /// gradlew - /// gradlew.bat - /// settings.gradle - /// ``` - /// - /// Final build: - /// ``` - /// app.apk/ - /// AndroidManifest.xml - /// classes.dex - /// assets/ - /// logo.png - /// lib/ - /// armeabi-v7a/ - /// libmyapp.so - /// arm64-v8a/ - /// libmyapp.so - /// ``` - /// Notice that we *could* feasibly build this ourselves :) - /// - /// ## Windows: - /// https://superuser.com/questions/749447/creating-a-single-file-executable-from-a-directory-in-windows - /// Windows does not provide an AppImage format, so instead we're going build the same folder - /// structure as an AppImage, but when distributing, we'll 
create a .exe that embeds the resources - /// as an embedded .zip file. When the app runs, it will implicitly unzip its resources into the - /// Program Files folder. Any subsequent launches of the parent .exe will simply call the AppRun.exe - /// entrypoint in the associated Program Files folder. - /// - /// This is, in essence, the same as an installer, so we might eventually just support something like msi/msix - /// which functionally do the same thing but with a sleeker UI. - /// - /// This means no installers are required and we can bake an updater into the host exe. - /// - /// ## Handling asset lookups: - /// current_exe.join("assets") - /// ``` - /// app.appimage/ - /// main.exe - /// main.desktop - /// package.json - /// assets/ - /// logo.png - /// ``` - /// - /// Since we support just a few locations, we could just search for the first that exists - /// - usr - /// - ../Resources - /// - assets - /// - Assets - /// - $cwd/assets - /// - /// ``` - /// assets::root() -> - /// mac -> ../Resources/ - /// ios -> ../Resources/ - /// android -> assets/ - /// server -> assets/ - /// liveview -> assets/ - /// web -> /assets/ - /// root().join(bundled) - /// ``` - pub(crate) async fn new( - build: BuildRequest, - app: BuildArtifacts, - server: Option, - ) -> Result { - let mut bundle = Self { app, server, build }; - - tracing::debug!("Assembling app bundle"); - - bundle.build.status_start_bundle(); - /* - assume the build dir is already created by BuildRequest - todo(jon): maybe refactor this a bit to force AppBundle to be created before it can be filled in - */ - bundle - .write_main_executable() - .await - .context("Failed to write main executable")?; - bundle.write_server_executable().await?; - bundle - .write_assets() - .await - .context("Failed to write assets")?; - bundle.write_metadata().await?; - bundle.optimize().await?; - bundle.pre_render_ssg_routes().await?; - bundle - .assemble() - .await - .context("Failed to assemble app bundle")?; - - 
tracing::debug!("Bundle created at {}", bundle.build.root_dir().display()); - - Ok(bundle) - } - - /// Take the output of rustc and make it into the main exe of the bundle - /// - /// For wasm, we'll want to run `wasm-bindgen` to make it a wasm binary along with some other optimizations - /// Other platforms we might do some stripping or other optimizations - /// Move the executable to the workdir - async fn write_main_executable(&mut self) -> Result<()> { - match self.build.build.platform() { - // Run wasm-bindgen on the wasm binary and set its output to be in the bundle folder - // Also run wasm-opt on the wasm binary, and sets the index.html since that's also the "executable". - // - // The wasm stuff will be in a folder called "wasm" in the workdir. - // - // Final output format: - // ``` - // dx/ - // app/ - // web/ - // bundle/ - // build/ - // public/ - // index.html - // wasm/ - // app.wasm - // glue.js - // snippets/ - // ... - // assets/ - // logo.png - // ``` - Platform::Web => { - self.bundle_web().await?; - } - - // this will require some extra oomf to get the multi architecture builds... - // for now, we just copy the exe into the current arch (which, sorry, is hardcoded for my m1) - // we'll want to do multi-arch builds in the future, so there won't be *one* exe dir to worry about - // eventually `exe_dir` and `main_exe` will need to take in an arch and return the right exe path - // - // todo(jon): maybe just symlink this rather than copy it? - Platform::Android => { - self.copy_android_exe(&self.app.exe, &self.main_exe()) - .await?; - } - - // These are all super simple, just copy the exe into the folder - // eventually, perhaps, maybe strip + encrypt the exe? 
- Platform::MacOS - | Platform::Windows - | Platform::Linux - | Platform::Ios - | Platform::Liveview - | Platform::Server => { - std::fs::copy(&self.app.exe, self.main_exe())?; - } - } - - Ok(()) - } - - /// Copy the assets out of the manifest and into the target location - /// - /// Should be the same on all platforms - just copy over the assets from the manifest into the output directory - async fn write_assets(&self) -> Result<()> { - // Server doesn't need assets - web will provide them - if self.build.build.platform() == Platform::Server { - return Ok(()); - } - - let asset_dir = self.build.asset_dir(); - - // First, clear the asset dir of any files that don't exist in the new manifest - _ = tokio::fs::create_dir_all(&asset_dir).await; - // Create a set of all the paths that new files will be bundled to - let mut keep_bundled_output_paths: HashSet<_> = self - .app - .assets - .assets - .values() - .map(|a| asset_dir.join(a.bundled_path())) - .collect(); - // The CLI creates a .version file in the asset dir to keep track of what version of the optimizer - // the asset was processed. If that version doesn't match the CLI version, we need to re-optimize - // all assets. 
- let version_file = self.build.asset_optimizer_version_file(); - let clear_cache = std::fs::read_to_string(&version_file) - .ok() - .filter(|s| s == crate::VERSION.as_str()) - .is_none(); - if clear_cache { - keep_bundled_output_paths.clear(); - } - - // one possible implementation of walking a directory only visiting files - fn remove_old_assets<'a>( - path: &'a Path, - keep_bundled_output_paths: &'a HashSet, - ) -> Pin> + Send + 'a>> { - Box::pin(async move { - // If this asset is in the manifest, we don't need to remove it - let canon_path = dunce::canonicalize(path)?; - if keep_bundled_output_paths.contains(canon_path.as_path()) { - return Ok(()); - } - - // Otherwise, if it is a directory, we need to walk it and remove child files - if path.is_dir() { - for entry in std::fs::read_dir(path)?.flatten() { - let path = entry.path(); - remove_old_assets(&path, keep_bundled_output_paths).await?; - } - if path.read_dir()?.next().is_none() { - // If the directory is empty, remove it - tokio::fs::remove_dir(path).await?; - } - } else { - // If it is a file, remove it - tokio::fs::remove_file(path).await?; - } - - Ok(()) - }) - } - - tracing::debug!("Removing old assets"); - tracing::trace!( - "Keeping bundled output paths: {:#?}", - keep_bundled_output_paths - ); - remove_old_assets(&asset_dir, &keep_bundled_output_paths).await?; - - // todo(jon): we also want to eventually include options for each asset's optimization and compression, which we currently aren't - let mut assets_to_transfer = vec![]; - - // Queue the bundled assets - for (asset, bundled) in &self.app.assets.assets { - let from = asset.clone(); - let to = asset_dir.join(bundled.bundled_path()); - - // prefer to log using a shorter path relative to the workspace dir by trimming the workspace dir - let from_ = from - .strip_prefix(self.build.krate.workspace_dir()) - .unwrap_or(from.as_path()); - let to_ = from - .strip_prefix(self.build.krate.workspace_dir()) - .unwrap_or(to.as_path()); - - 
tracing::debug!("Copying asset {from_:?} to {to_:?}"); - assets_to_transfer.push((from, to, *bundled.options())); - } - - // And then queue the legacy assets - // ideally, one day, we can just check the rsx!{} calls for references to assets - for from in self.build.krate.legacy_asset_dir_files() { - let to = asset_dir.join(from.file_name().unwrap()); - tracing::debug!("Copying legacy asset {from:?} to {to:?}"); - assets_to_transfer.push((from, to, AssetOptions::Unknown)); - } - - let asset_count = assets_to_transfer.len(); - let started_processing = AtomicUsize::new(0); - let copied = AtomicUsize::new(0); - - // Parallel Copy over the assets and keep track of progress with an atomic counter - let progress = self.build.progress.clone(); - let ws_dir = self.build.krate.workspace_dir(); - // Optimizing assets is expensive and blocking, so we do it in a tokio spawn blocking task - tokio::task::spawn_blocking(move || { - assets_to_transfer - .par_iter() - .try_for_each(|(from, to, options)| { - let processing = started_processing.fetch_add(1, Ordering::SeqCst); - let from_ = from.strip_prefix(&ws_dir).unwrap_or(from); - tracing::trace!( - "Starting asset copy {processing}/{asset_count} from {from_:?}" - ); - - let res = process_file_to(options, from, to); - if let Err(err) = res.as_ref() { - tracing::error!("Failed to copy asset {from:?}: {err}"); - } - - let finished = copied.fetch_add(1, Ordering::SeqCst); - BuildRequest::status_copied_asset( - &progress, - finished, - asset_count, - from.to_path_buf(), - ); - - res.map(|_| ()) - }) - }) - .await - .map_err(|e| anyhow::anyhow!("A task failed while trying to copy assets: {e}"))??; - - // // Remove the wasm bindgen output directory if it exists - // _ = std::fs::remove_dir_all(self.build.wasm_bindgen_out_dir()); - - // Write the version file so we know what version of the optimizer we used - std::fs::write( - self.build.asset_optimizer_version_file(), - crate::VERSION.as_str(), - )?; - - Ok(()) - } - - /// The item that 
we'll try to run directly if we need to. - /// - /// todo(jon): we should name the app properly instead of making up the exe name. It's kinda okay for dev mode, but def not okay for prod - pub fn main_exe(&self) -> PathBuf { - self.build.exe_dir().join(self.build.platform_exe_name()) - } - - /// We always put the server in the `web` folder! - /// Only the `web` target will generate a `public` folder though - async fn write_server_executable(&self) -> Result<()> { - if let Some(server) = &self.server { - let to = self - .server_exe() - .expect("server should be set if we're building a server"); - - std::fs::create_dir_all(self.server_exe().unwrap().parent().unwrap())?; - - tracing::debug!("Copying server executable to: {to:?} {server:#?}"); - - // Remove the old server executable if it exists, since copying might corrupt it :( - // todo(jon): do this in more places, I think - _ = std::fs::remove_file(&to); - std::fs::copy(&server.exe, to)?; - } - - Ok(()) - } - - /// todo(jon): use handlebars templates instead of these prebaked templates - async fn write_metadata(&self) -> Result<()> { - // write the Info.plist file - match self.build.build.platform() { - Platform::MacOS => { - let dest = self.build.root_dir().join("Contents").join("Info.plist"); - let plist = self.macos_plist_contents()?; - std::fs::write(dest, plist)?; - } - - Platform::Ios => { - let dest = self.build.root_dir().join("Info.plist"); - let plist = self.ios_plist_contents()?; - std::fs::write(dest, plist)?; - } - - // AndroidManifest.xml - // er.... maybe even all the kotlin/java/gradle stuff? - Platform::Android => {} - - // Probably some custom format or a plist file (haha) - // When we do the proper bundle, we'll need to do something with wix templates, I think? - Platform::Windows => {} - - // eventually we'll create the .appimage file, I guess? - Platform::Linux => {} - - // These are served as folders, not appimages, so we don't need to do anything special (I think?) 
- // Eventually maybe write some secrets/.env files for the server? - // We could also distribute them as a deb/rpm for linux and msi for windows - Platform::Web => {} - Platform::Server => {} - Platform::Liveview => {} - } - - Ok(()) - } - - /// Run the optimizers, obfuscators, minimizers, signers, etc - pub(crate) async fn optimize(&self) -> Result<()> { - match self.build.build.platform() { - Platform::Web => { - // Compress the asset dir - // If pre-compressing is enabled, we can pre_compress the wasm-bindgen output - let pre_compress = self - .build - .krate - .should_pre_compress_web_assets(self.build.build.release); - - self.build.status_compressing_assets(); - let asset_dir = self.build.asset_dir(); - tokio::task::spawn_blocking(move || { - crate::fastfs::pre_compress_folder(&asset_dir, pre_compress) - }) - .await - .unwrap()?; - } - Platform::MacOS => {} - Platform::Windows => {} - Platform::Linux => {} - Platform::Ios => {} - Platform::Android => {} - Platform::Server => {} - Platform::Liveview => {} - } - - Ok(()) - } - - pub(crate) fn server_exe(&self) -> Option { - if let Some(_server) = &self.server { - let mut path = self - .build - .krate - .build_dir(Platform::Server, self.build.build.release); - - if cfg!(windows) { - path.push("server.exe"); - } else { - path.push("server"); - } - - return Some(path); - } - - None - } - - /// Bundle the web app - /// - Run wasm-bindgen - /// - Bundle split - /// - Run wasm-opt - /// - Register the .wasm and .js files with the asset system - async fn bundle_web(&mut self) -> Result<()> { - use crate::{wasm_bindgen::WasmBindgen, wasm_opt}; - use std::fmt::Write; - - // Locate the output of the build files and the bindgen output - // We'll fill these in a second if they don't already exist - let bindgen_outdir = self.build.wasm_bindgen_out_dir(); - let prebindgen = self.app.exe.clone(); - let post_bindgen_wasm = self.build.wasm_bindgen_wasm_output_file(); - let should_bundle_split = 
self.build.build.experimental_wasm_split; - let rustc_exe = self.app.exe.with_extension("wasm"); - let bindgen_version = self - .build - .krate - .wasm_bindgen_version() - .expect("this should have been checked by tool verification"); - - // Prepare any work dirs - std::fs::create_dir_all(&bindgen_outdir)?; - - // Prepare our configuration - // - // we turn off debug symbols in dev mode but leave them on in release mode (weird!) since - // wasm-opt and wasm-split need them to do better optimizations. - // - // We leave demangling to false since it's faster and these tools seem to prefer the raw symbols. - // todo(jon): investigate if the chrome extension needs them demangled or demangles them automatically. - let will_wasm_opt = (self.build.build.release || self.build.build.experimental_wasm_split) - && crate::wasm_opt::wasm_opt_available(); - let keep_debug = self.build.krate.config.web.wasm_opt.debug - || self.build.build.debug_symbols - || self.build.build.experimental_wasm_split - || !self.build.build.release - || will_wasm_opt; - let demangle = false; - let wasm_opt_options = WasmOptConfig { - memory_packing: self.build.build.experimental_wasm_split, - debug: self.build.build.debug_symbols, - ..self.build.krate.config.web.wasm_opt.clone() - }; - - // Run wasm-bindgen. Some of the options are not "optimal" but will be fixed up by wasm-opt - // - // There's performance implications here. Running with --debug is slower than without - // We're keeping around lld sections and names but wasm-opt will fix them - // todo(jon): investigate a good balance of wiping debug symbols during dev (or doing a double build?) 
- self.build.status_wasm_bindgen_start(); - tracing::debug!(dx_src = ?TraceSrc::Bundle, "Running wasm-bindgen"); - let start = std::time::Instant::now(); - WasmBindgen::new(&bindgen_version) - .input_path(&rustc_exe) - .target("web") - .debug(keep_debug) - .demangle(demangle) - .keep_debug(keep_debug) - .keep_lld_sections(true) - .out_name(self.build.krate.executable_name()) - .out_dir(&bindgen_outdir) - .remove_name_section(!will_wasm_opt) - .remove_producers_section(!will_wasm_opt) - .run() - .await - .context("Failed to generate wasm-bindgen bindings")?; - tracing::debug!(dx_src = ?TraceSrc::Bundle, "wasm-bindgen complete in {:?}", start.elapsed()); - - // Run bundle splitting if the user has requested it - // It's pretty expensive but because of rayon should be running separate threads, hopefully - // not blocking this thread. Dunno if that's true - if should_bundle_split { - self.build.status_splitting_bundle(); - - if !will_wasm_opt { - return Err(anyhow::anyhow!( - "Bundle splitting requires wasm-opt to be installed or the CLI to be built with `--features optimizations`. Please install wasm-opt and try again." - ) - .into()); - } - - // Load the contents of these binaries since we need both of them - // We're going to use the default makeLoad glue from wasm-split - let original = std::fs::read(&prebindgen)?; - let bindgened = std::fs::read(&post_bindgen_wasm)?; - let mut glue = wasm_split_cli::MAKE_LOAD_JS.to_string(); - - // Run the emitter - let splitter = wasm_split_cli::Splitter::new(&original, &bindgened); - let modules = splitter - .context("Failed to parse wasm for splitter")? 
- .emit() - .context("Failed to emit wasm split modules")?; - - // Write the chunks that contain shared imports - // These will be in the format of chunk_0_modulename.wasm - this is hardcoded in wasm-split - tracing::debug!("Writing split chunks to disk"); - for (idx, chunk) in modules.chunks.iter().enumerate() { - let path = bindgen_outdir.join(format!("chunk_{}_{}.wasm", idx, chunk.module_name)); - wasm_opt::write_wasm(&chunk.bytes, &path, &wasm_opt_options).await?; - writeln!( - glue, "export const __wasm_split_load_chunk_{idx} = makeLoad(\"/assets/{url}\", [], fusedImports);", - url = self - .app - .assets - .register_asset(&path, AssetOptions::Unknown)?.bundled_path(), - )?; - } - - // Write the modules that contain the entrypoints - tracing::debug!("Writing split modules to disk"); - for (idx, module) in modules.modules.iter().enumerate() { - let comp_name = module - .component_name - .as_ref() - .context("generated bindgen module has no name?")?; - - let path = bindgen_outdir.join(format!("module_{}_{}.wasm", idx, comp_name)); - wasm_opt::write_wasm(&module.bytes, &path, &wasm_opt_options).await?; - - let hash_id = module.hash_id.as_ref().unwrap(); - - writeln!( - glue, - "export const __wasm_split_load_{module}_{hash_id}_{comp_name} = makeLoad(\"/assets/{url}\", [{deps}], fusedImports);", - module = module.module_name, - - - // Again, register this wasm with the asset system - url = self - .app - .assets - .register_asset(&path, AssetOptions::Unknown)?.bundled_path(), - - // This time, make sure to write the dependencies of this chunk - // The names here are again, hardcoded in wasm-split - fix this eventually. 
- deps = module - .relies_on_chunks - .iter() - .map(|idx| format!("__wasm_split_load_chunk_{idx}")) - .collect::>() - .join(", ") - )?; - } - - // Write the js binding - // It's not registered as an asset since it will get included in the main.js file - let js_output_path = bindgen_outdir.join("__wasm_split.js"); - std::fs::write(&js_output_path, &glue)?; - - // Make sure to write some entropy to the main.js file so it gets a new hash - // If we don't do this, the main.js file will be cached and never pick up the chunk names - let uuid = uuid::Uuid::new_v5(&uuid::Uuid::NAMESPACE_URL, glue.as_bytes()); - std::fs::OpenOptions::new() - .append(true) - .open(self.build.wasm_bindgen_js_output_file()) - .context("Failed to open main.js file")? - .write_all(format!("/*{uuid}*/").as_bytes())?; - - // Write the main wasm_bindgen file and register it with the asset system - // This will overwrite the file in place - // We will wasm-opt it in just a second... - std::fs::write(&post_bindgen_wasm, modules.main.bytes)?; - } - - // Make sure to optimize the main wasm file if requested or if bundle splitting - if should_bundle_split || self.build.build.release { - self.build.status_optimizing_wasm(); - wasm_opt::optimize(&post_bindgen_wasm, &post_bindgen_wasm, &wasm_opt_options).await?; - } - - // Make sure to register the main wasm file with the asset system - self.app - .assets - .register_asset(&post_bindgen_wasm, AssetOptions::Unknown)?; - - // Register the main.js with the asset system so it bundles in the snippets and optimizes - self.app.assets.register_asset( - &self.build.wasm_bindgen_js_output_file(), - AssetOptions::Js(JsAssetOptions::new().with_minify(true).with_preload(true)), - )?; - - // Write the index.html file with the pre-configured contents we got from pre-rendering - std::fs::write( - self.build.root_dir().join("index.html"), - self.prepare_html()?, - )?; - - Ok(()) - } - - async fn pre_render_ssg_routes(&self) -> Result<()> { - // Run SSG and cache static 
routes - if !self.build.build.ssg { - return Ok(()); - } - self.build.status_prerendering_routes(); - pre_render_static_routes( - &self - .server_exe() - .context("Failed to find server executable")?, - ) - .await?; - Ok(()) - } - - fn macos_plist_contents(&self) -> Result { - handlebars::Handlebars::new() - .render_template( - include_str!("../../assets/macos/mac.plist.hbs"), - &InfoPlistData { - display_name: self.build.krate.bundled_app_name(), - bundle_name: self.build.krate.bundled_app_name(), - executable_name: self.build.platform_exe_name(), - bundle_identifier: self.build.krate.bundle_identifier(), - }, - ) - .map_err(|e| e.into()) - } - - fn ios_plist_contents(&self) -> Result { - handlebars::Handlebars::new() - .render_template( - include_str!("../../assets/ios/ios.plist.hbs"), - &InfoPlistData { - display_name: self.build.krate.bundled_app_name(), - bundle_name: self.build.krate.bundled_app_name(), - executable_name: self.build.platform_exe_name(), - bundle_identifier: self.build.krate.bundle_identifier(), - }, - ) - .map_err(|e| e.into()) - } - - /// Run any final tools to produce apks or other artifacts we might need. - async fn assemble(&self) -> Result<()> { - if let Platform::Android = self.build.build.platform() { - self.build.status_running_gradle(); - - let output = Command::new(self.gradle_exe()?) - .arg("assembleDebug") - .current_dir(self.build.root_dir()) - .stderr(std::process::Stdio::piped()) - .stdout(std::process::Stdio::piped()) - .output() - .await?; - - if !output.status.success() { - return Err(anyhow::anyhow!("Failed to assemble apk: {output:?}").into()); - } - } - - Ok(()) - } - - /// Run bundleRelease and return the path to the `.aab` file - /// - /// https://stackoverflow.com/questions/57072558/whats-the-difference-between-gradlewassemblerelease-gradlewinstallrelease-and - pub(crate) async fn android_gradle_bundle(&self) -> Result { - let output = Command::new(self.gradle_exe()?) 
- .arg("bundleRelease") - .current_dir(self.build.root_dir()) - .output() - .await - .context("Failed to run gradle bundleRelease")?; - - if !output.status.success() { - return Err(anyhow::anyhow!("Failed to bundleRelease: {output:?}").into()); - } - - let app_release = self - .build - .root_dir() - .join("app") - .join("build") - .join("outputs") - .join("bundle") - .join("release"); - - // Rename it to Name-arch.aab - let from = app_release.join("app-release.aab"); - let to = app_release.join(format!( - "{}-{}.aab", - self.build.krate.bundled_app_name(), - self.build.build.target_args.arch() - )); - - std::fs::rename(from, &to).context("Failed to rename aab")?; - - Ok(to) - } - - fn gradle_exe(&self) -> Result { - // make sure we can execute the gradlew script - #[cfg(unix)] - { - use std::os::unix::prelude::PermissionsExt; - std::fs::set_permissions( - self.build.root_dir().join("gradlew"), - std::fs::Permissions::from_mode(0o755), - )?; - } - - let gradle_exec_name = match cfg!(windows) { - true => "gradlew.bat", - false => "gradlew", - }; - - Ok(self.build.root_dir().join(gradle_exec_name)) - } - - pub(crate) fn apk_path(&self) -> PathBuf { - self.build - .root_dir() - .join("app") - .join("build") - .join("outputs") - .join("apk") - .join("debug") - .join("app-debug.apk") - } - - /// Copy the Android executable to the target directory, and rename the hardcoded com_hardcoded_dioxuslabs entries - /// to the user's app name. 
- async fn copy_android_exe(&self, source: &Path, destination: &Path) -> Result<()> { - // we might want to eventually use the objcopy logic to handle this - // - // https://github.com/rust-mobile/xbuild/blob/master/xbuild/template/lib.rs - // https://github.com/rust-mobile/xbuild/blob/master/apk/src/lib.rs#L19 - std::fs::copy(source, destination)?; - Ok(()) - } -} diff --git a/packages/cli/src/build/mod.rs b/packages/cli/src/build/mod.rs index 56d9eb40b6..73856da681 100644 --- a/packages/cli/src/build/mod.rs +++ b/packages/cli/src/build/mod.rs @@ -4,17 +4,27 @@ //! //! Uses a request -> response architecture that allows you to monitor the progress with an optional message //! receiver. +//! +//! +//! Targets +//! - Request +//! - State +//! - Bundle +//! - Handle mod builder; mod bundle; +mod patch; +mod platform; mod prerender; mod progress; mod request; -mod templates; mod verify; mod web; pub(crate) use builder::*; pub(crate) use bundle::*; +pub(crate) use patch::*; +pub(crate) use platform::*; pub(crate) use progress::*; pub(crate) use request::*; diff --git a/packages/cli/src/build/patch.rs b/packages/cli/src/build/patch.rs new file mode 100644 index 0000000000..ee0051aef8 --- /dev/null +++ b/packages/cli/src/build/patch.rs @@ -0,0 +1,31 @@ +use anyhow::{Context, Result}; +use itertools::Itertools; +use memmap::{Mmap, MmapOptions}; +use object::{ + read::File, Architecture, BinaryFormat, Endianness, Object, ObjectSection, ObjectSymbol, + Relocation, RelocationTarget, SectionIndex, +}; +use std::{cmp::Ordering, ffi::OsStr, fs, ops::Deref, path::PathBuf}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + path::Path, +}; +use tokio::process::Command; + +use crate::Platform; + +pub enum ReloadKind { + /// An RSX-only patch + Rsx, + + /// A patch that includes both RSX and binary assets + Binary, + + /// A full rebuild + Full, +} + +#[derive(Debug, Clone)] +pub struct PatchData { + pub direct_rustc: Vec, +} diff --git a/packages/cli/src/build/platform.rs 
b/packages/cli/src/build/platform.rs new file mode 100644 index 0000000000..206a63afad --- /dev/null +++ b/packages/cli/src/build/platform.rs @@ -0,0 +1,243 @@ +use crate::Result; +use anyhow::Context; +use itertools::Itertools; +use std::{path::PathBuf, sync::Arc}; +use target_lexicon::Triple; + +/// The tools for Android (ndk, sdk, etc) +#[derive(Debug, Clone)] +pub(crate) struct AndroidTools { + pub(crate) ndk: PathBuf, + pub(crate) adb: PathBuf, + pub(crate) java_home: Option, +} + +#[memoize::memoize] +pub fn android_tools() -> Option { + // We check for SDK first since users might install Android Studio and then install the SDK + // After that they might install the NDK, so the SDK drives the source of truth. + let sdk = var_or_debug("ANDROID_SDK_ROOT") + .or_else(|| var_or_debug("ANDROID_SDK")) + .or_else(|| var_or_debug("ANDROID_HOME")); + + // Check the ndk. We look for users's overrides first and then look into the SDK. + // Sometimes users set only the NDK (especially if they're somewhat advanced) so we need to look for it manually + // + // Might look like this, typically under "sdk": + // "/Users/jonkelley/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android24-clang" + let ndk = var_or_debug("NDK_HOME") + .or_else(|| var_or_debug("ANDROID_NDK_HOME")) + .or_else(|| { + // Look for the most recent NDK in the event the user has installed multiple NDKs + // Eventually we might need to drive this from Dioxus.toml + let sdk = sdk.as_ref()?; + let ndk_dir = sdk.join("ndk").read_dir().ok()?; + ndk_dir + .flatten() + .map(|dir| (dir.file_name(), dir.path())) + .sorted() + .last() + .map(|(_, path)| path.to_path_buf()) + })?; + + // Look for ADB in the SDK. 
If it's not there we'll use `adb` from the PATH + let adb = sdk + .as_ref() + .and_then(|sdk| { + let tools = sdk.join("platform-tools"); + if tools.join("adb").exists() { + return Some(tools.join("adb")); + } + if tools.join("adb.exe").exists() { + return Some(tools.join("adb.exe")); + } + None + }) + .unwrap_or_else(|| PathBuf::from("adb")); + + // https://stackoverflow.com/questions/71381050/java-home-is-set-to-an-invalid-directory-android-studio-flutter + // always respect the user's JAVA_HOME env var above all other options + // + // we only attempt autodetection if java_home is not set + // + // this is a better fallback than falling onto the users' system java home since many users might + // not even know which java that is - they just know they have android studio installed + let java_home = std::env::var_os("JAVA_HOME") + .map(PathBuf::from) + .or_else(|| { + // Attempt to autodetect java home from the android studio path or jdk path on macos + #[cfg(target_os = "macos")] + { + let jbr_home = + PathBuf::from("/Applications/Android Studio.app/Contents/jbr/Contents/Home/"); + if jbr_home.exists() { + return Some(jbr_home); + } + + let jre_home = + PathBuf::from("/Applications/Android Studio.app/Contents/jre/Contents/Home"); + if jre_home.exists() { + return Some(jre_home); + } + + let jdk_home = + PathBuf::from("/Library/Java/JavaVirtualMachines/openjdk.jdk/Contents/Home/"); + if jdk_home.exists() { + return Some(jdk_home); + } + } + + #[cfg(target_os = "windows")] + { + let jbr_home = PathBuf::from("C:\\Program Files\\Android\\Android Studio\\jbr"); + if jbr_home.exists() { + return Some(jbr_home); + } + } + + // todo(jon): how do we detect java home on linux? 
+ #[cfg(target_os = "linux")] + { + let jbr_home = PathBuf::from("/usr/lib/jvm/java-11-openjdk-amd64"); + if jbr_home.exists() { + return Some(jbr_home); + } + } + + None + }); + + Some(AndroidTools { + ndk, + adb, + java_home, + }) +} + +impl AndroidTools { + pub(crate) fn android_tools_dir(&self) -> PathBuf { + let prebuilt = self.ndk.join("toolchains").join("llvm").join("prebuilt"); + + if cfg!(target_os = "macos") { + // for whatever reason, even on aarch64 macos, the linker is under darwin-x86_64 + return prebuilt.join("darwin-x86_64").join("bin"); + } + + if cfg!(target_os = "linux") { + return prebuilt.join("linux-x86_64").join("bin"); + } + + if cfg!(target_os = "windows") { + return prebuilt.join("windows-x86_64").join("bin"); + } + + // Otherwise return the first entry in the prebuilt directory + prebuilt + .read_dir() + .expect("Failed to read android toolchains directory") + .next() + .expect("Failed to find android toolchains directory") + .expect("Failed to read android toolchain file") + .path() + } + + pub(crate) fn android_cc(&self, triple: &Triple) -> PathBuf { + // "/Users/jonkelley/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/bin/aarch64-linux-android24-clang" + let suffix = if cfg!(target_os = "windows") { + ".cmd" + } else { + "" + }; + + self.android_tools_dir().join(format!( + "{}{}-clang{}", + triple, + self.min_sdk_version(), + suffix + )) + } + + pub(crate) fn android_ld(&self, triple: &Triple) -> PathBuf { + // "/Users/jonkelley/Library/Android/sdk/ndk/25.2.9519653/toolchains/llvm/prebuilt/darwin-x86_64/bin/ld" + let suffix = if cfg!(target_os = "windows") { + ".cmd" + } else { + "" + }; + + self.android_tools_dir().join(format!( + "{}{}-clang++{}", + triple, + self.min_sdk_version(), + suffix + )) + } + + // todo(jon): this should be configurable + pub(crate) fn min_sdk_version(&self) -> u32 { + 24 + } + + pub(crate) fn ar_path(&self) -> PathBuf { + self.android_tools_dir().join("llvm-ar") + } + + 
pub(crate) fn target_cc(&self) -> PathBuf { + self.android_tools_dir().join("clang") + } + + pub(crate) fn target_cxx(&self) -> PathBuf { + self.android_tools_dir().join("clang++") + } + + pub(crate) fn java_home(&self) -> Option { + self.java_home.clone() + // copilot suggested this?? + // self.ndk.join("platforms").join("android-24").join("arch-arm64").join("usr").join("lib") + // .join("jvm") + // .join("default") + // .join("lib") + // .join("server") + // .join("libjvm.so") + } + + pub(crate) fn android_jnilib(triple: &Triple) -> &'static str { + use target_lexicon::Architecture; + match triple.architecture { + Architecture::Arm(_) => "armeabi-v7a", + Architecture::Aarch64(_) => "arm64-v8a", + Architecture::X86_32(_) => "x86", + Architecture::X86_64 => "x86_64", + _ => unimplemented!("Unsupported architecture"), + } + } + + // todo: the new Triple type might be able to handle the different arm flavors + // ie armv7 vs armv7a + pub(crate) fn android_clang_triplet(triple: &Triple) -> String { + use target_lexicon::Architecture; + match triple.architecture { + Architecture::Arm(_) => "armv7a-linux-androideabi".to_string(), + _ => triple.to_string(), + } + } + + // pub(crate) fn android_target_triplet(&self) -> &'static str { + // match self { + // Arch::Arm => "armv7-linux-androideabi", + // Arch::Arm64 => "aarch64-linux-android", + // Arch::X86 => "i686-linux-android", + // Arch::X64 => "x86_64-linux-android", + // } + // } +} + +fn var_or_debug(name: &str) -> Option { + use std::env::var; + use tracing::debug; + + var(name) + .inspect_err(|_| debug!("{name} not set")) + .ok() + .map(PathBuf::from) +} diff --git a/packages/cli/src/build/progress.rs b/packages/cli/src/build/progress.rs index e0efa41023..ff6b481f43 100644 --- a/packages/cli/src/build/progress.rs +++ b/packages/cli/src/build/progress.rs @@ -1,5 +1,5 @@ //! Report progress about the build to the user. We use channels to report progress back to the CLI. 
-use crate::{AppBundle, BuildRequest, BuildStage, Platform, TraceSrc}; +use crate::{BuildArtifacts, BuildRequest, BuildStage, Platform, TraceSrc}; use cargo_metadata::CompilerMessage; use futures_channel::mpsc::{UnboundedReceiver, UnboundedSender}; use std::path::PathBuf; @@ -7,12 +7,14 @@ use std::path::PathBuf; pub(crate) type ProgressTx = UnboundedSender; pub(crate) type ProgressRx = UnboundedReceiver; -#[derive(Debug)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct BuildId(u64); + #[allow(clippy::large_enum_variant)] pub(crate) enum BuildUpdate { Progress { stage: BuildStage }, CompilerMessage { message: CompilerMessage }, - BuildReady { bundle: AppBundle }, + BuildReady { bundle: BuildArtifacts }, BuildFailed { err: crate::Error }, } @@ -61,7 +63,6 @@ impl BuildRequest { current: count, total, krate: name, - is_server: self.is_server(), }, }); } @@ -69,7 +70,7 @@ impl BuildRequest { pub(crate) fn status_starting_build(&self, crate_count: usize) { _ = self.progress.unbounded_send(BuildUpdate::Progress { stage: BuildStage::Starting { - is_server: self.build.platform() == Platform::Server, + patch: self.is_patch(), crate_count, }, }); @@ -115,6 +116,6 @@ impl BuildRequest { } pub(crate) fn is_server(&self) -> bool { - self.build.platform() == Platform::Server + self.platform == Platform::Server } } diff --git a/packages/cli/src/build/request.rs b/packages/cli/src/build/request.rs index c4e288ea1f..02c1187c36 100644 --- a/packages/cli/src/build/request.rs +++ b/packages/cli/src/build/request.rs @@ -1,156 +1,1505 @@ -use super::{progress::ProgressTx, BuildArtifacts}; -use crate::dioxus_crate::DioxusCrate; -use crate::{link::LinkAction, BuildArgs}; -use crate::{AppBundle, Platform, Result, TraceSrc}; +use super::{prerender::pre_render_static_routes, progress::ProgressTx, AndroidTools, PatchData}; +use crate::{link::LinkAction, BuildArgs, WasmOptConfig}; +use crate::{DioxusConfig, Workspace}; +use crate::{Platform, Result, TraceSrc}; use anyhow::Context; 
use dioxus_cli_config::{APP_TITLE_ENV, ASSET_ROOT_ENV}; -use dioxus_cli_opt::AssetManifest; +use dioxus_cli_opt::{process_file_to, AssetManifest}; +use itertools::Itertools; +use krates::{cm::TargetKind, KrateDetails, Krates, NodeId, Utf8PathBuf}; +use manganis::{AssetOptions, JsAssetOptions}; +use rayon::prelude::{IntoParallelRefIterator, ParallelIterator}; use serde::Deserialize; use std::{ + collections::HashSet, + future::Future, + io::Write, path::{Path, PathBuf}, + pin::Pin, process::Stdio, - time::Instant, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::{Instant, SystemTime, UNIX_EPOCH}, }; +use target_lexicon::{Environment, OperatingSystem, Triple}; use tokio::{io::AsyncBufReadExt, process::Command}; - -#[derive(Clone, Debug)] +use toml_edit::Item; +use uuid::Uuid; + +/// This struct is used to plan the build process. +/// +/// The point here is to be able to take in the user's config from the CLI without modifying the +/// arguments in place. Creating a buildplan "resolves" their config into a build plan that can be +/// introspected. For example, the users might not specify a "Triple" in the CLI but the triple will +/// be guaranteed to be resolved here. +/// +/// Creating a buildplan also lets us introspect build requests and modularize our build process. +/// This will, however, lead to duplicate fields between the CLI and the build engine. This is fine +/// since we have the freedom to evolve the schema internally without breaking the API. +/// +/// Since we resolve the build request before initializing the CLI, it also serves as a place to store +/// resolved "serve" arguments, which is why it takes ServeArgs instead of BuildArgs. Simply wrap the +/// BuildArgs in a default ServeArgs and pass it in. 
+#[derive(Clone)] pub(crate) struct BuildRequest { - /// The configuration for the crate we are building - pub(crate) krate: DioxusCrate, + pub(crate) workspace: Arc, + pub(crate) crate_package: NodeId, + pub(crate) config: DioxusConfig, + pub(crate) crate_target: Arc, - /// The arguments for the build - pub(crate) build: BuildArgs, + // / + pub(crate) fullstack: bool, - /// Status channel to send our progress updates to - pub(crate) progress: ProgressTx, + pub(crate) profile: String, + + pub(crate) release: bool, + + /// + pub(crate) platform: Platform, + + /// + pub(crate) target: Triple, + + pub(crate) device: bool, + + /// Build for nightly [default: false] + pub(crate) nightly: bool, + + /// The package to build + pub(crate) package: Option, + + /// Space separated list of features to activate + pub(crate) features: Vec, + + /// Extra arguments to pass to cargo + pub(crate) cargo_args: Vec, + + /// Don't include the default features in the build + pub(crate) no_default_features: bool, /// The target directory for the build pub(crate) custom_target_dir: Option, + + /// How we'll go about building + pub(crate) mode: BuildMode, + + /// Status channel to send our progress updates to + pub(crate) progress: ProgressTx, + + pub(crate) cranelift: bool, + + pub(crate) skip_assets: bool, + + pub(crate) ssg: bool, + + pub(crate) wasm_split: bool, + + pub(crate) debug_symbols: bool, + + pub(crate) inject_loading_scripts: bool, +} + +/// dx can produce different "modes" of a build. A "regular" build is a "base" build. The Fat and Thin +/// modes are used together to achieve binary patching and linking. 
+#[derive(Clone, Debug, PartialEq)] +pub enum BuildMode { + /// A normal build generated using `cargo rustc` + Base, + + /// A "Fat" build generated with cargo rustc and dx as a custom linker without -Wl,-dead-strip + Fat, + + /// A "thin" build generated with `rustc` directly and dx as a custom linker + Thin { + direct_rustc: Vec, + changed_files: Vec, + aslr_reference: u64, + }, +} + +/// The results of the build from cargo +pub struct BuildArtifacts { + pub(crate) exe: PathBuf, + pub(crate) direct_rustc: Vec, + pub(crate) time_start: SystemTime, + pub(crate) time_end: SystemTime, + pub(crate) assets: AssetManifest, } +pub(crate) static PROFILE_WASM: &str = "wasm-dev"; +pub(crate) static PROFILE_ANDROID: &str = "android-dev"; +pub(crate) static PROFILE_SERVER: &str = "server-dev"; + impl BuildRequest { - pub fn new(krate: DioxusCrate, build: BuildArgs, progress: ProgressTx) -> Self { - Self { - build, - krate, - progress, - custom_target_dir: None, + /// Create a new build request + /// + /// This will combine the many inputs here into a single source of truth. Fields will be duplicated + /// from the inputs since various things might need to be autodetected. + /// + /// When creating a new build request we need to take into account + /// - The user's command line arguments + /// - The crate's Cargo.toml + /// - The dioxus.toml + /// - The user's CliSettings + /// - The workspace + /// - The host (android tools, installed frameworks, etc) + /// - The intended platform + /// + /// We will attempt to autodetect a number of things if not provided. 
+ pub async fn new(args: &BuildArgs) -> Result { + let workspace = Workspace::current().await?; + + let package = Self::find_main_package(&workspace.krates, args.package.clone())?; + + let dioxus_config = DioxusConfig::load(&workspace.krates, package)?.unwrap_or_default(); + + let target_kind = match args.example.is_some() { + true => TargetKind::Example, + false => TargetKind::Bin, + }; + + let main_package = &workspace.krates[package]; + + let target_name = args + .example + .clone() + .or(args.bin.clone()) + .or_else(|| { + if let Some(default_run) = &main_package.default_run { + return Some(default_run.to_string()); + } + + let bin_count = main_package + .targets + .iter() + .filter(|x| x.kind.contains(&target_kind)) + .count(); + + if bin_count != 1 { + return None; + } + + main_package.targets.iter().find_map(|x| { + if x.kind.contains(&target_kind) { + Some(x.name.clone()) + } else { + None + } + }) + }) + .unwrap_or(workspace.krates[package].name.clone()); + + let target = main_package + .targets + .iter() + .find(|target| { + target_name == target.name.as_str() && target.kind.contains(&target_kind) + }) + .with_context(|| { + let target_of_kind = |kind|-> String { + let filtered_packages = main_package + .targets + .iter() + .filter_map(|target| { + target.kind.contains(kind).then_some(target.name.as_str()) + }).collect::>(); + filtered_packages.join(", ")}; + if let Some(example) = &args.example { + let examples = target_of_kind(&TargetKind::Example); + format!("Failed to find example {example}. \nAvailable examples are:\n{}", examples) + } else if let Some(bin) = &args.bin { + let binaries = target_of_kind(&TargetKind::Bin); + format!("Failed to find binary {bin}. \nAvailable binaries are:\n{}", binaries) + } else { + format!("Failed to find target {target_name}. \nIt looks like you are trying to build dioxus in a library crate. \ + You either need to run dx from inside a binary crate or build a specific example with the `--example` flag. 
\ + Available examples are:\n{}", target_of_kind(&TargetKind::Example)) + } + })? + .clone(); + + // // Make sure we have a server feature if we're building a fullstack app + // // + // // todo(jon): eventually we want to let users pass a `--server ` flag to specify a package to use as the server + // // however, it'll take some time to support that and we don't have a great RPC binding layer between the two yet + // if self.fullstack && self.server_features.is_empty() { + // return Err(anyhow::anyhow!("Fullstack builds require a server feature on the target crate. Add a `server` feature to the crate and try again.").into()); + // } + + todo!(); + + // let default_platform = krate.default_platform(); + // let mut features = vec![]; + // let mut no_default_features = false; + + // // The user passed --platform XYZ but already has `default = ["ABC"]` in their Cargo.toml + // // We want to strip out the default platform and use the one they passed, setting no-default-features + // if args.platform.is_some() && default_platform.is_some() { + // no_default_features = true; + // features.extend(krate.platformless_features()); + // } + + // // Inherit the platform from the args, or auto-detect it + // let platform = args + // .platform + // .map(|p| Some(p)) + // .unwrap_or_else(|| krate.autodetect_platform().map(|a| a.0)) + // .context("No platform was specified and could not be auto-detected. Please specify a platform with `--platform ` or set a default platform using a cargo feature.")?; + + // // Add any features required to turn on the client + // features.push(krate.feature_for_platform(platform)); + + // // Make sure we set the fullstack platform so we actually build the fullstack variant + // // Users need to enable "fullstack" in their default feature set. 
+ // // todo(jon): fullstack *could* be a feature of the app, but right now we're assuming it's always enabled + // let fullstack = args.fullstack || krate.has_dioxus_feature("fullstack"); + + // // Set the profile of the build if it's not already set + // // This is mostly used for isolation of builds (preventing thrashing) but also useful to have multiple performance profiles + // // We might want to move some of these profiles into dioxus.toml and make them "virtual". + // let profile = match args.args.profile { + // Some(profile) => profile, + // None if args.args.release => "release".to_string(), + // None => match platform { + // Platform::Android => PROFILE_ANDROID.to_string(), + // Platform::Web => PROFILE_WASM.to_string(), + // Platform::Server => PROFILE_SERVER.to_string(), + // _ => "dev".to_string(), + // }, + // }; + + // let device = args.device.unwrap_or(false); + + // // We want a real triple to build with, so we'll autodetect it if it's not provided + // // The triple ends up being a source of truth for us later hence this work to figure it out + // let target = match args.target { + // Some(target) => target, + // None => match platform { + // // Generally just use the host's triple for native executables unless specified otherwisea + // Platform::MacOS + // | Platform::Windows + // | Platform::Linux + // | Platform::Server + // | Platform::Liveview => target_lexicon::HOST, + // Platform::Web => "wasm32-unknown-unknown".parse().unwrap(), + + // // For iOS we should prefer the actual architecture for the simulator, but in lieu of actually + // // figuring that out, we'll assume aarch64 on m-series and x86_64 otherwise + // Platform::Ios => { + // // use the host's architecture and sim if --device is passed + // use target_lexicon::{Architecture, HOST}; + // match HOST.architecture { + // Architecture::Aarch64(_) if device => "aarch64-apple-ios".parse().unwrap(), + // Architecture::Aarch64(_) => "aarch64-apple-ios-sim".parse().unwrap(), + // _ if 
device => "x86_64-apple-ios".parse().unwrap(), + // _ => "x86_64-apple-ios-sim".parse().unwrap(), + // } + // } + + // // Same idea with android but we figure out the connected device using adb + // // for now we use + // Platform::Android => { + // "aarch64-linux-android".parse().unwrap() + // // "unknown-linux-android".parse().unwrap() + // } + // }, + // }; + + // // Enable hot reload. + // if self.hot_reload.is_none() { + // self.hot_reload = Some(krate.workspace.settings.always_hot_reload.unwrap_or(true)); + // } + + // // Open browser. + // if self.open.is_none() { + // self.open = Some( + // krate + // .workspace + // .settings + // .always_open_browser + // .unwrap_or_default(), + // ); + // } + + // // Set WSL file poll interval. + // if self.wsl_file_poll_interval.is_none() { + // self.wsl_file_poll_interval = + // Some(krate.workspace.settings.wsl_file_poll_interval.unwrap_or(2)); + // } + + // // Set always-on-top for desktop. + // if self.always_on_top.is_none() { + // self.always_on_top = Some(krate.workspace.settings.always_on_top.unwrap_or(true)) + // } + + // Determine arch if android + + // if platform == Platform::Android && args.target_args.target.is_none() { + // tracing::debug!("No android arch provided, attempting to auto detect."); + + // let arch = DioxusCrate::autodetect_android_arch().await; + + // // Some extra logs + // let arch = match arch { + // Some(a) => { + // tracing::debug!( + // "Autodetected `{}` Android arch.", + // a.android_target_triplet() + // ); + // a.to_owned() + // } + // None => { + // let a = Arch::default(); + // tracing::debug!( + // "Could not detect Android arch, defaulting to `{}`", + // a.android_target_triplet() + // ); + // a + // } + // }; + + // self.arch = Some(arch); + // } + + todo!() + // Ok(Self { + // hotreload: todo!(), + // open_browser: todo!(), + // wsl_file_poll_interval: todo!(), + // always_on_top: todo!(), + // progress, + // mode, + // platform, + // features, + // no_default_features, + // 
krate, + // custom_target_dir: None, + // profile, + // fullstack, + // target, + // device, + // nightly: args.nightly, + // package: args.package, + // release: args.release, + // skip_assets: args.skip_assets, + // ssg: args.ssg, + // cranelift: args.cranelift, + // cargo_args: args.args.cargo_args, + // wasm_split: args.wasm_split, + // debug_symbols: args.debug_symbols, + // inject_loading_scripts: args.inject_loading_scripts, + // force_sequential: args.force_sequential, + // }) + } + + pub(crate) async fn build(&self) -> Result { + // // Create the bundle in an incomplete state and fill it in + // let mut bundle = Self { + // // server_assets: Default::default(), + // // server, + // build, + // // exe: todo!(), + // // app, + // // direct_rustc: todo!(), + // // time_start: todo!(), + // // time_end: todo!(), + // }; + + let bundle = self; + + // Run the cargo build to produce our artifacts + let exe = PathBuf::new(); + let mut assets = AssetManifest::default(); + + // Now handle + match bundle.mode { + BuildMode::Base | BuildMode::Fat => { + tracing::debug!("Assembling app bundle"); + + bundle.status_start_bundle(); + bundle + .write_executable(&exe, &mut assets) + .await + .context("Failed to write main executable")?; + bundle + .write_assets(&assets) + .await + .context("Failed to write assets")?; + bundle.write_metadata().await?; + bundle.optimize().await?; + // bundle.pre_render_ssg_routes().await?; + bundle + .assemble() + .await + .context("Failed to assemble app bundle")?; + + tracing::debug!("Bundle created at {}", bundle.root_dir().display()); + } + + BuildMode::Thin { aslr_reference, .. } => { + tracing::debug!("Patching existing bundle"); + bundle.write_patch(aslr_reference).await?; + } + } + + todo!() + } + + /// Traverse the target directory and collect all assets from the incremental cache + /// + /// This uses "known paths" that have stayed relatively stable during cargo's lifetime. 
+ /// One day this system might break and we might need to go back to using the linker approach. + pub(crate) async fn collect_assets(&self, exe: &Path) -> Result { + tracing::debug!("Collecting assets ..."); + + if self.skip_assets { + return Ok(AssetManifest::default()); + } + + // walk every file in the incremental cache dir, reading and inserting items into the manifest. + let mut manifest = AssetManifest::default(); + + // And then add from the exe directly, just in case it's LTO compiled and has no incremental cache + _ = manifest.add_from_object_path(exe); + + Ok(manifest) + } + + /// Take the output of rustc and make it into the main exe of the bundle + /// + /// For wasm, we'll want to run `wasm-bindgen` to make it a wasm binary along with some other optimizations + /// Other platforms we might do some stripping or other optimizations + /// Move the executable to the workdir + async fn write_executable(&self, exe: &Path, assets: &mut AssetManifest) -> Result<()> { + match self.platform { + // Run wasm-bindgen on the wasm binary and set its output to be in the bundle folder + // Also run wasm-opt on the wasm binary, and sets the index.html since that's also the "executable". + // + // The wasm stuff will be in a folder called "wasm" in the workdir. + // + // Final output format: + // ``` + // dx/ + // app/ + // web/ + // bundle/ + // build/ + // public/ + // index.html + // wasm/ + // app.wasm + // glue.js + // snippets/ + // ... + // assets/ + // logo.png + // ``` + Platform::Web => { + self.bundle_web(exe, assets).await?; + } + + // this will require some extra oomf to get the multi architecture builds... 
+ // for now, we just copy the exe into the current arch (which, sorry, is hardcoded for my m1) + // we'll want to do multi-arch builds in the future, so there won't be *one* exe dir to worry about + // eventually `exe_dir` and `main_exe` will need to take in an arch and return the right exe path + // + // todo(jon): maybe just symlink this rather than copy it? + // we might want to eventually use the objcopy logic to handle this + // + // https://github.com/rust-mobile/xbuild/blob/master/xbuild/template/lib.rs + // https://github.com/rust-mobile/xbuild/blob/master/apk/src/lib.rs#L19 + Platform::Android | + + // These are all super simple, just copy the exe into the folder + // eventually, perhaps, maybe strip + encrypt the exe? + Platform::MacOS + | Platform::Windows + | Platform::Linux + | Platform::Ios + | Platform::Liveview + | Platform::Server => { + _ = std::fs::remove_dir_all(self.exe_dir()); + std::fs::create_dir_all(self.exe_dir())?; + std::fs::copy(&exe, self.main_exe())?; + } + } + + Ok(()) + } + + /// Copy the assets out of the manifest and into the target location + /// + /// Should be the same on all platforms - just copy over the assets from the manifest into the output directory + async fn write_assets(&self, assets: &AssetManifest) -> Result<()> { + // Server doesn't need assets - web will provide them + if self.platform == Platform::Server { + return Ok(()); + } + + let asset_dir = self.asset_dir(); + + // First, clear the asset dir of any files that don't exist in the new manifest + _ = tokio::fs::create_dir_all(&asset_dir).await; + + // Create a set of all the paths that new files will be bundled to + let mut keep_bundled_output_paths: HashSet<_> = assets + .assets + .values() + .map(|a| asset_dir.join(a.bundled_path())) + .collect(); + + // The CLI creates a .version file in the asset dir to keep track of what version of the optimizer + // the asset was processed. 
If that version doesn't match the CLI version, we need to re-optimize + // all assets. + let version_file = self.asset_optimizer_version_file(); + let clear_cache = std::fs::read_to_string(&version_file) + .ok() + .filter(|s| s == crate::VERSION.as_str()) + .is_none(); + if clear_cache { + keep_bundled_output_paths.clear(); + } + + // one possible implementation of walking a directory only visiting files + fn remove_old_assets<'a>( + path: &'a Path, + keep_bundled_output_paths: &'a HashSet, + ) -> Pin> + Send + 'a>> { + Box::pin(async move { + // If this asset is in the manifest, we don't need to remove it + let canon_path = dunce::canonicalize(path)?; + if keep_bundled_output_paths.contains(canon_path.as_path()) { + return Ok(()); + } + + // Otherwise, if it is a directory, we need to walk it and remove child files + if path.is_dir() { + for entry in std::fs::read_dir(path)?.flatten() { + let path = entry.path(); + remove_old_assets(&path, keep_bundled_output_paths).await?; + } + if path.read_dir()?.next().is_none() { + // If the directory is empty, remove it + tokio::fs::remove_dir(path).await?; + } + } else { + // If it is a file, remove it + tokio::fs::remove_file(path).await?; + } + + Ok(()) + }) + } + + tracing::debug!("Removing old assets"); + tracing::trace!( + "Keeping bundled output paths: {:#?}", + keep_bundled_output_paths + ); + remove_old_assets(&asset_dir, &keep_bundled_output_paths).await?; + + // todo(jon): we also want to eventually include options for each asset's optimization and compression, which we currently aren't + let mut assets_to_transfer = vec![]; + + // Queue the bundled assets + for (asset, bundled) in &assets.assets { + let from = asset.clone(); + let to = asset_dir.join(bundled.bundled_path()); + + // prefer to log using a shorter path relative to the workspace dir by trimming the workspace dir + let from_ = from + .strip_prefix(self.workspace_dir()) + .unwrap_or(from.as_path()); + let to_ = from + 
.strip_prefix(self.workspace_dir()) + .unwrap_or(to.as_path()); + + tracing::debug!("Copying asset {from_:?} to {to_:?}"); + assets_to_transfer.push((from, to, *bundled.options())); + } + + // And then queue the legacy assets + // ideally, one day, we can just check the rsx!{} calls for references to assets + for from in self.legacy_asset_dir_files() { + let to = asset_dir.join(from.file_name().unwrap()); + tracing::debug!("Copying legacy asset {from:?} to {to:?}"); + assets_to_transfer.push((from, to, AssetOptions::Unknown)); + } + + let asset_count = assets_to_transfer.len(); + let started_processing = AtomicUsize::new(0); + let copied = AtomicUsize::new(0); + + // Parallel Copy over the assets and keep track of progress with an atomic counter + let progress = self.progress.clone(); + let ws_dir = self.workspace_dir(); + // Optimizing assets is expensive and blocking, so we do it in a tokio spawn blocking task + tokio::task::spawn_blocking(move || { + assets_to_transfer + .par_iter() + .try_for_each(|(from, to, options)| { + let processing = started_processing.fetch_add(1, Ordering::SeqCst); + let from_ = from.strip_prefix(&ws_dir).unwrap_or(from); + tracing::trace!( + "Starting asset copy {processing}/{asset_count} from {from_:?}" + ); + + let res = process_file_to(options, from, to); + if let Err(err) = res.as_ref() { + tracing::error!("Failed to copy asset {from:?}: {err}"); + } + + let finished = copied.fetch_add(1, Ordering::SeqCst); + BuildRequest::status_copied_asset( + &progress, + finished, + asset_count, + from.to_path_buf(), + ); + + res.map(|_| ()) + }) + }) + .await + .map_err(|e| anyhow::anyhow!("A task failed while trying to copy assets: {e}"))??; + + // // Remove the wasm bindgen output directory if it exists + // _ = std::fs::remove_dir_all(self.wasm_bindgen_out_dir()); + + // Write the version file so we know what version of the optimizer we used + std::fs::write(self.asset_optimizer_version_file(), crate::VERSION.as_str())?; + + Ok(()) + } + + 
/// libpatch-{time}.(so/dll/dylib) (next to the main exe) + pub fn patch_exe(&self) -> PathBuf { + todo!() + // let path = self.main_exe().with_file_name(format!( + // "libpatch-{}", + // self.time_start + // .duration_since(UNIX_EPOCH) + // .unwrap() + // .as_millis(), + // )); + + // let extension = match self.target.operating_system { + // OperatingSystem::Darwin(_) => "dylib", + // OperatingSystem::MacOSX(_) => "dylib", + // OperatingSystem::IOS(_) => "dylib", + // OperatingSystem::Unknown if self.platform == Platform::Web => "wasm", + // OperatingSystem::Windows => "dll", + // OperatingSystem::Linux => "so", + // OperatingSystem::Wasi => "wasm", + // _ => "", + // }; + + // path.with_extension(extension) + } + + /// Run our custom linker setup to generate a patch file in the right location + async fn write_patch(&self, aslr_reference: u64) -> Result<()> { + let raw_args = std::fs::read_to_string(&self.link_args_file()) + .context("Failed to read link args from file")?; + + let args = raw_args.lines().collect::>(); + + let orig_exe = self.main_exe(); + tracing::debug!("writing patch - orig_exe: {:?}", orig_exe); + + let object_files = args + .iter() + .filter(|arg| arg.ends_with(".rcgu.o")) + .sorted() + .map(|arg| PathBuf::from(arg)) + .collect::>(); + + let resolved_patch_bytes = subsecond_cli_support::resolve_undefined( + &orig_exe, + &object_files, + &self.target, + aslr_reference, + ) + .expect("failed to resolve patch symbols"); + + let patch_file = self.main_exe().with_file_name("patch-syms.o"); + std::fs::write(&patch_file, resolved_patch_bytes)?; + + let linker = match self.platform { + Platform::Web => self.workspace.wasm_ld(), + Platform::Android => { + let tools = + crate::build::android_tools().context("Could not determine android tools")?; + tools.android_cc(&self.target) + } + + // Note that I think rust uses rust-lld + // https://blog.rust-lang.org/2024/05/17/enabling-rust-lld-on-linux.html + Platform::MacOS + | Platform::Ios + | Platform::Linux 
+ | Platform::Server + | Platform::Liveview => PathBuf::from("cc"), + + // I think this is right?? does windows use cc? + Platform::Windows => PathBuf::from("cc"), + }; + + let thin_args = self.thin_link_args(&args, aslr_reference)?; + + // let mut env_vars = vec![]; + // self.build_android_env(&mut env_vars, false)?; + + // todo: we should throw out symbols that we don't need and/or assemble them manually + // also we should make sure to propagate the right arguments (target, sysroot, etc) + // + // also, https://developer.apple.com/forums/thread/773907 + // -undefined,dynamic_lookup is deprecated for ios but supposedly cpython is using it + // we might need to link a new patch file that implements the lookups + let res = Command::new(linker) + .args(object_files.iter()) + .arg(patch_file) + .args(thin_args) + .arg("-v") + .arg("-o") // is it "-o" everywhere? + .arg(&self.patch_exe()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .await?; + + let errs = String::from_utf8_lossy(&res.stderr); + if !errs.is_empty() { + if !self.patch_exe().exists() { + tracing::error!("Failed to generate patch: {}", errs.trim()); + } else { + tracing::debug!("Warnings during thin linking: {}", errs.trim()); + } + } + + if self.platform == Platform::Web {} + + // // Clean up the temps manually + // // todo: we might want to keep them around for debugging purposes + // for file in object_files { + // _ = std::fs::remove_file(file); + // } + + // Also clean up the original fat file since that's causing issues with rtld_global + // todo: this might not be platform portable + let link_orig = args + .iter() + .position(|arg| *arg == "-o") + .expect("failed to find -o"); + let link_file: PathBuf = args[link_orig + 1].clone().into(); + _ = std::fs::remove_file(&link_file); + + Ok(()) + } + + fn thin_link_args(&self, original_args: &[&str], aslr_reference: u64) -> Result> { + use target_lexicon::OperatingSystem; + + let triple = self.target.clone(); + let mut args = 
vec![]; + + tracing::debug!("original args:\n{}", original_args.join("\n")); + + match triple.operating_system { + // wasm32-unknown-unknown + // use wasm-ld (gnu-lld) + OperatingSystem::Unknown if self.platform == Platform::Web => { + const WASM_PAGE_SIZE: u64 = 65536; + let table_base = 2000 * (aslr_reference + 1); + let global_base = + ((aslr_reference * WASM_PAGE_SIZE * 3) + (WASM_PAGE_SIZE * 32)) as i32; + tracing::info!( + "using aslr of table: {} and global: {}", + table_base, + global_base + ); + + args.extend([ + // .arg("-z") + // .arg("stack-size=1048576") + "--import-memory".to_string(), + "--import-table".to_string(), + "--growable-table".to_string(), + "--export".to_string(), + "main".to_string(), + "--export-all".to_string(), + "--stack-first".to_string(), + "--allow-undefined".to_string(), + "--no-demangle".to_string(), + "--no-entry".to_string(), + "--emit-relocs".to_string(), + // todo: we need to modify the post-processing code + format!("--table-base={}", table_base).to_string(), + format!("--global-base={}", global_base).to_string(), + ]); + } + + // this uses "cc" and these args need to be ld compatible + // aarch64-apple-ios + // aarch64-apple-darwin + OperatingSystem::IOS(_) | OperatingSystem::MacOSX(_) | OperatingSystem::Darwin(_) => { + args.extend([ + "-Wl,-dylib".to_string(), + // "-Wl,-export_dynamic".to_string(), + // "-Wl,-unexported_symbol,_main".to_string(), + // "-Wl,-undefined,dynamic_lookup".to_string(), + ]); + + match triple.architecture { + target_lexicon::Architecture::Aarch64(_) => { + args.push("-arch".to_string()); + args.push("arm64".to_string()); + } + target_lexicon::Architecture::X86_64 => { + args.push("-arch".to_string()); + args.push("x86_64".to_string()); + } + _ => {} + } + } + + // android/linux + // need to be compatible with lld + OperatingSystem::Linux if triple.environment == Environment::Android => { + args.extend( + [ + "-shared".to_string(), + "-Wl,--eh-frame-hdr".to_string(), + 
"-Wl,-z,noexecstack".to_string(), + "-landroid".to_string(), + "-llog".to_string(), + "-lOpenSLES".to_string(), + "-landroid".to_string(), + "-ldl".to_string(), + "-ldl".to_string(), + "-llog".to_string(), + "-lunwind".to_string(), + "-ldl".to_string(), + "-lm".to_string(), + "-lc".to_string(), + "-Wl,-z,relro,-z,now".to_string(), + "-nodefaultlibs".to_string(), + "-Wl,-Bdynamic".to_string(), + ] + .iter() + .map(|s| s.to_string()), + ); + + match triple.architecture { + target_lexicon::Architecture::Aarch64(_) => { + // args.push("-Wl,--target=aarch64-linux-android".to_string()); + } + target_lexicon::Architecture::X86_64 => { + // args.push("-Wl,--target=x86_64-linux-android".to_string()); + } + _ => {} + } + } + + OperatingSystem::Linux => { + args.extend([ + "-Wl,--eh-frame-hdr".to_string(), + "-Wl,-z,noexecstack".to_string(), + "-Wl,-z,relro,-z,now".to_string(), + "-nodefaultlibs".to_string(), + "-Wl,-Bdynamic".to_string(), + ]); + } + + OperatingSystem::Windows => {} + + _ => return Err(anyhow::anyhow!("Unsupported platform for thin linking").into()), + } + + let extract_value = |arg: &str| -> Option { + original_args + .iter() + .position(|a| *a == arg) + .map(|i| original_args[i + 1].to_string()) + }; + + if let Some(vale) = extract_value("-target") { + args.push("-target".to_string()); + args.push(vale); + } + + if let Some(vale) = extract_value("-isysroot") { + args.push("-isysroot".to_string()); + args.push(vale); + } + + tracing::info!("final args:{:#?}", args); + + Ok(args) + } + + /// The item that we'll try to run directly if we need to. + /// + /// todo(jon): we should name the app properly instead of making up the exe name. It's kinda okay for dev mode, but def not okay for prod + pub fn main_exe(&self) -> PathBuf { + self.exe_dir().join(self.platform_exe_name()) + } + + // /// We always put the server in the `web` folder! 
+ // /// Only the `web` target will generate a `public` folder though + // async fn write_server_executable(&self) -> Result<()> { + // if let Some(server) = &self.server { + // let to = self + // .server_exe() + // .expect("server should be set if we're building a server"); + + // std::fs::create_dir_all(self.server_exe().unwrap().parent().unwrap())?; + + // tracing::debug!("Copying server executable to: {to:?} {server:#?}"); + + // // Remove the old server executable if it exists, since copying might corrupt it :( + // // todo(jon): do this in more places, I think + // _ = std::fs::remove_file(&to); + // std::fs::copy(&server.exe, to)?; + // } + + // Ok(()) + // } + + /// todo(jon): use handlebars templates instead of these prebaked templates + async fn write_metadata(&self) -> Result<()> { + // write the Info.plist file + match self.platform { + Platform::MacOS => { + let dest = self.root_dir().join("Contents").join("Info.plist"); + let plist = self.macos_plist_contents()?; + std::fs::write(dest, plist)?; + } + + Platform::Ios => { + let dest = self.root_dir().join("Info.plist"); + let plist = self.ios_plist_contents()?; + std::fs::write(dest, plist)?; + } + + // AndroidManifest.xml + // er.... maybe even all the kotlin/java/gradle stuff? + Platform::Android => {} + + // Probably some custom format or a plist file (haha) + // When we do the proper bundle, we'll need to do something with wix templates, I think? + Platform::Windows => {} + + // eventually we'll create the .appimage file, I guess? + Platform::Linux => {} + + // These are served as folders, not appimages, so we don't need to do anything special (I think?) + // Eventually maybe write some secrets/.env files for the server? 
+ // We could also distribute them as a deb/rpm for linux and msi for windows + Platform::Web => {} + Platform::Server => {} + Platform::Liveview => {} + } + + Ok(()) + } + + /// Run the optimizers, obfuscators, minimizers, signers, etc + pub(crate) async fn optimize(&self) -> Result<()> { + match self.platform { + Platform::Web => { + // Compress the asset dir + // If pre-compressing is enabled, we can pre_compress the wasm-bindgen output + let pre_compress = self.should_pre_compress_web_assets(self.release); + + self.status_compressing_assets(); + let asset_dir = self.asset_dir(); + tokio::task::spawn_blocking(move || { + crate::fastfs::pre_compress_folder(&asset_dir, pre_compress) + }) + .await + .unwrap()?; + } + Platform::MacOS => {} + Platform::Windows => {} + Platform::Linux => {} + Platform::Ios => {} + Platform::Android => {} + Platform::Server => {} + Platform::Liveview => {} + } + + Ok(()) + } + + // pub(crate) fn server_exe(&self) -> Option { + // if let Some(_server) = &self.server { + // let mut path = self.build_dir(Platform::Server, self.release); + + // if cfg!(windows) { + // path.push("server.exe"); + // } else { + // path.push("server"); + // } + + // return Some(path); + // } + + // None + // } + + /// Bundle the web app + /// - Run wasm-bindgen + /// - Bundle split + /// - Run wasm-opt + /// - Register the .wasm and .js files with the asset system + async fn bundle_web(&self, exe: &Path, assets: &mut AssetManifest) -> Result<()> { + use crate::{wasm_bindgen::WasmBindgen, wasm_opt}; + use std::fmt::Write; + + // Locate the output of the build files and the bindgen output + // We'll fill these in a second if they don't already exist + let bindgen_outdir = self.wasm_bindgen_out_dir(); + let prebindgen = exe.clone(); + let post_bindgen_wasm = self.wasm_bindgen_wasm_output_file(); + let should_bundle_split: bool = self.wasm_split; + let rustc_exe = exe.with_extension("wasm"); + let bindgen_version = self + .wasm_bindgen_version() + .expect("this 
should have been checked by tool verification"); + + // Prepare any work dirs + std::fs::create_dir_all(&bindgen_outdir)?; + + // Prepare our configuration + // + // we turn off debug symbols in dev mode but leave them on in release mode (weird!) since + // wasm-opt and wasm-split need them to do better optimizations. + // + // We leave demangling to false since it's faster and these tools seem to prefer the raw symbols. + // todo(jon): investigate if the chrome extension needs them demangled or demangles them automatically. + let will_wasm_opt = + (self.release || self.wasm_split) && crate::wasm_opt::wasm_opt_available(); + let keep_debug = self.config.web.wasm_opt.debug + || self.debug_symbols + || self.wasm_split + || !self.release + || will_wasm_opt; + let demangle = false; + let wasm_opt_options = WasmOptConfig { + memory_packing: self.wasm_split, + debug: self.debug_symbols, + ..self.config.web.wasm_opt.clone() + }; + + // Run wasm-bindgen. Some of the options are not "optimal" but will be fixed up by wasm-opt + // + // There's performance implications here. Running with --debug is slower than without + // We're keeping around lld sections and names but wasm-opt will fix them + // todo(jon): investigate a good balance of wiping debug symbols during dev (or doing a double build?) 
+ self.status_wasm_bindgen_start(); + tracing::debug!(dx_src = ?TraceSrc::Bundle, "Running wasm-bindgen"); + let start = std::time::Instant::now(); + WasmBindgen::new(&bindgen_version) + .input_path(&rustc_exe) + .target("web") + .debug(keep_debug) + .demangle(demangle) + .keep_debug(keep_debug) + .keep_lld_sections(true) + .out_name(self.executable_name()) + .out_dir(&bindgen_outdir) + .remove_name_section(!will_wasm_opt) + .remove_producers_section(!will_wasm_opt) + .run() + .await + .context("Failed to generate wasm-bindgen bindings")?; + tracing::debug!(dx_src = ?TraceSrc::Bundle, "wasm-bindgen complete in {:?}", start.elapsed()); + + // Run bundle splitting if the user has requested it + // It's pretty expensive but because of rayon should be running separate threads, hopefully + // not blocking this thread. Dunno if that's true + if should_bundle_split { + self.status_splitting_bundle(); + + if !will_wasm_opt { + return Err(anyhow::anyhow!( + "Bundle splitting requires wasm-opt to be installed or the CLI to be built with `--features optimizations`. Please install wasm-opt and try again." + ) + .into()); + } + + // Load the contents of these binaries since we need both of them + // We're going to use the default makeLoad glue from wasm-split + let original = std::fs::read(&prebindgen)?; + let bindgened = std::fs::read(&post_bindgen_wasm)?; + let mut glue = wasm_split_cli::MAKE_LOAD_JS.to_string(); + + // Run the emitter + let splitter = wasm_split_cli::Splitter::new(&original, &bindgened); + let modules = splitter + .context("Failed to parse wasm for splitter")? 
+ .emit() + .context("Failed to emit wasm split modules")?; + + // Write the chunks that contain shared imports + // These will be in the format of chunk_0_modulename.wasm - this is hardcoded in wasm-split + tracing::debug!("Writing split chunks to disk"); + for (idx, chunk) in modules.chunks.iter().enumerate() { + let path = bindgen_outdir.join(format!("chunk_{}_{}.wasm", idx, chunk.module_name)); + wasm_opt::write_wasm(&chunk.bytes, &path, &wasm_opt_options).await?; + writeln!( + glue, "export const __wasm_split_load_chunk_{idx} = makeLoad(\"/assets/{url}\", [], fusedImports);", + url = assets + .register_asset(&path, AssetOptions::Unknown)?.bundled_path(), + )?; + } + + // Write the modules that contain the entrypoints + tracing::debug!("Writing split modules to disk"); + for (idx, module) in modules.modules.iter().enumerate() { + let comp_name = module + .component_name + .as_ref() + .context("generated bindgen module has no name?")?; + + let path = bindgen_outdir.join(format!("module_{}_{}.wasm", idx, comp_name)); + wasm_opt::write_wasm(&module.bytes, &path, &wasm_opt_options).await?; + + let hash_id = module.hash_id.as_ref().unwrap(); + + writeln!( + glue, + "export const __wasm_split_load_{module}_{hash_id}_{comp_name} = makeLoad(\"/assets/{url}\", [{deps}], fusedImports);", + module = module.module_name, + + + // Again, register this wasm with the asset system + url = assets + .register_asset(&path, AssetOptions::Unknown)?.bundled_path(), + + // This time, make sure to write the dependencies of this chunk + // The names here are again, hardcoded in wasm-split - fix this eventually. 
+ deps = module + .relies_on_chunks + .iter() + .map(|idx| format!("__wasm_split_load_chunk_{idx}")) + .collect::>() + .join(", ") + )?; + } + + // Write the js binding + // It's not registered as an asset since it will get included in the main.js file + let js_output_path = bindgen_outdir.join("__wasm_split.js"); + std::fs::write(&js_output_path, &glue)?; + + // Make sure to write some entropy to the main.js file so it gets a new hash + // If we don't do this, the main.js file will be cached and never pick up the chunk names + let uuid = uuid::Uuid::new_v5(&uuid::Uuid::NAMESPACE_URL, glue.as_bytes()); + std::fs::OpenOptions::new() + .append(true) + .open(self.wasm_bindgen_js_output_file()) + .context("Failed to open main.js file")? + .write_all(format!("/*{uuid}*/").as_bytes())?; + + // Write the main wasm_bindgen file and register it with the asset system + // This will overwrite the file in place + // We will wasm-opt it in just a second... + std::fs::write(&post_bindgen_wasm, modules.main.bytes)?; + } + + // Make sure to optimize the main wasm file if requested or if bundle splitting + if should_bundle_split || self.release { + self.status_optimizing_wasm(); + wasm_opt::optimize(&post_bindgen_wasm, &post_bindgen_wasm, &wasm_opt_options).await?; + } + + // Make sure to register the main wasm file with the asset system + assets.register_asset(&post_bindgen_wasm, AssetOptions::Unknown)?; + + // Register the main.js with the asset system so it bundles in the snippets and optimizes + assets.register_asset( + &self.wasm_bindgen_js_output_file(), + AssetOptions::Js(JsAssetOptions::new().with_minify(true).with_preload(true)), + )?; + + // Write the index.html file with the pre-configured contents we got from pre-rendering + std::fs::write( + self.root_dir().join("index.html"), + self.prepare_html(&assets)?, + )?; + + Ok(()) + } + + fn macos_plist_contents(&self) -> Result { + handlebars::Handlebars::new() + .render_template( + 
include_str!("../../assets/macos/mac.plist.hbs"), + &InfoPlistData { + display_name: self.bundled_app_name(), + bundle_name: self.bundled_app_name(), + executable_name: self.platform_exe_name(), + bundle_identifier: self.bundle_identifier(), + }, + ) + .map_err(|e| e.into()) + } + + fn ios_plist_contents(&self) -> Result { + handlebars::Handlebars::new() + .render_template( + include_str!("../../assets/ios/ios.plist.hbs"), + &InfoPlistData { + display_name: self.bundled_app_name(), + bundle_name: self.bundled_app_name(), + executable_name: self.platform_exe_name(), + bundle_identifier: self.bundle_identifier(), + }, + ) + .map_err(|e| e.into()) + } + + /// Run any final tools to produce apks or other artifacts we might need. + /// + /// This might include codesigning, zipping, creating an appimage, etc + async fn assemble(&self) -> Result<()> { + if let Platform::Android = self.platform { + self.status_running_gradle(); + + let output = Command::new(self.gradle_exe()?) + .arg("assembleDebug") + .current_dir(self.root_dir()) + .stderr(std::process::Stdio::piped()) + .stdout(std::process::Stdio::piped()) + .output() + .await?; + + if !output.status.success() { + return Err(anyhow::anyhow!("Failed to assemble apk: {output:?}").into()); + } + } + + Ok(()) + } + + /// Run bundleRelease and return the path to the `.aab` file + /// + /// https://stackoverflow.com/questions/57072558/whats-the-difference-between-gradlewassemblerelease-gradlewinstallrelease-and + pub(crate) async fn android_gradle_bundle(&self) -> Result { + let output = Command::new(self.gradle_exe()?) 
+ .arg("bundleRelease") + .current_dir(self.root_dir()) + .output() + .await + .context("Failed to run gradle bundleRelease")?; + + if !output.status.success() { + return Err(anyhow::anyhow!("Failed to bundleRelease: {output:?}").into()); + } + + let app_release = self + .root_dir() + .join("app") + .join("build") + .join("outputs") + .join("bundle") + .join("release"); + + // Rename it to Name-arch.aab + let from = app_release.join("app-release.aab"); + let to = app_release.join(format!("{}-{}.aab", self.bundled_app_name(), self.target)); + + std::fs::rename(from, &to).context("Failed to rename aab")?; + + Ok(to) + } + + fn gradle_exe(&self) -> Result { + // make sure we can execute the gradlew script + #[cfg(unix)] + { + use std::os::unix::prelude::PermissionsExt; + std::fs::set_permissions( + self.root_dir().join("gradlew"), + std::fs::Permissions::from_mode(0o755), + )?; + } + + let gradle_exec_name = match cfg!(windows) { + true => "gradlew.bat", + false => "gradlew", + }; + + Ok(self.root_dir().join(gradle_exec_name)) + } + + pub(crate) fn apk_path(&self) -> PathBuf { + self.root_dir() + .join("app") + .join("build") + .join("outputs") + .join("apk") + .join("debug") + .join("app-debug.apk") + } + + /// We only really currently care about: + /// + /// - app dir (.app, .exe, .apk, etc) + /// - assetas dir + /// - exe dir (.exe, .app, .apk, etc) + /// - extra scaffolding + /// + /// It's not guaranteed that they're different from any other folder + fn prepare_build_dir(&self) -> Result<()> { + // self.prepare_build_dir()?; + + use once_cell::sync::OnceCell; + use std::fs::{create_dir_all, remove_dir_all}; + + static INITIALIZED: OnceCell> = OnceCell::new(); + + let success = INITIALIZED.get_or_init(|| { + _ = remove_dir_all(self.exe_dir()); + + create_dir_all(self.root_dir())?; + create_dir_all(self.exe_dir())?; + create_dir_all(self.asset_dir())?; + + tracing::debug!("Initialized Root dir: {:?}", self.root_dir()); + tracing::debug!("Initialized Exe dir: {:?}", 
self.exe_dir()); + tracing::debug!("Initialized Asset dir: {:?}", self.asset_dir()); + + // we could download the templates from somewhere (github?) but after having banged my head against + // cargo-mobile2 for ages, I give up with that. We're literally just going to hardcode the templates + // by writing them here. + if let Platform::Android = self.platform { + self.build_android_app_dir()?; + } + + Ok(()) + }); + + if let Err(e) = success.as_ref() { + return Err(format!("Failed to initialize build directory: {e}").into()); + } + + Ok(()) + } + + pub fn asset_dir(&self) -> PathBuf { + match self.platform { + Platform::MacOS => self + .root_dir() + .join("Contents") + .join("Resources") + .join("assets"), + + Platform::Android => self + .root_dir() + .join("app") + .join("src") + .join("main") + .join("assets"), + + // everyone else is soooo normal, just app/assets :) + Platform::Web + | Platform::Ios + | Platform::Windows + | Platform::Linux + | Platform::Server + | Platform::Liveview => self.root_dir().join("assets"), } } - /// Run the build command with a pretty loader, returning the executable output location - /// - /// This will also run the fullstack build. Note that fullstack is handled separately within this - /// code flow rather than outside of it. - pub(crate) async fn build_all(self) -> Result { - tracing::debug!( - "Running build command... 
{}", - if self.build.force_sequential { - "(sequentially)" - } else { - "" - } - ); - - let (app, server) = match self.build.force_sequential { - true => self.build_sequential().await?, - false => self.build_concurrent().await?, - }; + pub fn incremental_cache_dir(&self) -> PathBuf { + self.platform_dir().join("incremental-cache") + } - AppBundle::new(self, app, server).await + pub fn link_args_file(&self) -> PathBuf { + self.incremental_cache_dir().join("link_args.txt") } - /// Run the build command with a pretty loader, returning the executable output location - async fn build_concurrent(&self) -> Result<(BuildArtifacts, Option)> { - let (app, server) = - futures_util::future::try_join(self.build_app(), self.build_server()).await?; + /// The directory in which we'll put the main exe + /// + /// Mac, Android, Web are a little weird + /// - mac wants to be in Contents/MacOS + /// - android wants to be in jniLibs/arm64-v8a (or others, depending on the platform / architecture) + /// - web wants to be in wasm (which... we don't really need to, we could just drop the wasm into public and it would work) + /// + /// I think all others are just in the root folder + /// + /// todo(jon): investigate if we need to put .wasm in `wasm`. It kinda leaks implementation details, which ideally we don't want to do. + pub fn exe_dir(&self) -> PathBuf { + match self.platform { + Platform::MacOS => self.root_dir().join("Contents").join("MacOS"), + Platform::Web => self.root_dir().join("wasm"), + + // Android has a whole build structure to it + Platform::Android => self + .root_dir() + .join("app") + .join("src") + .join("main") + .join("jniLibs") + .join(AndroidTools::android_jnilib(&self.target)), - Ok((app, server)) + // these are all the same, I think? 
+ Platform::Windows + | Platform::Linux + | Platform::Ios + | Platform::Server + | Platform::Liveview => self.root_dir(), + } } - async fn build_sequential(&self) -> Result<(BuildArtifacts, Option)> { - let app = self.build_app().await?; - let server = self.build_server().await?; - Ok((app, server)) + /// Get the path to the wasm bindgen temporary output folder + pub fn wasm_bindgen_out_dir(&self) -> PathBuf { + self.root_dir().join("wasm") } - pub(crate) async fn build_app(&self) -> Result { - tracing::debug!("Building app..."); + /// Get the path to the wasm bindgen javascript output file + pub fn wasm_bindgen_js_output_file(&self) -> PathBuf { + self.wasm_bindgen_out_dir() + .join(self.executable_name()) + .with_extension("js") + } - let start = Instant::now(); - self.prepare_build_dir()?; - let exe = self.build_cargo().await?; - let assets = self.collect_assets(&exe).await?; + /// Get the path to the wasm bindgen wasm output file + pub fn wasm_bindgen_wasm_output_file(&self) -> PathBuf { + self.wasm_bindgen_out_dir() + .join(format!("{}_bg", self.executable_name())) + .with_extension("wasm") + } - Ok(BuildArtifacts { - exe, - assets, - time_taken: start.elapsed(), - }) + /// Get the path to the asset optimizer version file + pub fn asset_optimizer_version_file(&self) -> PathBuf { + self.platform_dir().join(".cli-version") } - pub(crate) async fn build_server(&self) -> Result> { - tracing::debug!("Building server..."); + pub(crate) async fn cargo_build(&self) -> Result { + let start = SystemTime::now(); - if !self.build.fullstack { - return Ok(None); - } + tracing::debug!("Executing cargo..."); - let mut cloned = self.clone(); - cloned.build.platform = Some(Platform::Server); - Ok(Some(cloned.build_app().await?)) - } + let mut cmd = self.build_command()?; - /// Run `cargo`, returning the location of the final executable - /// - /// todo: add some stats here, like timing reports, crate-graph optimizations, etc - pub(crate) async fn build_cargo(&self) -> Result { 
- tracing::debug!("Executing cargo..."); + tracing::trace!(dx_src = ?TraceSrc::Build, "Rust cargo args: {:#?}", cmd); // Extract the unit count of the crate graph so build_cargo has more accurate data - let crate_count = self.get_unit_count_estimate().await; + // "Thin" builds only build the final exe, so we only need to build one crate + let crate_count = match self.mode { + BuildMode::Thin { .. } => 1, + _ => self.get_unit_count_estimate().await, + }; // Update the status to show that we're starting the build and how many crates we expect to build self.status_starting_build(crate_count); - let mut cmd = Command::new("cargo"); - - cmd.arg("rustc") - .current_dir(self.krate.crate_dir()) - .arg("--message-format") - .arg("json-diagnostic-rendered-ansi") - .args(self.build_arguments()) - .envs(self.env_vars()?); - - if let Some(target_dir) = self.custom_target_dir.as_ref() { - cmd.env("CARGO_TARGET_DIR", target_dir); - } - - // Android needs a special linker since the linker is actually tied to the android toolchain. - // For the sake of simplicity, we're going to pass the linker here using ourselves as the linker, - // but in reality we could simply use the android toolchain's linker as the path. - // - // We don't want to overwrite the user's .cargo/config.toml since that gets committed to git - // and we want everyone's install to be the same. 
- if self.build.platform() == Platform::Android { - let ndk = self - .krate - .android_ndk() - .context("Could not autodetect android linker")?; - let arch = self.build.target_args.arch(); - let linker = arch.android_linker(&ndk); - - let link_action = LinkAction::LinkAndroid { - linker, - extra_flags: vec![], - } - .to_json(); - - cmd.env(LinkAction::ENV_VAR_NAME, link_action); - } - - tracing::trace!(dx_src = ?TraceSrc::Build, "Rust cargo args: {:#?}", cmd); - let mut child = cmd .stdout(Stdio::piped()) .stderr(Stdio::piped()) @@ -159,11 +1508,12 @@ impl BuildRequest { let stdout = tokio::io::BufReader::new(child.stdout.take().unwrap()); let stderr = tokio::io::BufReader::new(child.stderr.take().unwrap()); - let mut output_location = None; + let mut output_location: Option = None; let mut stdout = stdout.lines(); let mut stderr = stderr.lines(); let mut units_compiled = 0; let mut emitting_error = false; + let mut direct_rustc = Vec::new(); loop { use cargo_metadata::Message; @@ -181,6 +1531,30 @@ impl BuildRequest { match message { Message::BuildScriptExecuted(_) => units_compiled += 1, Message::TextLine(line) => { + // Try to extract the direct rustc args from the output + if line.trim().starts_with("Running ") { + // trim everyting but the contents between the quotes + let args = line + .trim() + .trim_start_matches("Running `") + .trim_end_matches('`'); + + // Parse these as shell words so we can get the direct rustc args + direct_rustc = shell_words::split(args).unwrap(); + } + + #[derive(Debug, Deserialize)] + struct RustcArtifact { + artifact: PathBuf, + emit: String, + } + + if let Ok(artifact) = serde_json::from_str::(&line) { + if artifact.emit == "link" { + output_location = Some(artifact.artifact); + } + } + // For whatever reason, if there's an error while building, we still receive the TextLine // instead of an "error" message. However, the following messages *also* tend to // be the error message, and don't start with "error:". 
So we'll check if we've already @@ -223,174 +1597,188 @@ impl BuildRequest { tracing::error!("Cargo build failed - no output location. Toggle tracing mode (press `t`) for more information."); } - let out_location = output_location.context("Build did not return an executable")?; + let exe = output_location.context("Build did not return an executable")?; - tracing::debug!( - "Build completed successfully - output location: {:?}", - out_location - ); + tracing::debug!("Build completed successfully - output location: {:?}", exe); - Ok(out_location) + Ok(BuildArtifacts { + exe, + direct_rustc, + time_start: start, + time_end: SystemTime::now(), + assets: Default::default(), + }) } - /// Traverse the target directory and collect all assets from the incremental cache - /// - /// This uses "known paths" that have stayed relatively stable during cargo's lifetime. - /// One day this system might break and we might need to go back to using the linker approach. - pub(crate) async fn collect_assets(&self, exe: &Path) -> Result { - tracing::debug!("Collecting assets ..."); - - if self.build.skip_assets { - return Ok(AssetManifest::default()); - } - - // Experimental feature for testing - if the env var is set, we'll use the deeplinker - if std::env::var("DEEPLINK").is_ok() { - tracing::debug!("Using deeplinker instead of incremental cache"); - return self.deep_linker_asset_extract().await; + #[tracing::instrument( + skip(self), + level = "trace", + fields(dx_src = ?TraceSrc::Build) + )] + fn build_command(&self) -> Result { + // Prefer using the direct rustc if we have it + if let BuildMode::Thin { direct_rustc, .. 
} = &self.mode { + tracing::debug!("Using direct rustc: {:?}", direct_rustc); + if !direct_rustc.is_empty() { + let mut cmd = Command::new(direct_rustc[0].clone()); + cmd.args(direct_rustc[1..].iter()); + cmd.envs(self.env_vars()?); + cmd.current_dir(self.workspace_dir()); + cmd.arg(format!( + "-Clinker={}", + dunce::canonicalize(std::env::current_exe().unwrap()) + .unwrap() + .display() + )); + return Ok(cmd); + } } - // walk every file in the incremental cache dir, reading and inserting items into the manifest. - let mut manifest = AssetManifest::default(); - - // And then add from the exe directly, just in case it's LTO compiled and has no incremental cache - _ = manifest.add_from_object_path(exe); - - Ok(manifest) + // Otherwise build up the command using cargo rustc + let mut cmd = Command::new("cargo"); + cmd.arg("rustc") + .current_dir(self.crate_dir()) + .arg("--message-format") + .arg("json-diagnostic-rendered-ansi") + .args(self.build_arguments()) + .envs(self.env_vars()?); + Ok(cmd) } /// Create a list of arguments for cargo builds pub(crate) fn build_arguments(&self) -> Vec { let mut cargo_args = Vec::new(); - // Set the target, profile and features that vary between the app and server builds - if self.build.platform() == Platform::Server { - cargo_args.push("--profile".to_string()); - match self.build.release { - true => cargo_args.push("release".to_string()), - false => cargo_args.push(self.build.server_profile.to_string()), - }; - } else { - // Add required profile flags. --release overrides any custom profiles. 
- let custom_profile = &self.build.profile.as_ref(); - if custom_profile.is_some() || self.build.release { - cargo_args.push("--profile".to_string()); - match self.build.release { - true => cargo_args.push("release".to_string()), - false => { - cargo_args.push( - custom_profile - .expect("custom_profile should have been checked by is_some") - .to_string(), - ); - } - }; - } - - // todo: use the right arch based on the current arch - let custom_target = match self.build.platform() { - Platform::Web => Some("wasm32-unknown-unknown"), - Platform::Ios => match self.build.target_args.device { - Some(true) => Some("aarch64-apple-ios"), - _ => Some("aarch64-apple-ios-sim"), - }, - Platform::Android => Some(self.build.target_args.arch().android_target_triplet()), - Platform::Server => None, - // we're assuming we're building for the native platform for now... if you're cross-compiling - // the targets here might be different - Platform::MacOS => None, - Platform::Windows => None, - Platform::Linux => None, - Platform::Liveview => None, - }; + // Add required profile flags. --release overrides any custom profiles. + cargo_args.push("--profile".to_string()); + cargo_args.push(self.profile.to_string()); - if let Some(target) = custom_target.or(self.build.target_args.target.as_deref()) { - cargo_args.push("--target".to_string()); - cargo_args.push(target.to_string()); - } - } + // Pass the appropriate target to cargo. 
We *always* specify a target which is somewhat helpful for preventing thrashing + cargo_args.push("--target".to_string()); + cargo_args.push(self.target.to_string()); // We always run in verbose since the CLI itself is the one doing the presentation cargo_args.push("--verbose".to_string()); - if self.build.target_args.no_default_features { + if self.no_default_features { cargo_args.push("--no-default-features".to_string()); } - let features = self.target_features(); - - if !features.is_empty() { + if !self.features.is_empty() { cargo_args.push("--features".to_string()); - cargo_args.push(features.join(" ")); + cargo_args.push(self.features.join(" ")); } - if let Some(ref package) = self.build.target_args.package { + // todo: maybe always set a package to reduce ambiguity? + if let Some(package) = &self.package { cargo_args.push(String::from("-p")); cargo_args.push(package.clone()); } - cargo_args.append(&mut self.build.cargo_args.clone()); - - match self.krate.executable_type() { + match self.executable_type() { krates::cm::TargetKind::Bin => cargo_args.push("--bin".to_string()), krates::cm::TargetKind::Lib => cargo_args.push("--lib".to_string()), krates::cm::TargetKind::Example => cargo_args.push("--example".to_string()), _ => {} }; - cargo_args.push(self.krate.executable_name().to_string()); + cargo_args.push(self.executable_name().to_string()); + + cargo_args.extend(self.cargo_args.clone()); + + cargo_args.push("--".to_string()); // the bundle splitter needs relocation data // we'll trim these out if we don't need them during the bundling process // todo(jon): for wasm binary patching we might want to leave these on all the time. 
- if self.build.platform() == Platform::Web && self.build.experimental_wasm_split { - cargo_args.push("--".to_string()); + if self.platform == Platform::Web && self.wasm_split { cargo_args.push("-Clink-args=--emit-relocs".to_string()); } - tracing::debug!(dx_src = ?TraceSrc::Build, "cargo args: {:?}", cargo_args); + // dx *always* links android and thin builds + if self.platform == Platform::Android || matches!(self.mode, BuildMode::Thin { .. }) { + cargo_args.push(format!( + "-Clinker={}", + dunce::canonicalize(std::env::current_exe().unwrap()) + .unwrap() + .display() + )); + } - cargo_args - } + match self.mode { + BuildMode::Base => {} + BuildMode::Thin { .. } => {} + BuildMode::Fat => { + // This prevents rust from passing -dead_strip to the linker + // todo: don't save temps here unless we become the linker for the base app + cargo_args.extend_from_slice(&[ + "-Csave-temps=true".to_string(), + "-Clink-dead-code".to_string(), + ]); + + match self.platform { + // if macos/ios, -Wl,-all_load is required for the linker to work correctly + // macos uses ld64 but through the `cc` interface.a + Platform::MacOS | Platform::Ios => { + cargo_args.push("-Clink-args=-Wl,-all_load".to_string()); + } - #[allow(dead_code)] - pub(crate) fn android_rust_flags(&self) -> String { - let mut rust_flags = std::env::var("RUSTFLAGS").unwrap_or_default(); + Platform::Android => { + cargo_args.push("-Clink-args=-Wl,--whole-archive".to_string()); + } - // todo(jon): maybe we can make the symbol aliasing logic here instead of using llvm-objcopy - if self.build.platform() == Platform::Android { - let cur_exe = std::env::current_exe().unwrap(); - rust_flags.push_str(format!(" -Clinker={}", cur_exe.display()).as_str()); - rust_flags.push_str(" -Clink-arg=-landroid"); - rust_flags.push_str(" -Clink-arg=-llog"); - rust_flags.push_str(" -Clink-arg=-lOpenSLES"); - rust_flags.push_str(" -Clink-arg=-Wl,--export-dynamic"); - } + // if linux -Wl,--whole-archive is required for the linker to work 
correctly + Platform::Linux => { + cargo_args.push("-Clink-args=-Wl,--whole-archive".to_string()); + } - rust_flags - } + // if windows -Wl,--whole-archive is required for the linker to work correctly + // https://learn.microsoft.com/en-us/cpp/build/reference/wholearchive-include-all-library-object-files?view=msvc-170 + Platform::Windows => { + cargo_args.push("-Clink-args=-Wl,--whole-archive".to_string()); + } - /// Create the list of features we need to pass to cargo to build the app by merging together - /// either the client or server features depending on if we're building a server or not. - pub(crate) fn target_features(&self) -> Vec { - let mut features = self.build.target_args.features.clone(); + // if web, -Wl,--whole-archive is required for the linker to work correctly. + // We also use --no-gc-sections and --export-table and --export-memory to push + // said symbols into the export table. + // + // We use --emit-relocs but scrub those before they make it into the final output. + // This is designed for us to build a solid call graph. 
+ // + // rust uses its own wasm-ld linker which can be found here (it's just gcc-ld with a -target): + // /Users/jonkelley/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/aarch64-apple-darwin/bin/gcc-ld + // /Users/jonkelley/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/aarch64-apple-darwin/bin/gcc-ld/wasm-ld + // + // export all should place things like env.memory into the export table so we can access them + // when loading the patches + Platform::Web => { + cargo_args.push("-Clink-arg=--no-gc-sections".into()); + cargo_args.push("-Clink-arg=--growable-table".into()); + cargo_args.push("-Clink-arg=--whole-archive".into()); + cargo_args.push("-Clink-arg=--export-table".into()); + cargo_args.push("-Clink-arg=--export-memory".into()); + cargo_args.push("-Clink-arg=--emit-relocs".into()); + cargo_args.push("-Clink-arg=--export=__stack_pointer".into()); + cargo_args.push("-Clink-arg=--export=__heap_base".into()); + cargo_args.push("-Clink-arg=--export=__data_end".into()); + } - if self.build.platform() == Platform::Server { - features.extend(self.build.target_args.server_features.clone()); - } else { - features.extend(self.build.target_args.client_features.clone()); + _ => {} + } + } } - features + tracing::debug!(dx_src = ?TraceSrc::Build, "cargo args: {:?}", cargo_args); + + cargo_args } pub(crate) fn all_target_features(&self) -> Vec { - let mut features = self.target_features(); + let mut features = self.features.clone(); - if !self.build.target_args.no_default_features { + if !self.no_default_features { features.extend( - self.krate - .package() + self.package() .features .get("default") .cloned() @@ -438,123 +1826,30 @@ impl BuildRequest { /// TODO: always use https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#unit-graph once it is stable pub(crate) async fn get_unit_count_estimate(&self) -> usize { // Try to get it from nightly - self.get_unit_count().await.unwrap_or_else(|_| { - // Otherwise, use cargo metadata - (self - 
.krate - .krates - .krates_filtered(krates::DepKind::Dev) - .iter() - .map(|k| k.targets.len()) - .sum::() as f64 - / 3.5) as usize - }) - } - - /// We used to require traversing incremental artifacts for assets that were included but not - /// directly exposed to the final binary. Now, however, we force APIs to carry items created - /// from asset calls into top-level items such that they *do* get included in the final binary. - /// - /// There's a chance that's not actually true, so this function is kept around in case we do - /// need to revert to "deep extraction". - #[allow(unused)] - async fn deep_linker_asset_extract(&self) -> Result { - // Create a temp file to put the output of the args - // We need to do this since rustc won't actually print the link args to stdout, so we need to - // give `dx` a file to dump its env::args into - let tmp_file = tempfile::NamedTempFile::new()?; - - // Run `cargo rustc` again, but this time with a custom linker (dx) and an env var to force - // `dx` to act as a linker - // - // This will force `dx` to look through the incremental cache and find the assets from the previous build - Command::new("cargo") - .arg("rustc") - .args(self.build_arguments()) - .envs(self.env_vars()?) - .arg("--offline") /* don't use the network, should already be resolved */ - .arg("--") - .arg(format!( - "-Clinker={}", - std::env::current_exe() - .unwrap() - .canonicalize() - .unwrap() - .display() - )) - .env( - LinkAction::ENV_VAR_NAME, - LinkAction::BuildAssetManifest { - destination: tmp_file.path().to_path_buf().clone(), - } - .to_json(), - ) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .output() - .await?; - - // The linker wrote the manifest to the temp file, let's load it! 
- let manifest = AssetManifest::load_from_file(tmp_file.path())?; - - if let Ok(path) = std::env::var("DEEPLINK").map(|s| s.parse::().unwrap()) { - _ = tmp_file.persist(path); + if let Ok(count) = self.get_unit_count().await { + return count; } - Ok(manifest) + // Otherwise, use cargo metadata + let units = self + .workspace + .krates + .krates_filtered(krates::DepKind::Dev) + .iter() + .map(|k| k.targets.len()) + .sum::(); + + (units as f64 / 3.5) as usize } fn env_vars(&self) -> Result> { let mut env_vars = vec![]; - if self.build.platform() == Platform::Android { - let ndk = self - .krate - .android_ndk() - .context("Could not autodetect android linker")?; - let arch = self.build.target_args.arch(); - let linker = arch.android_linker(&ndk); - let min_sdk_version = arch.android_min_sdk_version(); - let ar_path = arch.android_ar_path(&ndk); - let target_cc = arch.target_cc(&ndk); - let target_cxx = arch.target_cxx(&ndk); - let java_home = arch.java_home(); - - tracing::debug!( - r#"Using android: - min_sdk_version: {min_sdk_version} - linker: {linker:?} - ar_path: {ar_path:?} - target_cc: {target_cc:?} - target_cxx: {target_cxx:?} - java_home: {java_home:?} - "# - ); - - env_vars.push(("ANDROID_NATIVE_API_LEVEL", min_sdk_version.to_string())); - env_vars.push(("TARGET_AR", ar_path.display().to_string())); - env_vars.push(("TARGET_CC", target_cc.display().to_string())); - env_vars.push(("TARGET_CXX", target_cxx.display().to_string())); - env_vars.push(("ANDROID_NDK_ROOT", ndk.display().to_string())); - - // attempt to set java_home to the android studio java home if it exists. 
- // https://stackoverflow.com/questions/71381050/java-home-is-set-to-an-invalid-directory-android-studio-flutter - // attempt to set java_home to the android studio java home if it exists and java_home was not already set - if let Some(java_home) = java_home { - tracing::debug!("Setting JAVA_HOME to {java_home:?}"); - env_vars.push(("JAVA_HOME", java_home.display().to_string())); - } - - env_vars.push(("WRY_ANDROID_PACKAGE", "dev.dioxus.main".to_string())); - env_vars.push(("WRY_ANDROID_LIBRARY", "dioxusmain".to_string())); - env_vars.push(( - "WRY_ANDROID_KOTLIN_FILES_OUT_DIR", - self.wry_android_kotlin_files_out_dir() - .display() - .to_string(), - )); + let mut custom_linker = None; - env_vars.push(("RUSTFLAGS", self.android_rust_flags())) + // Make sure to set all the crazy android flags + if self.platform == Platform::Android { + let linker = self.build_android_env(&mut env_vars, true)?; // todo(jon): the guide for openssl recommends extending the path to include the tools dir // in practice I couldn't get this to work, but this might eventually become useful. 
@@ -575,112 +1870,117 @@ impl BuildRequest { // std::env::var("PATH").unwrap_or_default() // ); // env_vars.push(("PATH", extended_path)); - }; - - // If this is a release build, bake the base path and title - // into the binary with env vars - if self.build.release { - if let Some(base_path) = &self.krate.config.web.app.base_path { - env_vars.push((ASSET_ROOT_ENV, base_path.clone())); - } - env_vars.push((APP_TITLE_ENV, self.krate.config.web.app.title.clone())); - } - - Ok(env_vars) - } - - /// We only really currently care about: - /// - /// - app dir (.app, .exe, .apk, etc) - /// - assets dir - /// - exe dir (.exe, .app, .apk, etc) - /// - extra scaffolding - /// - /// It's not guaranteed that they're different from any other folder - fn prepare_build_dir(&self) -> Result<()> { - use once_cell::sync::OnceCell; - use std::fs::{create_dir_all, remove_dir_all}; - - static INITIALIZED: OnceCell> = OnceCell::new(); - - let success = INITIALIZED.get_or_init(|| { - _ = remove_dir_all(self.exe_dir()); - - create_dir_all(self.root_dir())?; - create_dir_all(self.exe_dir())?; - create_dir_all(self.asset_dir())?; - tracing::debug!("Initialized Root dir: {:?}", self.root_dir()); - tracing::debug!("Initialized Exe dir: {:?}", self.exe_dir()); - tracing::debug!("Initialized Asset dir: {:?}", self.asset_dir()); + // Also make sure to set the linker + custom_linker = Some(linker); + }; - // we could download the templates from somewhere (github?) but after having banged my head against - // cargo-mobile2 for ages, I give up with that. We're literally just going to hardcode the templates - // by writing them here. 
- if let Platform::Android = self.build.platform() { - self.build_android_app_dir()?; + match &self.mode { + // We don't usually employ a custom linker for fat/base builds unless it's android + // This might change in the future for "zero-linking" + BuildMode::Base | BuildMode::Fat => { + if let Some(linker) = custom_linker { + tracing::info!("Using custom linker for base link: {linker:?}"); + env_vars.push(( + LinkAction::ENV_VAR_NAME, + LinkAction::BaseLink { + linker, + extra_flags: vec![], + } + .to_json(), + )); + } } - Ok(()) - }); - - if let Err(e) = success.as_ref() { - return Err(format!("Failed to initialize build directory: {e}").into()); - } - - Ok(()) - } - - /// The directory in which we'll put the main exe - /// - /// Mac, Android, Web are a little weird - /// - mac wants to be in Contents/MacOS - /// - android wants to be in jniLibs/arm64-v8a (or others, depending on the platform / architecture) - /// - web wants to be in wasm (which... we don't really need to, we could just drop the wasm into public and it would work) - /// - /// I think all others are just in the root folder - /// - /// todo(jon): investigate if we need to put .wasm in `wasm`. It kinda leaks implementation details, which ideally we don't want to do. - pub fn exe_dir(&self) -> PathBuf { - match self.build.platform() { - Platform::MacOS => self.root_dir().join("Contents").join("MacOS"), - Platform::Web => self.root_dir().join("wasm"), - - // Android has a whole build structure to it - Platform::Android => self - .root_dir() - .join("app") - .join("src") - .join("main") - .join("jniLibs") - .join(self.build.target_args.arch().android_jnilib()), + // We use a custom linker here (dx) but it doesn't actually do anything + BuildMode::Thin { .. 
} => { + std::fs::create_dir_all(self.link_args_file().parent().unwrap()); + env_vars.push(( + LinkAction::ENV_VAR_NAME, + LinkAction::ThinLink { + triple: self.target.clone(), + save_link_args: self.link_args_file(), + } + .to_json(), + )) + } + } - // these are all the same, I think? - Platform::Windows - | Platform::Linux - | Platform::Ios - | Platform::Server - | Platform::Liveview => self.root_dir(), + if let Some(target_dir) = self.custom_target_dir.as_ref() { + env_vars.push(("CARGO_TARGET_DIR", target_dir.display().to_string())); } - } - /// Get the path to the wasm bindgen temporary output folder - pub fn wasm_bindgen_out_dir(&self) -> PathBuf { - self.root_dir().join("wasm") - } + // If this is a release build, bake the base path and title + // into the binary with env vars + if self.release { + if let Some(base_path) = &self.config.web.app.base_path { + env_vars.push((ASSET_ROOT_ENV, base_path.clone())); + } + env_vars.push((APP_TITLE_ENV, self.config.web.app.title.clone())); + } - /// Get the path to the wasm bindgen javascript output file - pub fn wasm_bindgen_js_output_file(&self) -> PathBuf { - self.wasm_bindgen_out_dir() - .join(self.krate.executable_name()) - .with_extension("js") + Ok(env_vars) } - /// Get the path to the wasm bindgen wasm output file - pub fn wasm_bindgen_wasm_output_file(&self) -> PathBuf { - self.wasm_bindgen_out_dir() - .join(format!("{}_bg", self.krate.executable_name())) - .with_extension("wasm") + pub fn build_android_env( + &self, + env_vars: &mut Vec<(&str, String)>, + rustf_flags: bool, + ) -> Result { + let tools = crate::build::android_tools().context("Could not determine android tools")?; + let linker = tools.android_cc(&self.target); + let min_sdk_version = tools.min_sdk_version(); + let ar_path = tools.ar_path(); + let target_cc = tools.target_cc(); + let target_cxx = tools.target_cxx(); + let java_home = tools.java_home(); + let ndk = tools.ndk.clone(); + tracing::debug!( + r#"Using android: + min_sdk_version: 
{min_sdk_version} + linker: {linker:?} + ar_path: {ar_path:?} + target_cc: {target_cc:?} + target_cxx: {target_cxx:?} + java_home: {java_home:?} + "# + ); + env_vars.push(("ANDROID_NATIVE_API_LEVEL", min_sdk_version.to_string())); + env_vars.push(("TARGET_AR", ar_path.display().to_string())); + env_vars.push(("TARGET_CC", target_cc.display().to_string())); + env_vars.push(("TARGET_CXX", target_cxx.display().to_string())); + env_vars.push(("ANDROID_NDK_ROOT", ndk.display().to_string())); + if let Some(java_home) = java_home { + tracing::debug!("Setting JAVA_HOME to {java_home:?}"); + env_vars.push(("JAVA_HOME", java_home.display().to_string())); + } + env_vars.push(("WRY_ANDROID_PACKAGE", "dev.dioxus.main".to_string())); + env_vars.push(("WRY_ANDROID_LIBRARY", "dioxusmain".to_string())); + env_vars.push(( + "WRY_ANDROID_KOTLIN_FILES_OUT_DIR", + self.wry_android_kotlin_files_out_dir() + .display() + .to_string(), + )); + + if rustf_flags { + env_vars.push(("RUSTFLAGS", { + let mut rust_flags = std::env::var("RUSTFLAGS").unwrap_or_default(); + + // todo(jon): maybe we can make the symbol aliasing logic here instead of using llvm-objcopy + if self.platform == Platform::Android { + let cur_exe = std::env::current_exe().unwrap(); + rust_flags.push_str(format!(" -Clinker={}", cur_exe.display()).as_str()); + rust_flags.push_str(" -Clink-arg=-landroid"); + rust_flags.push_str(" -Clink-arg=-llog"); + rust_flags.push_str(" -Clink-arg=-lOpenSLES"); + rust_flags.push_str(" -Clink-arg=-Wl,--export-dynamic"); + } + + rust_flags + })); + } + Ok(linker) } /// returns the path to root build folder. This will be our working directory for the build. 
@@ -699,13 +1999,13 @@ impl BuildRequest { pub(crate) fn root_dir(&self) -> PathBuf { let platform_dir = self.platform_dir(); - match self.build.platform() { + match self.platform { Platform::Web => platform_dir.join("public"), Platform::Server => platform_dir.clone(), // ends up *next* to the public folder // These might not actually need to be called `.app` but it does let us run these with `open` - Platform::MacOS => platform_dir.join(format!("{}.app", self.krate.bundled_app_name())), - Platform::Ios => platform_dir.join(format!("{}.app", self.krate.bundled_app_name())), + Platform::MacOS => platform_dir.join(format!("{}.app", self.bundled_app_name())), + Platform::Ios => platform_dir.join(format!("{}.app", self.bundled_app_name())), // in theory, these all could end up directly in the root dir Platform::Android => platform_dir.join("app"), // .apk (after bundling) @@ -716,47 +2016,16 @@ impl BuildRequest { } pub(crate) fn platform_dir(&self) -> PathBuf { - self.krate - .build_dir(self.build.platform(), self.build.release) - } - - pub fn asset_dir(&self) -> PathBuf { - match self.build.platform() { - Platform::MacOS => self - .root_dir() - .join("Contents") - .join("Resources") - .join("assets"), - - Platform::Android => self - .root_dir() - .join("app") - .join("src") - .join("main") - .join("assets"), - - // everyone else is soooo normal, just app/assets :) - Platform::Web - | Platform::Ios - | Platform::Windows - | Platform::Linux - | Platform::Server - | Platform::Liveview => self.root_dir().join("assets"), - } - } - - /// Get the path to the asset optimizer version file - pub fn asset_optimizer_version_file(&self) -> PathBuf { - self.platform_dir().join(".cli-version") + self.build_dir(self.platform, self.release) } pub fn platform_exe_name(&self) -> String { - match self.build.platform() { - Platform::MacOS => self.krate.executable_name().to_string(), - Platform::Ios => self.krate.executable_name().to_string(), - Platform::Server => 
self.krate.executable_name().to_string(), - Platform::Liveview => self.krate.executable_name().to_string(), - Platform::Windows => format!("{}.exe", self.krate.executable_name()), + match self.platform { + Platform::MacOS => self.executable_name().to_string(), + Platform::Ios => self.executable_name().to_string(), + Platform::Server => self.executable_name().to_string(), + Platform::Liveview => self.executable_name().to_string(), + Platform::Windows => format!("{}.exe", self.executable_name()), // from the apk spec, the root exe is a shared library // we include the user's rust code as a shared library with a fixed namespacea @@ -765,7 +2034,7 @@ impl BuildRequest { Platform::Web => unimplemented!("there's no main exe on web"), // this will be wrong, I think, but not important? // todo: maybe this should be called AppRun? - Platform::Linux => self.krate.executable_name().to_string(), + Platform::Linux => self.executable_name().to_string(), } } @@ -798,17 +2067,17 @@ impl BuildRequest { tracing::debug!("Initialized app/src/assets: {:?}", app_assets); tracing::debug!("Initialized app/src/kotlin/main: {:?}", app_kotlin_out); - // handlerbars - let hbs = handlebars::Handlebars::new(); + // handlebars #[derive(serde::Serialize)] struct HbsTypes { application_id: String, app_name: String, } let hbs_data = HbsTypes { - application_id: self.krate.full_mobile_app_name(), - app_name: self.krate.bundled_app_name(), + application_id: self.full_mobile_app_name(), + app_name: self.bundled_app_name(), }; + let hbs = handlebars::Handlebars::new(); // Top-level gradle config write( @@ -968,4 +2237,721 @@ impl BuildRequest { kotlin_dir } + + pub(crate) fn is_patch(&self) -> bool { + matches!(&self.mode, BuildMode::Thin { .. 
}) + } + + // pub(crate) async fn new(args: &TargetArgs) -> Result { + + // Ok(Self { + // workspace: workspace.clone(), + // package, + // config: dioxus_config, + // target: Arc::new(target), + // }) + // } + + /// The asset dir we used to support before manganis became the default. + /// This generally was just a folder in your Dioxus.toml called "assets" or "public" where users + /// would store their assets. + /// + /// With manganis you now use `asset!()` and we pick it up automatically. + pub(crate) fn legacy_asset_dir(&self) -> Option { + self.config + .application + .asset_dir + .clone() + .map(|dir| self.crate_dir().join(dir)) + } + + /// Get the list of files in the "legacy" asset directory + pub(crate) fn legacy_asset_dir_files(&self) -> Vec { + let mut files = vec![]; + + let Some(legacy_asset_dir) = self.legacy_asset_dir() else { + return files; + }; + + let Ok(read_dir) = legacy_asset_dir.read_dir() else { + return files; + }; + + for entry in read_dir.flatten() { + files.push(entry.path()); + } + + files + } + + /// Get the directory where this app can write to for this session that's guaranteed to be stable + /// for the same app. This is useful for emitting state like window position and size. + /// + /// The directory is specific for this app and might be + pub(crate) fn session_cache_dir(&self) -> PathBuf { + self.internal_out_dir() + .join(self.executable_name()) + .join("session-cache") + } + + /// Get the outdir specified by the Dioxus.toml, relative to the crate directory. + /// We don't support workspaces yet since that would cause a collision of bundles per project. + pub(crate) fn crate_out_dir(&self) -> Option { + self.config + .application + .out_dir + .as_ref() + .map(|out_dir| self.crate_dir().join(out_dir)) + } + + /// Compose an out directory. Represents the typical "dist" directory that + /// is "distributed" after building an application (configurable in the + /// `Dioxus.toml`). 
+ fn internal_out_dir(&self) -> PathBuf { + let dir = self.workspace_dir().join("target").join("dx"); + std::fs::create_dir_all(&dir).unwrap(); + dir + } + + /// Create a workdir for the given platform + /// This can be used as a temporary directory for the build, but in an observable way such that + /// you can see the files in the directory via `target` + /// + /// target/dx/build/app/web/ + /// target/dx/build/app/web/public/ + /// target/dx/build/app/web/server.exe + pub(crate) fn build_dir(&self, platform: Platform, release: bool) -> PathBuf { + self.internal_out_dir() + .join(self.executable_name()) + .join(if release { "release" } else { "debug" }) + .join(platform.build_folder_name()) + } + + /// target/dx/bundle/app/ + /// target/dx/bundle/app/blah.app + /// target/dx/bundle/app/blah.exe + /// target/dx/bundle/app/public/ + pub(crate) fn bundle_dir(&self, platform: Platform) -> PathBuf { + self.internal_out_dir() + .join(self.executable_name()) + .join("bundle") + .join(platform.build_folder_name()) + } + + /// Get the workspace directory for the crate + pub(crate) fn workspace_dir(&self) -> PathBuf { + self.workspace + .krates + .workspace_root() + .as_std_path() + .to_path_buf() + } + + /// Get the directory of the crate + pub(crate) fn crate_dir(&self) -> PathBuf { + self.package() + .manifest_path + .parent() + .unwrap() + .as_std_path() + .to_path_buf() + } + + /// Get the main source file of the target + pub(crate) fn main_source_file(&self) -> PathBuf { + self.crate_target.src_path.as_std_path().to_path_buf() + } + + /// Get the package we are currently in + pub(crate) fn package(&self) -> &krates::cm::Package { + &self.workspace.krates[self.crate_package] + } + + /// Get the name of the package we are compiling + pub(crate) fn executable_name(&self) -> &str { + &self.crate_target.name + } + + /// Get the type of executable we are compiling + pub(crate) fn executable_type(&self) -> krates::cm::TargetKind { + self.crate_target.kind[0].clone() + } + + 
/// Try to autodetect the platform from the package by reading its features + /// + /// Read the default-features list and/or the features list on dioxus to see if we can autodetect the platform + pub(crate) fn autodetect_platform(&self) -> Option<(Platform, String)> { + let krate = self.workspace.krates.krates_by_name("dioxus").next()?; + + // We're going to accumulate the platforms that are enabled + // This will let us create a better warning if multiple platforms are enabled + let manually_enabled_platforms = self + .workspace + .krates + .get_enabled_features(krate.kid)? + .iter() + .flat_map(|feature| { + tracing::trace!("Autodetecting platform from feature {feature}"); + Platform::autodetect_from_cargo_feature(feature).map(|f| (f, feature.to_string())) + }) + .collect::>(); + + if manually_enabled_platforms.len() > 1 { + tracing::error!("Multiple platforms are enabled. Please specify a platform with `--platform ` or set a single default platform using a cargo feature."); + for platform in manually_enabled_platforms { + tracing::error!(" - {platform:?}"); + } + return None; + } + + if manually_enabled_platforms.len() == 1 { + return manually_enabled_platforms.first().cloned(); + } + + // Let's try and find the list of platforms from the feature list + // This lets apps that specify web + server to work without specifying the platform. 
+ // This is because we treat `server` as a binary thing rather than a dedicated platform, so at least we can disambiguate it + let possible_platforms = self + .package() + .features + .iter() + .filter_map(|(feature, _features)| { + match Platform::autodetect_from_cargo_feature(feature) { + Some(platform) => Some((platform, feature.to_string())), + None => { + let auto_implicit = _features + .iter() + .filter_map(|f| { + if !f.starts_with("dioxus?/") && !f.starts_with("dioxus/") { + return None; + } + + let rest = f + .trim_start_matches("dioxus/") + .trim_start_matches("dioxus?/"); + + Platform::autodetect_from_cargo_feature(rest) + }) + .collect::>(); + + if auto_implicit.len() == 1 { + Some((auto_implicit.first().copied().unwrap(), feature.to_string())) + } else { + None + } + } + } + }) + .filter(|platform| platform.0 != Platform::Server) + .collect::>(); + + if possible_platforms.len() == 1 { + return possible_platforms.first().cloned(); + } + + None + } + + /// Check if dioxus is being built with a particular feature + pub(crate) fn has_dioxus_feature(&self, filter: &str) -> bool { + self.workspace + .krates + .krates_by_name("dioxus") + .any(|dioxus| { + self.workspace + .krates + .get_enabled_features(dioxus.kid) + .map(|features| features.contains(filter)) + .unwrap_or_default() + }) + } + + /// Get the features required to build for the given platform + pub(crate) fn feature_for_platform(&self, platform: Platform) -> String { + let package = self.package(); + + // Try to find the feature that activates the dioxus feature for the given platform + let dioxus_feature = platform.feature_name(); + + let res = package.features.iter().find_map(|(key, features)| { + // if the feature is just the name of the platform, we use that + if key == dioxus_feature { + return Some(key.clone()); + } + + // Otherwise look for the feature that starts with dioxus/ or dioxus?/ and matches the platform + for feature in features { + if let Some((_, after_dioxus)) = 
feature.split_once("dioxus") { + if let Some(dioxus_feature_enabled) = + after_dioxus.trim_start_matches('?').strip_prefix('/') + { + // If that enables the feature we are looking for, return that feature + if dioxus_feature_enabled == dioxus_feature { + return Some(key.clone()); + } + } + } + } + + None + }); + + res.unwrap_or_else(|| { + let fallback = format!("dioxus/{}", platform.feature_name()) ; + tracing::debug!( + "Could not find explicit feature for platform {platform}, passing `fallback` instead" + ); + fallback + }) + } + + /// Check if assets should be pre_compressed. This will only be true in release mode if the user + /// has enabled pre_compress in the web config. + pub(crate) fn should_pre_compress_web_assets(&self, release: bool) -> bool { + self.config.web.pre_compress && release + } + + // The `opt-level=1` increases build times, but can noticeably decrease time + // between saving changes and being able to interact with an app (for wasm/web). The "overall" + // time difference (between having and not having the optimization) can be + // almost imperceptible (~1 s) but also can be very noticeable (~6 s) — depends + // on setup (hardware, OS, browser, idle load). 
+ // + // Find or create the client and server profiles in the top-level Cargo.toml file + // todo(jon): we should/could make these optional by placing some defaults somewhere + pub(crate) fn initialize_profiles(&self) -> crate::Result<()> { + let config_path = self.workspace_dir().join("Cargo.toml"); + let mut config = match std::fs::read_to_string(&config_path) { + Ok(config) => config.parse::().map_err(|e| { + crate::Error::Other(anyhow::anyhow!("Failed to parse Cargo.toml: {}", e)) + })?, + Err(_) => Default::default(), + }; + + if let Item::Table(table) = config + .as_table_mut() + .entry("profile") + .or_insert(Item::Table(Default::default())) + { + if let toml_edit::Entry::Vacant(entry) = table.entry(PROFILE_WASM) { + let mut client = toml_edit::Table::new(); + client.insert("inherits", Item::Value("dev".into())); + client.insert("opt-level", Item::Value(1.into())); + entry.insert(Item::Table(client)); + } + + if let toml_edit::Entry::Vacant(entry) = table.entry(PROFILE_SERVER) { + let mut server = toml_edit::Table::new(); + server.insert("inherits", Item::Value("dev".into())); + entry.insert(Item::Table(server)); + } + + if let toml_edit::Entry::Vacant(entry) = table.entry(PROFILE_ANDROID) { + let mut android = toml_edit::Table::new(); + android.insert("inherits", Item::Value("dev".into())); + entry.insert(Item::Table(android)); + } + } + + std::fs::write(config_path, config.to_string()) + .context("Failed to write profiles to Cargo.toml")?; + + Ok(()) + } + + fn default_ignore_list(&self) -> Vec<&'static str> { + vec![ + ".git", + ".github", + ".vscode", + "target", + "node_modules", + "dist", + "*~", + ".*", + "*.lock", + "*.log", + ] + } + + /// Create a new gitignore map for this target crate + /// + /// todo(jon): this is a bit expensive to build, so maybe we should cache it? 
+ pub fn workspace_gitignore(&self) -> ignore::gitignore::Gitignore { + let crate_dir = self.crate_dir(); + + let mut ignore_builder = ignore::gitignore::GitignoreBuilder::new(&crate_dir); + ignore_builder.add(crate_dir.join(".gitignore")); + + let workspace_dir = self.workspace_dir(); + ignore_builder.add(workspace_dir.join(".gitignore")); + + for path in self.default_ignore_list() { + ignore_builder + .add_line(None, path) + .expect("failed to add path to file excluded"); + } + + ignore_builder.build().unwrap() + } + + /// Return the version of the wasm-bindgen crate if it exists + pub fn wasm_bindgen_version(&self) -> Option { + self.workspace + .krates + .krates_by_name("wasm-bindgen") + .next() + .map(|krate| krate.krate.version.to_string()) + } + + pub(crate) fn default_platform(&self) -> Option { + let default = self.package().features.get("default")?; + + // we only trace features 1 level deep.. + for feature in default.iter() { + // If the user directly specified a platform we can just use that. 
+ if feature.starts_with("dioxus/") { + let dx_feature = feature.trim_start_matches("dioxus/"); + let auto = Platform::autodetect_from_cargo_feature(dx_feature); + if auto.is_some() { + return auto; + } + } + + // If the user is specifying an internal feature that points to a platform, we can use that + let internal_feature = self.package().features.get(feature); + if let Some(internal_feature) = internal_feature { + for feature in internal_feature { + if feature.starts_with("dioxus/") { + let dx_feature = feature.trim_start_matches("dioxus/"); + let auto = Platform::autodetect_from_cargo_feature(dx_feature); + if auto.is_some() { + return auto; + } + } + } + } + } + + None + } + + /// Gather the features that are enabled for the package + pub(crate) fn platformless_features(&self) -> Vec { + let default = self.package().features.get("default").unwrap(); + let mut kept_features = vec![]; + + // Only keep the top-level features in the default list that don't point to a platform directly + // IE we want to drop `web` if default = ["web"] + 'top: for feature in default { + // Don't keep features that point to a platform via dioxus/blah + if feature.starts_with("dioxus/") { + let dx_feature = feature.trim_start_matches("dioxus/"); + if Platform::autodetect_from_cargo_feature(dx_feature).is_some() { + continue 'top; + } + } + + // Don't keep features that point to a platform via an internal feature + if let Some(internal_feature) = self.package().features.get(feature) { + for feature in internal_feature { + if feature.starts_with("dioxus/") { + let dx_feature = feature.trim_start_matches("dioxus/"); + if Platform::autodetect_from_cargo_feature(dx_feature).is_some() { + continue 'top; + } + } + } + } + + // Otherwise we can keep it + kept_features.push(feature.to_string()); + } + + kept_features + } + + /// Return the list of paths that we should watch for changes. 
+ pub(crate) fn watch_paths(&self) -> Vec { + let mut watched_paths = vec![]; + + // Get a list of *all* the crates with Rust code that we need to watch. + // This will end up being dependencies in the workspace and non-workspace dependencies on the user's computer. + let mut watched_crates = self.local_dependencies(); + watched_crates.push(self.crate_dir()); + + // Now, watch all the folders in the crates, but respecting their respective ignore files + for krate_root in watched_crates { + // Build the ignore builder for this crate, but with our default ignore list as well + let ignore = self.ignore_for_krate(&krate_root); + + for entry in krate_root.read_dir().unwrap() { + let Ok(entry) = entry else { + continue; + }; + + if ignore + .matched(entry.path(), entry.path().is_dir()) + .is_ignore() + { + continue; + } + + watched_paths.push(entry.path().to_path_buf()); + } + } + + watched_paths.dedup(); + + watched_paths + } + + fn ignore_for_krate(&self, path: &Path) -> ignore::gitignore::Gitignore { + let mut ignore_builder = ignore::gitignore::GitignoreBuilder::new(path); + for path in self.default_ignore_list() { + ignore_builder + .add_line(None, path) + .expect("failed to add path to file excluded"); + } + ignore_builder.build().unwrap() + } + + /// Get all the Manifest paths for dependencies that we should watch. Will not return anything + /// in the `.cargo` folder - only local dependencies will be watched. 
+ /// + /// This returns a list of manifest paths + /// + /// Extend the watch path to include: + /// + /// - the assets directory - this is so we can hotreload CSS and other assets by default + /// - the Cargo.toml file - this is so we can hotreload the project if the user changes dependencies + /// - the Dioxus.toml file - this is so we can hotreload the project if the user changes the Dioxus config + pub(crate) fn local_dependencies(&self) -> Vec { + let mut paths = vec![]; + + for (dependency, _edge) in self.workspace.krates.get_deps(self.crate_package) { + let krate = match dependency { + krates::Node::Krate { krate, .. } => krate, + krates::Node::Feature { krate_index, .. } => { + &self.workspace.krates[krate_index.index()] + } + }; + + if krate + .manifest_path + .components() + .any(|c| c.as_str() == ".cargo") + { + continue; + } + + paths.push( + krate + .manifest_path + .parent() + .unwrap() + .to_path_buf() + .into_std_path_buf(), + ); + } + + paths + } + + pub(crate) fn all_watched_crates(&self) -> Vec { + let mut krates: Vec = self + .local_dependencies() + .into_iter() + .map(|p| { + p.parent() + .expect("Local manifest to exist and have a parent") + .to_path_buf() + }) + .chain(Some(self.crate_dir())) + .collect(); + + krates.dedup(); + + krates + } + + pub(crate) fn mobile_org(&self) -> String { + let identifier = self.bundle_identifier(); + let mut split = identifier.splitn(3, '.'); + let sub = split + .next() + .expect("Identifier to have at least 3 periods like `com.example.app`"); + let tld = split + .next() + .expect("Identifier to have at least 3 periods like `com.example.app`"); + format!("{}.{}", sub, tld) + } + + pub(crate) fn bundled_app_name(&self) -> String { + use convert_case::{Case, Casing}; + self.executable_name().to_case(Case::Pascal) + } + + pub(crate) fn full_mobile_app_name(&self) -> String { + format!("{}.{}", self.mobile_org(), self.bundled_app_name()) + } + + pub(crate) fn bundle_identifier(&self) -> String { + if let 
Some(identifier) = self.config.bundle.identifier.clone() { + return identifier.clone(); + } + + format!("com.example.{}", self.bundled_app_name()) + } + + /// Find the main package in the workspace + fn find_main_package(krates: &Krates, package: Option) -> Result { + if let Some(package) = package { + let mut workspace_members = krates.workspace_members(); + let found = workspace_members.find_map(|node| { + if let krates::Node::Krate { id, krate, .. } = node { + if krate.name == package { + return Some(id); + } + } + None + }); + + if found.is_none() { + tracing::error!("Could not find package {package} in the workspace. Did you forget to add it to the workspace?"); + tracing::error!("Packages in the workspace:"); + for package in krates.workspace_members() { + if let krates::Node::Krate { krate, .. } = package { + tracing::error!("{}", krate.name()); + } + } + } + + let kid = found.ok_or_else(|| anyhow::anyhow!("Failed to find package {package}"))?; + + return Ok(krates.nid_for_kid(kid).unwrap()); + }; + + // Otherwise find the package that is the closest parent of the current directory + let current_dir = std::env::current_dir()?; + let current_dir = current_dir.as_path(); + + // Go through each member and find the path that is a parent of the current directory + let mut closest_parent = None; + for member in krates.workspace_members() { + if let krates::Node::Krate { id, krate, .. } = member { + let member_path = krate.manifest_path.parent().unwrap(); + if let Ok(path) = current_dir.strip_prefix(member_path.as_std_path()) { + let len = path.components().count(); + match closest_parent { + Some((_, closest_parent_len)) => { + if len < closest_parent_len { + closest_parent = Some((id, len)); + } + } + None => { + closest_parent = Some((id, len)); + } + } + } + } + } + + let kid = closest_parent + .map(|(id, _)| id) + .with_context(|| { + let bin_targets = krates.workspace_members().filter_map(|krate|match krate { + krates::Node::Krate { krate, .. 
} if krate.targets.iter().any(|t| t.kind.contains(&krates::cm::TargetKind::Bin))=> { + Some(format!("- {}", krate.name)) + } + _ => None + }).collect::>(); + format!("Failed to find binary package to build.\nYou need to either run dx from inside a binary crate or specify a binary package to build with the `--package` flag. Try building again with one of the binary packages in the workspace:\n{}", bin_targets.join("\n")) + })?; + + let package = krates.nid_for_kid(kid).unwrap(); + Ok(package) + } +} + +#[derive(serde::Serialize)] +pub struct InfoPlistData { + pub display_name: String, + pub bundle_name: String, + pub bundle_identifier: String, + pub executable_name: String, } + +// pub(crate) fn triple(&self) -> Triple { +// match self.platform { +// Platform::MacOS => Triple::from_str("aarc64-apple-darwin").unwrap(), +// Platform::Windows => Triple::from_str("x86_64-pc-windows-msvc").unwrap(), +// Platform::Linux => Triple::from_str("x86_64-unknown-linux-gnu").unwrap(), +// Platform::Web => Triple::from_str("wasm32-unknown-unknown").unwrap(), +// Platform::Ios => Triple::from_str("aarch64-apple-ios-sim").unwrap(), +// Platform::Android => Triple::from_str("aarch64-linux-android").unwrap(), +// Platform::Server => Triple::from_str("aarc64-apple-darwin").unwrap(), +// // Platform::Server => Triple::from_str("x86_64-unknown-linux-gnu").unwrap(), +// Platform::Liveview => Triple::from_str("aarc64-apple-darwin").unwrap(), +// } +// } + +// pub(crate) async fn autodetect_android_arch() -> Option { +// // Try auto detecting arch through adb. +// static AUTO_ARCH: OnceCell> = OnceCell::new(); + +// match AUTO_ARCH.get() { +// Some(a) => *a, +// None => { +// // TODO: Wire this up with --device flag. 
(add `-s serial`` flag before `shell` arg) +// let output = Command::new("adb") +// .arg("shell") +// .arg("uname") +// .arg("-m") +// .output() +// .await; + +// let out = match output { +// Ok(o) => o, +// Err(e) => { +// tracing::debug!("ADB command failed: {:?}", e); +// return None; +// } +// }; + +// // Parse ADB output +// let Ok(out) = String::from_utf8(out.stdout) else { +// tracing::debug!("ADB returned unexpected data."); +// return None; +// }; +// let trimmed = out.trim().to_string(); +// tracing::trace!("ADB Returned: `{trimmed:?}`"); + +// // Set the cell +// let arch = match trimmed.as_str() { +// "armv7l" => Ok(Self::Arm), +// "aarch64" => Ok(Self::Arm64), +// "i386" => Ok(Self::X86), +// "x86_64" => Ok(Self::X64), +// _ => Err(()), +// }; +// AUTO_ARCH +// .set(arch) +// .expect("the cell should have been checked empty by the match condition"); + +// arch +// } +// } +// } + +// impl std::fmt::Display for Arch { +// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +// match self { +// Arch::Arm => "armv7l", +// Arch::Arm64 => "aarch64", +// Arch::X86 => "i386", +// Arch::X64 => "x86_64", +// } +// .fmt(f) +// } +// } diff --git a/packages/cli/src/build/templates.rs b/packages/cli/src/build/templates.rs deleted file mode 100644 index 1f32bf57de..0000000000 --- a/packages/cli/src/build/templates.rs +++ /dev/null @@ -1,7 +0,0 @@ -#[derive(serde::Serialize)] -pub struct InfoPlistData { - pub display_name: String, - pub bundle_name: String, - pub bundle_identifier: String, - pub executable_name: String, -} diff --git a/packages/cli/src/build/verify.rs b/packages/cli/src/build/verify.rs index c3724ee8be..bebf55eb34 100644 --- a/packages/cli/src/build/verify.rs +++ b/packages/cli/src/build/verify.rs @@ -10,7 +10,7 @@ impl BuildRequest { tracing::debug!("Verifying tooling..."); self.status_installing_tooling(); - self.krate + self .initialize_profiles() .context("Failed to initialize profiles - dioxus can't build without them. 
You might need to initialize them yourself.")?; @@ -22,7 +22,7 @@ impl BuildRequest { } }; - match self.build.platform() { + match self.platform { Platform::Web => self.verify_web_tooling(rustc).await?, Platform::Ios => self.verify_ios_tooling(rustc).await?, Platform::Android => self.verify_android_tooling(rustc).await?, @@ -58,7 +58,7 @@ impl BuildRequest { } // Wasm bindgen - let krate_bindgen_version = self.krate.wasm_bindgen_version().ok_or(anyhow!( + let krate_bindgen_version = self.wasm_bindgen_version().ok_or(anyhow!( "failed to detect wasm-bindgen version, unable to proceed" ))?; @@ -116,15 +116,14 @@ impl BuildRequest { /// will do its best to fill in the missing bits by exploring the sdk structure /// IE will attempt to use the Java installed from android studio if possible. pub(crate) async fn verify_android_tooling(&self, _rustc: RustcDetails) -> Result<()> { - let result = self - .krate - .android_ndk() - .map(|ndk| self.build.target_args.arch().android_linker(&ndk)); + let android = crate::build::android_tools().context("Android not installed properly. 
Please set the `ANDROID_NDK_HOME` environment variable to the root of your NDK installation.")?; - if let Some(path) = result { - if path.exists() { - return Ok(()); - } + let linker = android.android_cc(&self.target); + + tracing::debug!("Verifying android linker: {linker:?}"); + + if linker.exists() { + return Ok(()); } Err(anyhow::anyhow!( diff --git a/packages/cli/src/build/web.rs b/packages/cli/src/build/web.rs index 52b2e8e914..d6cd44f9cb 100644 --- a/packages/cli/src/build/web.rs +++ b/packages/cli/src/build/web.rs @@ -1,33 +1,51 @@ use dioxus_cli_config::format_base_path_meta_element; +use dioxus_cli_opt::AssetManifest; use manganis::AssetOptions; use crate::error::Result; use std::fmt::Write; use std::path::{Path, PathBuf}; -use super::AppBundle; - -const DEFAULT_HTML: &str = include_str!("../../assets/web/index.html"); -const TOAST_HTML: &str = include_str!("../../assets/web/toast.html"); - -impl AppBundle { - pub(crate) fn prepare_html(&self) -> Result { +use super::BuildRequest; + +const DEFAULT_HTML: &str = include_str!("../../assets/web/dev.index.html"); + +impl BuildRequest { + /// Users create an index.html for their SPA if they want it + /// + /// We always write our wasm as main.js and main_bg.wasm + /// + /// In prod we run the optimizer which bundles everything together properly + /// + /// So their index.html needs to include main.js in the scripts otherwise nothing happens? + /// + /// Seems like every platform has a weird file that declares a bunch of stuff + /// - web: index.html + /// - ios: info.plist + /// - macos: info.plist + /// - linux: appimage root thing? 
+ /// - android: androidmanifest.xml + /// + /// You also might different variants of these files (staging / prod) and different flavors (eu/us) + /// + /// web's index.html is weird since it's not just a bundle format but also a *content* format + pub(crate) fn prepare_html(&self, assets: &AssetManifest) -> Result { let mut html = { - let crate_root: &Path = &self.build.krate.crate_dir(); + let crate_root: &Path = &self.crate_dir(); let custom_html_file = crate_root.join("index.html"); std::fs::read_to_string(custom_html_file).unwrap_or_else(|_| String::from(DEFAULT_HTML)) }; // Inject any resources from the config into the html - self.inject_resources(&mut html)?; + self.inject_resources(&assets, &mut html)?; // Inject loading scripts if they are not already present self.inject_loading_scripts(&mut html); // Replace any special placeholders in the HTML with resolved values - self.replace_template_placeholders(&mut html); + self.replace_template_placeholders(&assets, &mut html); - let title = self.build.krate.config.web.app.title.clone(); + let title = self.config.web.app.title.clone(); replace_or_insert_before("{app_title}", " bool { - !self.build.build.release + !self.release } // Inject any resources from the config into the html - fn inject_resources(&self, html: &mut String) -> Result<()> { + fn inject_resources(&self, assets: &AssetManifest, html: &mut String) -> Result<()> { // Collect all resources into a list of styles and scripts - let resources = &self.build.krate.config.web.resource; + let resources = &self.config.web.resource; let mut style_list = resources.style.clone().unwrap_or_default(); let mut script_list = resources.script.clone().unwrap_or_default(); @@ -72,7 +90,7 @@ impl AppBundle { // Add the base path to the head if this is a debug build if self.is_dev_build() { - if let Some(base_path) = &self.build.krate.config.web.app.base_path { + if let Some(base_path) = &self.config.web.app.base_path { 
head_resources.push_str(&format_base_path_meta_element(base_path)); } } @@ -85,7 +103,7 @@ impl AppBundle { } // Inject any resources from manganis into the head - for asset in self.app.assets.assets.values() { + for asset in assets.assets.values() { let asset_path = asset.bundled_path(); match asset.options() { AssetOptions::Css(css_options) => { @@ -113,10 +131,8 @@ impl AppBundle { } } // Manually inject the wasm file for preloading. WASM currently doesn't support preloading in the manganis asset system - let wasm_source_path = self.build.wasm_bindgen_wasm_output_file(); - let wasm_path = self - .app - .assets + let wasm_source_path = self.wasm_bindgen_wasm_output_file(); + let wasm_path = assets .assets .get(&wasm_source_path) .expect("WASM asset should exist in web bundles") @@ -133,7 +149,7 @@ impl AppBundle { /// Inject loading scripts if they are not already present fn inject_loading_scripts(&self, html: &mut String) { // If it looks like we are already loading wasm or the current build opted out of injecting loading scripts, don't inject anything - if !self.build.build.inject_loading_scripts || html.contains("__wbindgen_start") { + if !self.inject_loading_scripts || html.contains("__wbindgen_start") { return; } @@ -156,36 +172,25 @@ r#" - {DX_TOAST_UTILITIES} html.replace("{DX_TOAST_UTILITIES}", TOAST_HTML), - false => html.replace("{DX_TOAST_UTILITIES}", ""), - }; } /// Replace any special placeholders in the HTML with resolved values - fn replace_template_placeholders(&self, html: &mut String) { - let base_path = self.build.krate.config.web.app.base_path(); + fn replace_template_placeholders(&self, assets: &AssetManifest, html: &mut String) { + let base_path = self.config.web.app.base_path(); *html = html.replace("{base_path}", base_path); - let app_name = &self.build.krate.executable_name(); - let wasm_source_path = self.build.wasm_bindgen_wasm_output_file(); - let wasm_path = self - .app - .assets + let app_name = &self.executable_name(); + let 
wasm_source_path = self.wasm_bindgen_wasm_output_file(); + let wasm_path = assets .assets .get(&wasm_source_path) .expect("WASM asset should exist in web bundles") .bundled_path(); let wasm_path = format!("assets/{wasm_path}"); - let js_source_path = self.build.wasm_bindgen_js_output_file(); - let js_path = self - .app - .assets + let js_source_path = self.wasm_bindgen_js_output_file(); + let js_path = assets .assets .get(&js_source_path) .expect("JS asset should exist in web bundles") @@ -216,14 +221,11 @@ r#" + + + +
+ + diff --git a/packages/subsecond/subsecond-cli/src/main.rs b/packages/subsecond/subsecond-cli/src/main.rs new file mode 100644 index 0000000000..f85c00203a --- /dev/null +++ b/packages/subsecond/subsecond-cli/src/main.rs @@ -0,0 +1,678 @@ +use anyhow::Context; +use cargo_metadata::camino::Utf8PathBuf; +use clap::Parser; +use futures::{SinkExt, StreamExt}; +use itertools::Itertools; +use notify::{ + event::{DataChange, ModifyKind}, + Watcher, +}; +use object::{write::Object, Architecture}; +use serde::Deserialize; +use std::{collections::HashMap, env, ffi::OsStr, path::PathBuf, process::Stdio, time::SystemTime}; +use subsecond_cli_support::{create_jump_table, move_func_initiailizers}; +use target_lexicon::{Environment, Triple}; +use tokio::{ + io::AsyncBufReadExt, + net::TcpListener, + process::{Child, Command}, + time::Instant, +}; +use tokio_tungstenite::WebSocketStream; +use tracing::info; + +#[derive(Debug, Parser)] +struct Args { + #[clap(long)] + target: Option, +} + +/// The main loop of the hotreload process +/// +/// 1. Create initial "fat" build +/// 2. Identify hotpoints from the incrementals. We ignore dependency hotpoints for now, but eventually might want to aggregate workspace deps together. +/// 3. Wait for changes to the main.rs file +/// 4. Perform a "fast" build +/// 5. Diff the object files, walking relocations, preserving local statics +/// 6. Create a minimal patch file to load into the process, including the changed symbol list +/// 7. Pause the process with lldb, run the "hotfn_load_binary_patch" command and then continue +/// 8. 
Repeat +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // Go through the linker if we need to + if let Ok(action) = std::env::var("HOTRELOAD_LINK") { + return link(action).await; + } + + tracing_subscriber::fmt::init(); + + let args = Args::parse(); + let target: Triple = args + .target + .map(|t| t.parse().unwrap()) + .unwrap_or_else(|| Triple::host()); + + // Save the state of the rust files + let src_folder = subsecond_folder().join("subsecond-harness/src/"); + let main_rs = src_folder.join("main.rs"); + + // Modify the main.rs mtime so we skip "fresh" builds + // Basically `touch main.rs` in the directory + std::fs::File::open(&main_rs)?.set_modified(SystemTime::now())?; + + // Perform the initial build + let epoch = SystemTime::UNIX_EPOCH; + let now = std::time::Instant::now(); + tracing::debug!("Starting build for target {target:?}..."); + let result = initial_build(&target).await?; + tracing::debug!( + "Initial build: {:?} -> {}", + now.elapsed(), + &result.output_location, + ); + + // copy the exe and give it a "fat" name. todo: wipe the ld entry that points to `/deps` + let exe = &result.output_location; + let fat_exe = exe.with_file_name(format!( + "fatharness-{}", + epoch.elapsed().unwrap().as_millis() + )); + std::fs::copy(&exe, &fat_exe).unwrap(); + + // Launch the fat exe. We'll overwrite the slim exe location, so this prevents the app from bugging out + let app = launch_app(&fat_exe, &target)?; + + // Wait for the websocket to come up + let mut client = wait_for_ws(9393, &target).await?.unwrap(); + tracing::info!("Client connected"); + + // Watch the source folder for changes + let mut watcher = FsWatcher::watch(src_folder)?; + + while let Some(Ok(event)) = watcher.rx.next().await { + if event.kind != notify::EventKind::Modify(ModifyKind::Any) { + continue; + } + + if !watcher.file_changed(event.paths.first().unwrap()) { + continue; + } + + tracing::info!("Fast reloading... 
"); + + let started = Instant::now(); + let output_temp = match fast_build(&result, &target, client.aslr_reference).await { + Ok(output_temp) => output_temp, + Err(e) => { + tracing::warn!("Fast build failed: {e}"); + continue; + } + }; + + // Assemble the jump table of redirected addresses + // todo: keep track of this and merge it over time + let jump_table = + create_jump_table(fat_exe.as_std_path(), output_temp.as_std_path(), &target).unwrap(); + + client + .socket + .send(tokio_tungstenite::tungstenite::Message::Text( + serde_json::to_string(&jump_table).unwrap(), + )) + .await?; + + if target.architecture == target_lexicon::Architecture::Wasm32 { + let _ = std::fs::copy( + output_temp.as_std_path(), + static_folder().join(output_temp.file_name().unwrap()), + ); + + client.aslr_reference += 1; + } + + tracing::info!("Patching complete in {}ms", started.elapsed().as_millis()) + } + + drop(app); + + Ok(()) +} + +fn launch_app(fat_exe: &Utf8PathBuf, target: &Triple) -> Result { + let app = match target.architecture { + target_lexicon::Architecture::Wasm32 => { + info!("Serving wasm at http://127.0.0.1:9393"); + Command::new("python3") + .current_dir(static_folder()) + .arg("-m") + .arg("http.server") + .arg("9394") + .arg("--directory") + .arg(".") + .kill_on_drop(true) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn()? + } + _ => Command::new(fat_exe).kill_on_drop(true).spawn()?, + }; + + Ok(app) +} +async fn initial_build(target: &Triple) -> anyhow::Result { + // Perform the initial build and print out the link arguments. Don't strip dead code and preserve temp files. 
+ // This results in a "fat" executable that we can bind to + // + // todo: clean up the temps manually + let mut build = Command::new("cargo"); + + build + .arg("rustc") + .arg("--package") + .arg("subsecond-harness") + .arg("--bin") + .arg("subsecond-harness") + .arg("--profile") + .arg("subsecond-dev") + .arg("--message-format") + .arg("json-diagnostic-rendered-ansi") + .arg("--verbose") + .arg("--target") + .arg(target.to_string()); + + match target.architecture { + target_lexicon::Architecture::Wasm32 => { + build.arg("--features").arg("web"); + } + _ => { + build.arg("--features").arg("desktop"); + } + } + + // these args are required to prevent DCE, save intermediates, and print the link args for future usage + // -all_load ensures all statics get bubbled out + // -link-dead-code prevents the flag `-Wl,-dead_strip` from being passed + // -save-temps ensures the intermediates are saved so we can use them for comparsions + build + .arg("--") + .arg("-Csave-temps=true") + .arg("-Clink-dead-code"); + + match target.architecture { + // usually just ld64 - uses your `cc` + target_lexicon::Architecture::Aarch64(_) => { + build.arg("-Clink-arg=-Wl,-all_load"); + } + + // /Users/jonkelley/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/aarch64-apple-darwin/bin/gcc-ld/wasm-ld + target_lexicon::Architecture::Wasm32 => { + // we want "all-load", adjustable ifunc table, + build.arg("-Clink-arg=--no-gc-sections"); + build.arg("-Clink-arg=--growable-table"); + build.arg("-Clink-arg=--whole-archive"); + build.arg("-Clink-arg=--export-table"); + build.arg("-Clink-arg=--export-memory"); + build.arg("-Clink-arg=--emit-relocs"); + build.arg("-Clink-arg=--export=__stack_pointer"); + build.arg("-Clink-arg=--export=__heap_base"); + build.arg("-Clink-arg=--export=__data_end"); + } + + _ => {} + } + + // we capture the link args, but eventually we should actually just use ourselves as the linker since that's more robust + build + .arg("--print") + .arg("link-args") + 
.stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .kill_on_drop(true) + .current_dir(workspace_dir()); + + let build = build.spawn()?; + + let out = run_cargo_output(build).await?; + + if target.architecture == target_lexicon::Architecture::Wasm32 { + _ = std::fs::remove_dir_all(static_folder()); + + let test_data_folder = wasm_data_folder(); + let _ = std::fs::create_dir_all(&test_data_folder); + let _ = std::fs::copy( + out.output_location.as_std_path(), + test_data_folder.join("pre-bindgen.wasm"), + ); + + let unprocessed = std::fs::read(out.output_location.as_std_path())?; + let all_exported_bytes = + subsecond_cli_support::prepare_wasm_base_module(&unprocessed).unwrap(); + let processed = test_data_folder.join("processed.wasm"); + std::fs::write(&processed, all_exported_bytes)?; + + let bind = Command::new("wasm-bindgen") + .arg("--target") + .arg("web") + .arg("--no-typescript") + .arg("--out-dir") + .arg(static_folder()) + .arg("--out-name") + .arg("main") + .arg("--no-demangle") + .arg("--keep-lld-exports") + .arg("--keep-debug") + .arg(&processed) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .kill_on_drop(true) + .current_dir(workspace_dir()) + .output() + .await?; + + let err = String::from_utf8(bind.stderr).unwrap(); + if !err.is_empty() { + tracing::error!("err: {err}"); + } + + let _ = std::fs::copy( + static_folder().join("main_bg.wasm"), + test_data_folder.join("post-bindgen.wasm"), + ); + + let index = include_str!("./index.html"); + std::fs::write(static_folder().join("index.html"), index).unwrap(); + } + + Ok(out) +} + +async fn fast_build( + original: &CargoOutputResult, + target: &Triple, + aslr_reference: u64, +) -> anyhow::Result { + let fast_build = Command::new(original.direct_rustc[0].clone()) + .args(original.direct_rustc[1..].iter()) + .arg("-C") + .arg(format!( + "linker={}", + std::env::current_exe().unwrap().display() + )) + .env("HOTRELOAD_LINK", "patch") + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + 
.current_dir(workspace_dir()) + .spawn()?; + + let output = run_cargo_output(fast_build).await?; + + tracing::info!("fast_build output: {output:#?}"); + + let link_args = std::fs::read_to_string(link_args_file())?; + let mut object_files = link_args + .lines() + .filter(|arg| arg.ends_with(".rcgu.o")) + .sorted() + .map(|arg| PathBuf::from(arg)) + .collect::>(); + + // copy incrementals to the data folder + if target.architecture == target_lexicon::Architecture::Wasm32 { + let test_data_folder = wasm_data_folder().join("incrementals"); + let _ = std::fs::create_dir_all(&test_data_folder); + for object in object_files.iter() { + let dest = test_data_folder.join(object.file_name().unwrap()); + std::fs::copy(object, dest)?; + } + } + + // on wasm we'll need to add in some symbols that resolve to the ifunc table + // unfortunately we can't quite call functions from main directly so we need to go through the ifunc system + // I *think* we can just import them + if target.architecture != target_lexicon::Architecture::Wasm32 { + let resolved = subsecond_cli_support::resolve_undefined( + &original.output_location.as_std_path(), + &object_files, + target, + aslr_reference, + ) + .unwrap(); + + let syms = subsecond_folder().join("data").join("syms.o"); + std::fs::write(&syms, resolved).unwrap(); + object_files.push(syms); + } + + let output_location = original + .output_location + .with_file_name(format!( + "patch-{}", + SystemTime::UNIX_EPOCH.elapsed().unwrap().as_millis() + )) + .with_extension(match target.architecture { + target_lexicon::Architecture::Wasm32 => "wasm", + _ => "", + }); + + let res = match target.architecture { + // usually just ld64 - uses your `cc` + target_lexicon::Architecture::Aarch64(_) => { + Command::new("cc") + .args(object_files) + .arg("-Wl,-dylib") + .arg("-arch") + .arg("arm64") + .arg("-o") + .arg(&output_location) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .await? 
+ } + target_lexicon::Architecture::Wasm32 => { + const WASM_PAGE_SIZE: u64 = 65536; + let table_base = 2000 * (aslr_reference + 1); + let global_base = + ((aslr_reference * WASM_PAGE_SIZE * 3) + (WASM_PAGE_SIZE * 32)) as i32; + tracing::info!( + "using aslr of table: {} and global: {}", + table_base, + global_base + ); + Command::new(wasm_ld().await.unwrap()) + .args(object_files) + .arg("--import-memory") + .arg("--import-table") + .arg("--growable-table") + .arg("--export") + .arg("main") + .arg("--export-all") + // .arg("-z") + // .arg("stack-size=1048576") + .arg("--stack-first") + .arg("--allow-undefined") + .arg("--no-demangle") + .arg("--no-entry") + .arg("--emit-relocs") + .arg(format!("--table-base={}", table_base)) + .arg(format!("--global-base={}", global_base)) + .arg("-o") + .arg(&output_location) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .output() + .await? + } + _ => todo!(), + }; + + let errs = String::from_utf8_lossy(&res.stderr); + if !errs.is_empty() { + tracing::error!("errs: {errs}"); + } + + if target.architecture == target_lexicon::Architecture::Wasm32 { + // let out_bytes = std::fs::read(&output_location).unwrap(); + // let original_butes = std::fs::read(&original.output_location).unwrap(); + // let res_ = move_func_initiailizers(&original_butes, &out_bytes, aslr_reference).unwrap(); + // std::fs::write(&output_location, res_).unwrap(); + } + + Ok(output_location) +} + +/// Store the linker args in a file for the main process to read. +async fn link(action: String) -> anyhow::Result<()> { + let args = std::env::args().collect::>(); + + // Write the linker args to a file for the main process to read + std::fs::write(link_args_file(), args.join("\n"))?; + + match action.as_str() { + // Actually link the object file. 
todo: figure out which linker we should be using + "link" => {} + + // Write a dummy object file to the output file to satisfy rust when it tries to strip the symbols + "patch" => { + let out = args.iter().position(|arg| arg == "-o").unwrap(); + let out_file = args[out + 1].clone(); + let host = Triple::host(); + let dummy_object_file = Object::new( + match host.binary_format { + target_lexicon::BinaryFormat::Elf => object::BinaryFormat::Elf, + target_lexicon::BinaryFormat::Coff => object::BinaryFormat::Coff, + target_lexicon::BinaryFormat::Macho => object::BinaryFormat::MachO, + target_lexicon::BinaryFormat::Wasm => object::BinaryFormat::Wasm, + target_lexicon::BinaryFormat::Xcoff => object::BinaryFormat::Xcoff, + _ => todo!(), + }, + match host.architecture { + target_lexicon::Architecture::Arm(_) => object::Architecture::Arm, + target_lexicon::Architecture::Aarch64(_) => object::Architecture::Aarch64, + target_lexicon::Architecture::X86_32(_) => object::Architecture::X86_64_X32, + target_lexicon::Architecture::X86_64 => object::Architecture::X86_64, + _ => todo!(), + }, + match host.endianness().unwrap() { + target_lexicon::Endianness::Little => object::Endianness::Little, + target_lexicon::Endianness::Big => object::Endianness::Big, + }, + ); + let bytes = dummy_object_file.write().unwrap(); + std::fs::write(out_file, bytes)?; + } + + _ => anyhow::bail!("Unknown action: {}", action), + } + + Ok(()) +} + +#[derive(Debug)] +struct CargoOutputResult { + output_location: Utf8PathBuf, + direct_rustc: Vec, +} + +async fn run_cargo_output(mut child: Child) -> anyhow::Result { + let stdout = tokio::io::BufReader::new(child.stdout.take().unwrap()); + let stderr = tokio::io::BufReader::new(child.stderr.take().unwrap()); + let mut output_location = None; + let mut stdout = stdout.lines(); + let mut stderr = stderr.lines(); + + let mut direct_rustc = vec![]; + + loop { + use cargo_metadata::Message; + + let line = tokio::select! 
{ + Ok(Some(line)) = stdout.next_line() => line, + Ok(Some(line)) = stderr.next_line() => line, + else => break, + }; + + let mut messages = Message::parse_stream(std::io::Cursor::new(line)); + + loop { + let message = match messages.next() { + Some(Ok(message)) => message, + None => break, + other => { + tracing::trace!("other: {other:?}"); + break; + } + }; + + match message { + Message::CompilerArtifact(artifact) => { + if let Some(i) = artifact.executable { + output_location = Some(i) + } + } + Message::CompilerMessage(compiler_message) => { + if let Some(rendered) = &compiler_message.message.rendered { + tracing::trace!("rendered: {rendered}"); + } + } + Message::BuildScriptExecuted(_build_script) => {} + Message::BuildFinished(build_finished) => { + // assuming we received a message from the compiler, so we can exit + if !build_finished.success { + anyhow::bail!("Build failed"); + } + } + Message::TextLine(word) => { + // trim everything but the contents between the quotes + if word.trim().starts_with("Running ") { + let args = word + .trim() + .trim_start_matches("Running `") + .trim_end_matches('`'); + direct_rustc = shell_words::split(args).unwrap(); + } + + #[derive(Debug, Deserialize)] + struct RustcArtifact { + artifact: PathBuf, + emit: String, + } + + if let Ok(artifact) = serde_json::from_str::(&word) { + if artifact.emit == "link" { + output_location = + Some(Utf8PathBuf::from_path_buf(artifact.artifact).unwrap()); + } + } + + tracing::trace!("text: {word}") + } + _ => {} + } + } + } + + let output_location = + output_location.context("Failed to find output location. 
Build must've failed.")?; + + Ok(CargoOutputResult { + output_location, + direct_rustc, + }) +} + +struct FsWatcher { + _watcher: notify::RecommendedWatcher, + files: HashMap, + rx: futures_channel::mpsc::UnboundedReceiver>, +} + +impl FsWatcher { + fn watch(src_folder: PathBuf) -> anyhow::Result { + let (tx, rx) = futures_channel::mpsc::unbounded(); + let mut watcher = + notify::recommended_watcher(move |res: notify::Result| { + _ = tx.unbounded_send(res); + })?; + + let mut files = HashMap::new(); + for entry in walkdir::WalkDir::new(src_folder) { + let entry = entry?; + let path = entry.path(); + if path.is_dir() || path.extension() != Some(OsStr::new("rs")) { + continue; + } + files.insert(path.to_path_buf(), std::fs::read_to_string(&path).unwrap()); + watcher.watch(&path, notify::RecursiveMode::NonRecursive)?; + } + + Ok(FsWatcher { + files, + rx, + _watcher: watcher, + }) + } + + /// Check if the file has changed and update the internal state + fn file_changed(&mut self, path: &PathBuf) -> bool { + if let Some(contents) = self.files.get_mut(path) { + let new_contents = std::fs::read_to_string(&path).unwrap(); + if new_contents == *contents { + return false; + } + *contents = new_contents; + return true; + } + + false + } +} + +struct WsClient { + aslr_reference: u64, + socket: WebSocketStream, +} + +async fn wait_for_ws(port: u16, target: &Triple) -> anyhow::Result> { + // if target.architecture == target_lexicon::Architecture::Wasm32 { + // return Ok(None); + // } + + let addr = format!("127.0.0.1:{}", port); + let try_socket = TcpListener::bind(&addr).await; + let listener = try_socket.expect("Failed to bind"); + + let (conn, _sock) = listener.accept().await?; + let mut socket = tokio_tungstenite::accept_async(conn).await?; + let msg = socket.next().await.unwrap()?; + let aslr_reference = msg.into_text().unwrap().parse().unwrap(); + + Ok(Some(WsClient { + aslr_reference, + socket, + })) +} + +async fn wasm_ld() -> anyhow::Result { + // eg. 
/Users/jonkelley/.rustup/toolchains/stable-aarch64-apple-darwin/lib/rustlib/aarch64-apple-darwin/bin/gcc-ld/wasm-ld + // |_________________________sysroot_____________________________| + // + // we should opt to use rust-lld since that's the default on linux and will eventually be the default on windows + // I think mac will keep ld + let root = Command::new("rustc") + .arg("--print") + .arg("sysroot") + .output() + .await?; + let root = PathBuf::from(String::from_utf8(root.stdout)?.trim()) + .join("lib") + .join("rustlib") + .join(Triple::host().to_string()) + .join("bin") + .join("gcc-ld") + .join("wasm-ld"); + Ok(root) +} + +fn workspace_dir() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("../../../") + .canonicalize() + .unwrap() +} + +fn subsecond_folder() -> PathBuf { + workspace_dir().join("packages").join("subsecond") +} + +fn wasm_data_folder() -> PathBuf { + subsecond_folder().join("data").join("wasm") +} + +fn static_folder() -> PathBuf { + subsecond_folder().join("subsecond-harness").join("static") +} + +fn link_args_file() -> PathBuf { + subsecond_folder().join("data").join("link.txt") +} diff --git a/packages/subsecond/subsecond-harness/Cargo.toml b/packages/subsecond/subsecond-harness/Cargo.toml new file mode 100644 index 0000000000..c260c3c4cc --- /dev/null +++ b/packages/subsecond/subsecond-harness/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "subsecond-harness" +edition = "2021" +version.workspace = true + +[dependencies] +dioxus = { workspace = true } +anyhow = { workspace = true } +subsecond = { workspace = true } +bincode = { workspace = true } +serde_json = { workspace = true } + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +rand = { workspace = true } +color-eyre = { workspace = true } +ratatui = { workspace = true, features = ["crossterm"] } +tungstenite = { version = "0.23.0" } +tokio = { workspace = true, features = ["full"] } + +[features] +default = [ ] +desktop = [ "dioxus/desktop" ] +mobile = [ 
"dioxus/mobile" ] +web = [ "dioxus/web" ] diff --git a/packages/cli/assets/web/loading.js b/packages/subsecond/subsecond-harness/README.md similarity index 100% rename from packages/cli/assets/web/loading.js rename to packages/subsecond/subsecond-harness/README.md diff --git a/packages/subsecond/subsecond-harness/src/dioxus_demo.rs b/packages/subsecond/subsecond-harness/src/dioxus_demo.rs new file mode 100644 index 0000000000..ee83a1b693 --- /dev/null +++ b/packages/subsecond/subsecond-harness/src/dioxus_demo.rs @@ -0,0 +1,103 @@ +use dioxus::prelude::*; + +pub fn launch() { + dioxus::launch(app); +} + +fn app() -> Element { + let mut count = use_signal(|| 0); + + rsx! { + div { style: "display: flex; flex-direction: column; align-items: center; justify-content: center;", + h1 { "Apple: {count} ???" } + button { onclick: move |_| count += 1, "Incr" } + button { onclick: move |_| count -= 1, "Decr" } + img { width: "300px", src: "https://rustacean.net/assets/rustacean-flat-happy.png" } + } + div { style: "display: flex; flex-direction: column; align-items: center; justify-content: center;", + div { style: "background-color: red", + for x in 0..1 { + Child { id: x + 1, opt: "List entry", color: "gri" } + } + } + div { style: "background-color: orange", + for x in 0..1 { + Child { id: x + 1, opt: "List entry", color: "blue" } + } + } + div { style: "background-color: yellow", + for x in 0..1 { + Child { id: x + 1, opt: "List entry", color: "yellow" } + } + } + div { style: "background-color: green", + for x in 0..1 { + Child { id: x + 10, opt: "List entry", color: "orange" } + } + } + div { style: "background-color: blue", + for x in 0..1 { + Child { id: x + 10, opt: "List entry", color: "bluebleu" } + } + } + div { style: "background-color: indigo", + for x in 0..1 { + Child { id: x + 10, opt: "List entry", color: "magentaaa" } + } + } + } + } +} + +#[component] +fn Child(id: u32, opt: String, color: String) -> Element { + let mut count = use_signal(|| 0); + + rsx! 
{ + div { + h3 { "Chil!!!!!!!!!! {id} - {opt} - {color} - {color} - {color}" } + p { "count: {count}" } + button { + onclick: move |_| { + count += id; + }, + "Increment Count" + } + } + } +} +#[component] +fn Child2(id: u32, opt: String) -> Element { + rsx! { + div { "oh lordy!" } + div { "Hello ?? child2s: {id} - {opt} ?" } + } +} + +#[component] +fn Child3(id: u32, opt: String) -> Element { + rsx! { + div { "Hello ?? child: {id} - {opt} ?" } + } +} + +#[component] +fn Child4(id: u32, opt: String) -> Element { + rsx! { + div { "Hello ?? child: {id} - {opt} ?" } + div { "Hello ?? child: {id} - {opt} ?" } + div { "Hello ?? child: {id} - {opt} ?" } + } +} + +#[component] +fn ZoomComponent() -> Element { + // use dioxus::desktop::window; + // button { onclick: move |_| window().set_zoom_level(1.0), "Zoom 1x" } + // button { onclick: move |_| window().set_zoom_level(1.5), "Zoom 1.5x" } + // button { onclick: move |_| window().set_zoom_level(2.0), "Zoom 2x" } + // button { onclick: move |_| window().set_zoom_level(3.0), "Zoom 3x" } + rsx! { + div { "Zoom me!" } + } +} diff --git a/packages/subsecond/subsecond-harness/src/loop_demo.rs b/packages/subsecond/subsecond-harness/src/loop_demo.rs new file mode 100644 index 0000000000..50192fe873 --- /dev/null +++ b/packages/subsecond/subsecond-harness/src/loop_demo.rs @@ -0,0 +1,10 @@ +pub fn launch() { + loop { + std::thread::sleep(std::time::Duration::from_secs(1)); + subsecond::call(|| tick()); + } +} + +fn tick() { + println!("edit me to see the loop in action!!!!!!!!! 
"); +} diff --git a/packages/subsecond/subsecond-harness/src/main.rs b/packages/subsecond/subsecond-harness/src/main.rs new file mode 100644 index 0000000000..d67ed4a4b5 --- /dev/null +++ b/packages/subsecond/subsecond-harness/src/main.rs @@ -0,0 +1,23 @@ +mod dioxus_demo; +mod loop_demo; +mod tui_demo; +mod ws_conn; + +#[cfg(not(target_arch = "wasm32"))] +fn main() { + ws_conn::initialize(); + + let demo = std::env::var("DEMO").unwrap_or("dioxus".to_string()); + + match demo.as_str() { + "dioxus" => dioxus_demo::launch(), + "loop" => loop_demo::launch(), + "tui" => tui_demo::launch(), + _ => panic!("Unknown demo: {}", demo), + } +} + +#[cfg(target_arch = "wasm32")] +fn main() { + dioxus_demo::launch(); +} diff --git a/packages/subsecond/subsecond-harness/src/tui_demo.rs b/packages/subsecond/subsecond-harness/src/tui_demo.rs new file mode 100644 index 0000000000..81e6f1fbdc --- /dev/null +++ b/packages/subsecond/subsecond-harness/src/tui_demo.rs @@ -0,0 +1,113 @@ +#![cfg(not(target_arch = "wasm32"))] + +use color_eyre::Result; +use rand::{rng, Rng}; +use ratatui::{ + crossterm::event::{self, Event, KeyCode, KeyEventKind}, + layout::{Constraint, Layout}, + style::{Color, Style, Stylize}, + text::Line, + widgets::{Bar, BarChart, BarGroup}, + DefaultTerminal, Frame, +}; +use std::time::Duration; + +pub fn launch() { + color_eyre::install().unwrap(); + let mut terminal = ratatui::init(); + let app_result = subsecond::call(|| App::new().run(&mut terminal)); + ratatui::restore(); + app_result.unwrap(); +} + +struct App { + should_exit: bool, + temperatures: Vec, +} + +impl App { + fn new() -> Self { + let mut rng = rand::rng(); + let temperatures = (0..24).map(|_| rng.random_range(50..90)).collect(); + + Self { + should_exit: false, + temperatures, + } + } + + fn run(&mut self, terminal: &mut DefaultTerminal) -> Result<()> { + while !self.should_exit { + subsecond::call(|| self.tick(terminal))?; + } + Ok(()) + } + + fn tick(&mut self, terminal: &mut DefaultTerminal) -> 
Result<()> { + terminal.draw(|frame| self.draw(frame))?; + self.handle_events()?; + Ok(()) + } + + // wait 100ms for an event to occur + fn handle_events(&mut self) -> Result<()> { + if event::poll(Duration::from_millis(100))? { + if let Event::Key(key) = event::read()? { + if key.kind == KeyEventKind::Press && key.code == KeyCode::Char('q') { + self.should_exit = true; + } + + if key.kind == KeyEventKind::Press && key.code == KeyCode::Char('t') { + let mut rng = rng(); + self.temperatures = (0..24).map(|_| rng.random_range(50..90)).collect(); + } + } + } + + Ok(()) + } + + fn draw(&self, frame: &mut Frame) { + let [title, main] = Layout::vertical([Constraint::Length(1), Constraint::Fill(1)]) + .spacing(1) + .areas(frame.area()); + + frame.render_widget( + "Tui development has never been so easy!" + .bold() + .italic() + .into_centered_line() + .centered(), + title, + ); + frame.render_widget(vertical_barchart(&self.temperatures), main); + } +} + +/// Create a vertical bar chart from the temperatures data. 
+fn vertical_barchart(temperatures: &[u8]) -> BarChart { + let bars: Vec = temperatures + .iter() + .enumerate() + .map(|(hour, value)| vertical_bar(hour, value)) + .collect(); + BarChart::default() + .data(BarGroup::default().bars(&bars)) + .bar_width(5) +} + +fn vertical_bar(hour: usize, temperature: &u8) -> Bar { + Bar::default() + .value(u64::from(*temperature)) + .label(Line::from(format!("{hour:>02}:00"))) + .text_value(format!("{temperature:>3}°")) + .style(temperature_style(*temperature)) + .value_style(temperature_style(*temperature).reversed()) +} + +/// create a yellow to red value based on the value (50-90) +fn temperature_style(value: u8) -> Style { + let green = (255.0 * (1.0 - f64::from(value - 50) / 40.0)) as u8; + let color = Color::Rgb(255, green, 0); + Style::new().fg(color) +} diff --git a/packages/subsecond/subsecond-harness/src/ws_conn.rs b/packages/subsecond/subsecond-harness/src/ws_conn.rs new file mode 100644 index 0000000000..0ccf4112ce --- /dev/null +++ b/packages/subsecond/subsecond-harness/src/ws_conn.rs @@ -0,0 +1,35 @@ +use subsecond::JumpTable; + +pub fn initialize() { + // dx already has subsecond integrated, don't boot it twice + if dioxus::cli_config::devserver_ws_endpoint().is_some() { + return; + } + + // Spawn a thread that will read bytes from the fd + // the host process will write new bytes to the fd when it wants to reload the binary + #[cfg(not(target_arch = "wasm32"))] + std::thread::spawn(|| { + let endpoint = + std::env::var("HOTRELOAD_ENDPOINT").unwrap_or("ws://localhost:9393".to_string()); + + let (mut websocket, _req) = match tungstenite::connect(endpoint.clone()) { + Ok((websocket, req)) => (websocket, req), + Err(_) => panic!("Failed to connect to hotreload endpoint"), + }; + + websocket + .send(tungstenite::Message::Text( + subsecond::aslr_reference().to_string(), + )) + .unwrap(); + + while let Ok(msg) = websocket.read() { + if let tungstenite::Message::Text(bytes) = msg { + if let Ok(msg) = 
serde_json::from_str::(bytes.as_ref()) { + unsafe { subsecond::apply_patch(msg) }; + } + } + } + }); +} diff --git a/packages/subsecond/subsecond-macro/Cargo.toml b/packages/subsecond/subsecond-macro/Cargo.toml new file mode 100644 index 0000000000..7877565e70 --- /dev/null +++ b/packages/subsecond/subsecond-macro/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "subsecond-macro" +edition = "2021" +version.workspace = true + +[dependencies] +syn = { workspace = true, features = ["full"] } +base16 = { workspace = true } +digest = { workspace = true } +quote = { workspace = true } +sha2 = { workspace = true } +proc-macro2 = { workspace = true } + +[lib] +proc-macro = true diff --git a/packages/subsecond/subsecond-macro/src/lib.rs b/packages/subsecond/subsecond-macro/src/lib.rs new file mode 100644 index 0000000000..8bd873ca1b --- /dev/null +++ b/packages/subsecond/subsecond-macro/src/lib.rs @@ -0,0 +1,167 @@ +use proc_macro::TokenStream; + +use digest::Digest; +use quote::{format_ident, quote, ToTokens}; +use syn::{parse_macro_input, parse_quote, FnArg, Ident, ItemFn, PatIdent, ReturnType, Signature}; + +/// Annotate a function with `#[hot]` to make it hot-reloadable. +/// +/// This can be used on functions and methods. Changes to the assembly "beneath" the function will +/// cause the function to be recompiled and the new assembly to be executed. +/// +/// If the changes propagate above the function, the nearest `#[hot]` function will be used as the +/// hot-reload point. 
+/// +/// ``` +/// struct Foo {} +/// +/// impl Foo { +/// #[hot] +/// fn tick(&mut self) { +/// self.do_stuff() +/// } +/// } +/// ``` +/// +/// ## Expansion: +/// +/// This macro simply expands functions from the following form: +/// +/// ```rust +/// #[hot] +/// fn do_thing(a: A, b: B) -> C { +/// } +/// ``` +/// +/// to the following: +/// +/// ```rust +/// fn do_thing(a: A, b: B) -> C { +/// #[inline(never)] // force this as a real symbol +/// fn __hot_do_thing(a: A, b: B) -> C { +/// do_thing_inner(a, b) +/// } +/// +/// subsecond::current(do_thing_inner).call((a, b)) +/// } +/// ``` +/// +/// You could also just call `subsecond::current()` yourself, though that interface is slightly +/// unwieldy and intended for use by framework authors. +#[proc_macro_attribute] +pub fn hot(_args: TokenStream, input: TokenStream) -> TokenStream { + /* + #[hot] + fn do_thing(a: A, b: B) -> C { + } + + // expands to + + fn do_thing(a: A, b: B) -> C { + #[inline(never)] // force this as a real symbol + fn __hot_do_thing(a: A, b: B) -> C { + do_thing_inner(a, b) + } + + subsecond::current(do_thing_inner).call((a, b)) + } + + + // for methods, we don't know the type of the receiver, so we generate another method that's hidden + // that also takes `self` as an argument + // + // note that we want to retain the names of idents so rust-analyzer provides the correct info + + struct Foo {} + impl Foo { + #[hot] + fn do_thing(&self, a: A, b: B) -> C { + // code... + } + + // expands to + fn do_thing(&self, a: A, b: B) -> C { + subsecond::current(Self::__hot_do_thing).call((self, a, b)) + } + + fn __hot_do_thing(&self, a: A, b: B) -> C { + // code... 
+ } + } + */ + + let ItemFn { + attrs, + vis, + sig, + block, + } = parse_macro_input!(input as ItemFn); + + let mut outer_sig = sig.clone(); + let mut inner_sig = sig.clone(); + inner_sig.ident = format_ident!("__hot_{}", sig.ident); + + let inner_fn_name = inner_sig.ident.clone(); + + let mut args = vec![]; + for (i, param) in outer_sig.inputs.iter_mut().enumerate() { + match param { + syn::FnArg::Receiver(_) => args.push(format_ident!("self")), + syn::FnArg::Typed(pat_type) => { + match &*pat_type.pat { + // Attempt to preserve original ident for better RA support + syn::Pat::Ident(pat_ident) => { + args.push(pat_ident.ident.clone()); + } + + // Otherwise, generate a new ident + _ => { + // Create a new ident to tie the outer to the call of the inner + let param_ident = format_ident!("__hot_arg_{i}"); + args.push(param_ident.clone()); + pat_type.pat = Box::new(syn::Pat::Ident(syn::PatIdent { + attrs: vec![], + by_ref: None, + mutability: None, + ident: param_ident, + subpat: None, + })); + } + } + } + } + } + + let self_ident = if outer_sig + .inputs + .first() + .map(|arg| matches!(arg, FnArg::Receiver(_))) + == Some(true) + { + quote! { Self:: } + } else { + quote! {} + }; + + quote! 
{ + // the primary function + // &self, Pattern { a, b, c}: i32, b: i32, c: i32, etc + // becomes + // self: &mut Self, arg0: i32, arg1: i32, arg2: i32, etc + #(#attrs)* + #vis #outer_sig { + subsecond::current(#self_ident #inner_fn_name).call( + (#(#args),*) // .call((self, arg0, arg1)) + ) + } + + // retains the original function signature + // &self, a: i32, b: i32, c: i32, etc + #[doc(hidden)] + #[inline(never)] + #inner_sig { + #block + } + } + .into() +} diff --git a/packages/subsecond/subsecond-types/Cargo.toml b/packages/subsecond/subsecond-types/Cargo.toml new file mode 100644 index 0000000000..a76c0b1f98 --- /dev/null +++ b/packages/subsecond/subsecond-types/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "subsecond-types" +edition = "2021" +version.workspace = true + +[dependencies] +serde = { workspace = true, features = ["derive"] } diff --git a/packages/subsecond/subsecond-types/src/lib.rs b/packages/subsecond/subsecond-types/src/lib.rs new file mode 100644 index 0000000000..4b2663d948 --- /dev/null +++ b/packages/subsecond/subsecond-types/src/lib.rs @@ -0,0 +1,45 @@ +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; + +mod nohasher; +pub use nohasher::AddressMap; + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub struct JumpTable { + /// The dylib containing the patch. This should be a valid path so you can just pass it to LibLoading + /// + /// On wasm you will need to fetch() this file and then pass it to the WebAssembly.instantiate() function + pub lib: PathBuf, + + /// old -> new + /// does not take into account the base address of the patch when loaded into memory - need dlopen for that + /// + /// These are intended to be `*const ()` pointers but need to be `u64` for the hashmap. On 32-bit platforms + /// you will need to cast to `usize` before using them. 
+ pub map: AddressMap, + + /// The location of the aslr reference in the binary, used to calculate offsets for the jump table + pub aslr_reference: u64, + + /// the address of the base address of the old original binary + /// + /// machos: this is the address of the `_mh_execute_header` symbol usually at 0x100000000 and loaded near 0x100000000 + /// linux: this is the address of the `__executable_start` symbol usually at 0x0 but loaded around 0x555555550000 + /// windows: this is the address of the `ImageBase` field of the PE header + /// wasm: not useful since there's no ASLR + /// + /// While we can generally guess what these values are, it's possible they are different and thus reading + /// them dynamically is worthwhile. + pub old_base_address: u64, + + /// the address of the base address of the new binary + /// + /// machos: this is the address of the `_mh_execute_header` symbol usually at 0x100000000 and loaded near 0x100000000 + /// linux: this is the address of the `__executable_start` symbol usually at 0x0 but loaded around 0x555555550000 + /// windows: this is the address of the `ImageBase` field of the PE header + /// wasm: not useful since there's no ASLR + /// + /// While we can generally guess what these values are, it's possible they are different and thus reading + /// them dynamically is worthwhile. + pub new_base_address: u64, +} diff --git a/packages/subsecond/subsecond-types/src/nohasher.rs b/packages/subsecond/subsecond-types/src/nohasher.rs new file mode 100644 index 0000000000..6547af2c13 --- /dev/null +++ b/packages/subsecond/subsecond-types/src/nohasher.rs @@ -0,0 +1,51 @@ +use std::{ + collections::HashMap, + hash::{BuildHasherDefault, Hasher}, +}; + +/// An address to address hashmap that does not hash addresses since addresses are by definition unique. 
+pub type AddressMap = HashMap; + +pub type BuildAddressHasher = BuildHasherDefault; + +#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct AddressHasher(u64); + +impl Hasher for AddressHasher { + fn write(&mut self, _: &[u8]) { + panic!("Invalid use of NoHashHasher") + } + fn write_u8(&mut self, n: u8) { + self.0 = u64::from(n) + } + fn write_u16(&mut self, n: u16) { + self.0 = u64::from(n) + } + fn write_u32(&mut self, n: u32) { + self.0 = u64::from(n) + } + fn write_u64(&mut self, n: u64) { + self.0 = n + } + fn write_usize(&mut self, n: usize) { + self.0 = n as u64 + } + fn write_i8(&mut self, n: i8) { + self.0 = n as u64 + } + fn write_i16(&mut self, n: i16) { + self.0 = n as u64 + } + fn write_i32(&mut self, n: i32) { + self.0 = n as u64 + } + fn write_i64(&mut self, n: i64) { + self.0 = n as u64 + } + fn write_isize(&mut self, n: isize) { + self.0 = n as u64 + } + fn finish(&self) -> u64 { + self.0 + } +} diff --git a/packages/subsecond/subsecond/Cargo.toml b/packages/subsecond/subsecond/Cargo.toml new file mode 100644 index 0000000000..8cb34494d1 --- /dev/null +++ b/packages/subsecond/subsecond/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "subsecond" +edition = "2021" +version.workspace = true + +[dependencies] +serde = { version = "1.0.203", features = ["derive"] } +subsecond-macro = { workspace = true } +subsecond-types = { workspace = true } + + +wasm-bindgen = { workspace = true } +wasm-bindgen-futures = { workspace = true } +js-sys = { workspace = true} +serde-wasm-bindgen = { version = "*"} +web-sys = { version = "*", features = ["FetchEvent", "Request", "Window", "Response", "ResponseType"] } + +[target.'cfg(target_arch = "wasm32")'.dependencies] + + +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +libloading = "0.8.6" +libc = "0.2.170" +memmap = "0.7.0" +dbg_breakpoint = "0.1.1" diff --git a/packages/subsecond/subsecond/src/lib.rs b/packages/subsecond/subsecond/src/lib.rs new file mode 100644 index 
0000000000..1d57541f9f --- /dev/null +++ b/packages/subsecond/subsecond/src/lib.rs @@ -0,0 +1,708 @@ +//! # Subsecond: Hot-patching for Rust +//! +//! Subsecond is a library that enables hot-patching for Rust applications. This allows you to change +//! the code of a running application without restarting it. This is useful for game engines, servers, +//! and other long-running applications where the typical edit-compile-run cycle is too slow. +//! +//! Subsecond also implements a technique we call "ThinLinking" which makes compiling Rust code +//! significantly faster in development mode, which can be used outside of hot-patching. +//! +//! # Usage +//! +//! Subsecond is designed to be as simple for both application developers and library authors. +//! +//! Simply call your existing functions with [`call`] and Subsecond will automatically detour +//! that call to the latest version of the function. +//! +//! ```rust +//! fn main() { +//! for x in 0..5 { +//! subsecond::call(|| { +//! println!("Hello, world! {}", x); +//! }); +//! } +//! } +//! ``` +//! +//! To actually load patches into your application, a third-party tool that implements the Subsecond +//! compiler and protocol is required. Subsecond is built and maintained by the Dioxus team, so we +//! suggest using the dioxus CLI tool to use subsecond. +//! +//! To install the Dioxus CLI, we recommend using [`cargo binstall`](https://crates.io/crates/cargo-binstall): +//! +//! ```sh +//! cargo binstall dioxus-cli +//! ``` +//! +//! The Dioxus CLI provides several tools for development. To run your application with Subsecond enabled, +//! use `dx serve` - this takes the same arguments as `cargo run` but will automatically hot-reload your +//! application when changes are detected. +//! +//! ```sh +//! dx serve +//! ``` +//! +//! ## How it works +//! +//! Subsecond works by detouring function calls through a jump table. This jump table contains the latest +//! 
version of the program's function pointers, and when a function is called, Subsecond will look up +//! the function in the jump table and call that instead. +//! +//! Unlike libraries like [detour](https://crates.io/crates/detour), Subsecond *does not* modify your +//! process memory. Patching pointers is wildly unsafe and can lead to crashes and undefined behavior. +//! +//! Instead, an external tool compiles just the parts of your project that changed, links them together +//! using the addresses of the functions in your running program, and then sends the new jump table to +//! your application. Subsecond then applies the patch and continues running. Since Subsecond doesn't +//! modify memory, the program must have some runtime integration to handle the patching. +//! +//! If the framework you're using doesn't integrate with subsecond, you can rely on the fact that calls +//! to stale [`call`] instances will emit a safe panic that is automatically caught and retried +//! by the next [`call`] instance up the callstack. +//! +//! Subsecond is only enabled when debug_assertions are enabled so you can safely ship your application +//! with Subsecond enabled without worrying about the performance overhead. +//! +//! ## Globals and statics +//! +//! Subsecond *does* support hot-reloading of globals, statics, and thread locals. However, there are several limitations: +//! +//! - You may add new globals at runtime, but their destructors will never be called. +//! - Globals are tracked across patches, but renames are considered to be *new* globals. +//! - Changes to static initializers will not be observed. +//! +//! Subsecond purposefully handles statics this way since many libraries like Dioxus and Tokio rely +//! on persistent global runtimes. +//! +//! ## Struct layout and alignment +//! +//! Subsecond currently does not support hot-reloading of structs. This is because the generated code +//! assumes a particular layout and alignment of the struct. 
If layout or alignment change and new +//! functions are called referencing an old version of the struct, the program will crash. +//! +//! To mitigate this, framework authors can integrate with Subsecond to either dispose of the old struct +//! or to re-allocate the struct in a way that is compatible with the new layout. This is called "re-instancing." +//! +//! Because Subsecond performs a safe panic if a stale function is called, you should never witness +//! a crash due to a struct layout change. However, changing a struct's layout will likely cause a +//! re-instantiation of the struct and potentially a loss of state. +//! +//! We'd like to lift this limitation in the future by providing utilities to re-instantiate structs, +//! but for now it's up to the framework authors to handle this. For example, Dioxus apps simply throw +//! out the old state and rebuild it from scratch. +//! +//! ## Nesting Calls +//! +//! Subsecond calls are designed to be nested. This provides clean integration points to know exactly +//! where a hooked function is called. +//! +//! The highest level call is `fn main()` though by default this is not hooked since initialization code +//! tends to be side-effectual and modify global state. Instead, we recommend wrapping the hot-patch +//! points manually with [`call`]. +//! +//! ```rust +//! fn main() { +//! // Changes to the the `for` loop will cause an unwind to this call. +//! subsecond::call(|| { +//! for x in 0..5 { +//! // Changes to the `println!` will be isolated to this call. +//! subsecond::call(|| { +//! println!("Hello, world! {}", x); +//! }); +//! } +//! }); +//! } +//! ``` +//! +//! The goal here is to provide granular control over where patches are applied to limit loss of state +//! when new code is loaded. +//! +//! ## Applying patches +//! +//! When running under the Dioxus CLI, the `dx serve` command will automatically apply patches when +//! changes are detected. 
Patches are delivered over the [Dioxus Devtools](https://crates.io/crates/dioxus-devtools) +//! websocket protocol and received by corresponding websocket. +//! +//! If you're using Subsecond in your own application that doesn't have a runtime integration, you can +//! build an integration using the [`apply_patch`] function. This function takes a `JumpTable` which +//! the subsecond-cli-support crate can generate. +//! +//! To add support for the Dioxus Devtools protocol to your app, you can use the [dioxus-devtools](https://crates.io/crates/dioxus-devtools) +//! crate which provides a `connect` method that will automatically apply patches to your application. +//! +//! Unfortunately, one design quirk of Subsecond is that running apps need to communicate the address +//! of `main` to the patcher. This is due to a security technique called [ASLR](https://en.wikipedia.org/wiki/Address_space_layout_randomization) +//! which randomizes the address of functions in memory. See the subsecond-harness and subsecond-cli +//! for more details on how to implement the protocol. +//! +//! ## ThinLink +//! +//! ThinLink is a program linker for Rust that is designed to be used with Subsecond. It implements +//! the powerful patching system that Subsecond uses to hot-reload Rust applications. +//! +//! ThinLink is simply a wrapper around your existing linker but with extra features: +//! +//! - Automatic dynamic linking to dependencies +//! - Generation of Subsecond jump tables +//! - Diffing of object files for function invalidation +//! +//! Because ThinLink performs very little actual linking, it drastically speeds up traditional Rust +//! development. With a development-optimized profile, ThinLink can shrink an incremental build to less than 500ms. +//! +//! ThinLink is automatically integrated into the Dioxus CLI though it's currently not available as +//! a standalone tool. +//! +//! ## Limitations +//! +//! Subsecond is a powerful tool but it has several limitations. 
We talk about them above, but here's +//! a quick summary: +//! +//! - Struct hot reloading requires instancing or unwinding +//! - Statics are tracked but not destructed +//! +//! ## Platform support +//! +//! Subsecond works across all major platforms: +//! +//! - Android (arm64-v8a, armeabi-v7a) +//! - iOS (arm64, x86_64) +//! - Linux (x86_64, aarch64) +//! - macOS (x86_64, arm64) +//! - Windows (x86_64, aarch64) +//! - WebAssembly (wasm32) +//! +//! If you have a new platform you'd like to see supported, please open an issue on the Subsecond repository. +//! We are keen to add support for new platforms like wasm64, riscv64, and more. +//! +//! ## Adding the Subsecond badge to your project +//! +//! If you're a framework author and want your users to know that your library supports Subsecond, you +//! can add the Subsecond badge to your README! Users will know that your library is hot-reloadable and +//! can be used with Subsecond. +//! +//! [![Subsecond](https://img.shields.io/badge/Subsecond-Enabled-orange)](https://crates.io/crates/subsecond) +//! +//! ```markdown +//! [![Subsecond](https://img.shields.io/badge/Subsecond-Enabled-orange)](https://crates.io/crates/subsecond) +//! ``` +//! +//! ## License +//! +//! Subsecond and ThinLink are licensed under the MIT license. See the LICENSE file for more information. +//! +//! ## Supporting this work +//! +//! Subsecond is a project by the Dioxus team. If you'd like to support our work, please consider +//! [sponsoring us on GitHub](https://github.com/sponsors/DioxusLabs) or eventually deploying your +//! apps with Dioxus Deploy (currently under construction). 
+ +use std::{ + any::TypeId, + backtrace, + collections::HashMap, + ffi::CStr, + mem::transmute, + ops::Deref, + os::raw::c_void, + panic::{panic_any, AssertUnwindSafe, UnwindSafe}, + path::PathBuf, + sync::{Arc, Mutex}, +}; + +use js_sys::{ + ArrayBuffer, Object, Reflect, Uint32Array, Uint8Array, + WebAssembly::{self, Module}, +}; +pub use subsecond_macro::hot; +pub use subsecond_types::JumpTable; +use wasm_bindgen::UnwrapThrowExt; + +// todo: if there's a reference held while we run our patch, this gets invalidated. should probably +// be a pointer to a jump table instead, behind a cell or something. I believe Atomic + relaxed is basically a no-op +static HOTRELOAD_HANDLERS: Mutex>> = Mutex::new(Vec::new()); +static mut APP_JUMP_TABLE: Option = None; +static mut CHANGED: bool = false; +static mut SUBSECOND_ENABLED: bool = false; + +/// Call a given function with hot-reloading enabled. If the function's code changes, `call` will use +/// the new version of the function. If code *above* the function changes, this will emit a panic +/// that forces an unwind to the next [`call`] instance. +/// +/// # Example +/// +/// +/// # Without unwinding +/// +/// +/// # WebAssembly +/// +/// WASM/rust does not support unwinding, so [`call`] will not track dependency graph changes. +/// If you are building a framework for use on WASM, you will need to use `Subsecond::HotFn` directly. +/// +/// However, if you wrap your calling code in a future, you *can* simply drop the future which will +/// cause `drop` to execute and get something similar to unwinding. Not great if refcells are open. +pub fn call(f: impl FnMut() -> O) -> O { + let mut hotfn = HotFn::current(f); + + loop { + let res = std::panic::catch_unwind(AssertUnwindSafe(|| hotfn.call(()))); + + // If the call succeeds just return the result, otherwise we try to handle the panic if its our own. 
+ let err = match res { + Ok(res) => return res, + Err(err) => err, + }; + + // If this is our panic then let's handle it, otherwise we just resume unwinding + let Some(_hot_payload) = err.downcast_ref::() else { + std::panic::resume_unwind(err); + }; + + // If subsecond is in the loop, issue a breakpoint so they can try and issue a hot-patch. + unsafe { + if SUBSECOND_ENABLED { + #[cfg(any(unix, windows))] + { + dbg_breakpoint::breakpoint_if_debugging(); + } + continue; + } + } + } +} + +/// A panic issued by the [`call`] function if the caller would be stale if called. This causes +/// an unwind to the next [`call`] instance that can properly handle the panic and retry the call. +/// +/// This technique allows Subsecond to provide hot-reloading of codebases that don't have a runtime integration. +#[derive(Debug)] +pub struct HotFnPanic { + _backtrace: backtrace::Backtrace, +} + +/// A hot-reloadable function. +/// +/// To call this function, use the [`HotFn::call`] method. This will automatically use the latest +/// version of the function from the JumpTable. +pub struct HotFn +where + T: HotFunction, +{ + inner: T, + _marker: std::marker::PhantomData<(A, M)>, +} + +impl> HotFn { + /// Create a new [`HotFn`] instance with the current function. + /// + /// Whenever you call [`HotFn::call`], it will use the current function from the JumpTable. + pub const fn current(f: F) -> HotFn { + HotFn { + inner: f, + _marker: std::marker::PhantomData, + } + } + + /// Call the function with the given arguments. + /// + /// This will attempt to call the latest version of the function, panicking if it is stale. + pub fn call(&mut self, args: A) -> F::Return { + self.try_call(args).unwrap() + } + + /// Attempt to call the function with the given arguments. + /// + /// If this function is stale and can't be updated in place (ie, changes occurred above this call), + /// then this function will emit an [`HotFnPanic`] which can be unwrapped and handled by the next [`call`] + /// instance. 
+ pub fn try_call(&mut self, args: A) -> Result { + // If we need to unwind, then let's throw a panic + // This will occur when the pending patch is "over our head" and needs to be applied to a + // "resume point". We can eventually look into migrating the datastructures over but for now + // the resume point will force the struct to be re-built. + // panic_any() + + unsafe { + // Try to handle known function pointers. This is *really really* unsafe, but due to how + // rust trait objects work, it's impossible to make an arbitrary usize-sized type implement Fn() + // since that would require a vtable pointer, pushing out the bounds of the pointer size. + if size_of::() == size_of:: ()>() { + return Ok(self.inner.call_as_ptr(args)); + } + + // Handle trait objects. This will occur for sizes other than usize. Normal rust functions + // become ZST's and thus their ::call becomes a function pointer to the function. + // + // For non-zst (trait object) types, then there might be an issue. The real call function + // will likely end up in the vtable and will never be hot-reloaded since signature takes self. + if let Some(jump_table) = APP_JUMP_TABLE.as_ref() { + let known_fn_ptr = >::call_it as *const () as u64; + if let Some(ptr) = jump_table.map.get(&known_fn_ptr).cloned() { + // The type sig of the cast should match the call_it function + // Technically function pointers need to be aligned, but that alignment is 1 so we're good + let call_it = transmute::<*const (), fn(&F, A) -> F::Return>(ptr as _); + return Ok(call_it(&self.inner, args)); + } + } + + Ok(self.inner.call_it(args)) + } + } +} + +pub fn register_handler(handler: Arc) { + unsafe { + HOTRELOAD_HANDLERS.lock().unwrap().push(handler); + } +} + +/// Apply the patch using a given jump table. +/// +/// # Safety +/// +/// This function is unsafe because it detours existing functions in memory. This is *wildly* unsafe, +/// especially if the JumpTable is malformed. Only run this if you know what you're doing. 
+/// +/// If the pointers are incorrect, function type signatures will be incorrect and the program will crash, +/// sometimes in a way that requires a restart of your entire computer. Be careful. +/// +/// # Warning +/// +/// This function will load the library and thus allocates. It cannot be used when the program is +/// stopped (ie in a signal handler). +pub unsafe fn apply_patch(mut jump_table: JumpTable) { + // On non-wasm platforms we can just use libloading and the known aslr offsets to load the library + #[cfg(any(unix, windows))] + { + // Use the `aslr_offset` symbol as a sentinel for the current executable. This is basically a + // cross-platform version of `__mh_execute_header` on macOS that sets a reference point for the + // jump table. + let old_offset = aslr_reference() - jump_table.aslr_reference as usize; + + // Use the `__rust_alloc` symbol as a sentinel for the loaded library. Might want to move away + // from this at some point, or make it configurable + let new_offset = unsafe { + // Leak the library. dlopen is basically a no-op on many platforms and if we even try to drop it, + // some code might be called (ie drop) that results in really bad crashes (restart your computer...) 
+ Box::leak(Box::new(libloading::Library::new(&jump_table.lib).unwrap())) + .get::<*const ()>(b"__rust_alloc") + .ok() + .unwrap() + .try_as_raw_ptr() + .unwrap() + .wrapping_byte_sub(jump_table.new_base_address as usize) as usize + }; + + // Modify the jump table to be relative to the base address of the loaded library + jump_table.map = jump_table + .map + .iter() + .map(|(k, v)| { + ( + (*k as usize + old_offset) as u64, + (*v as usize + new_offset) as u64, + ) + }) + .collect(); + }; + + // Update runtime state + unsafe { + APP_JUMP_TABLE = Some(jump_table); + CHANGED = true; + HOTRELOAD_HANDLERS + .lock() + .unwrap() + .clone() + .iter() + .for_each(|handler| { + handler(); + }); + } +} + +#[inline(never)] +#[no_mangle] +pub extern "C" fn aslr_reference() -> usize { + aslr_reference as *const () as usize +} + +/// Apply the patch using a given jump table. +/// +/// Used on WASM platforms where we need async integration to fetch the patch. +#[cfg_attr(target_arch = "wasm32", wasm_bindgen::prelude::wasm_bindgen)] +pub async unsafe fn __subsecond_wasm_patch(pointers: Uint32Array) { + use wasm_bindgen::JsValue; + // pub async unsafe fn __subsecond_wasm_patch(value: JsValue) { + use js_sys::Uint32Array; + use subsecond_types::AddressMap; + use wasm_bindgen::prelude::*; + + let mut table: JumpTable = JumpTable { + aslr_reference: 0, + lib: PathBuf::from("patch.wasm"), + map: AddressMap::default(), + new_base_address: 0, + old_base_address: 0, + }; + + // [Log] skipping – "__dso_handle" (patch_console.js, line 1) + // [Log] skipping – "__data_end" (patch_console.js, line 1) + // [Log] skipping – "__stack_low" (patch_console.js, line 1) + // [Log] skipping – "__stack_high" (patch_console.js, line 1) + // [Log] skipping – "__global_base" (patch_console.js, line 1) + // [Log] skipping – "__heap_base" (patch_console.js, line 1) + // [Log] skipping – "__heap_end" (patch_console.js, line 1) + // [Log] skipping – "__memory_base" (patch_console.js, line 1) + // [Log] 
skipping – "__table_base" (patch_console.js, line 1) + + let mut idx = 0; + for _ in 0..pointers.length() { + let left = pointers.get_index(idx); + let right = pointers.get_index(idx + 1); + table.map.insert(left as u64, right as u64); + idx += 2 + } + + unsafe { apply_patch(table) } + + // let table = serde_wasm_bindgen::from_value::(table).unwrap_throw(); + // run_wasm_patch(table).await.unwrap_throw(); +} +// #[cfg_attr(target_arch = "wasm32", wasm_bindgen::prelude::wasm_bindgen)] +// pub async unsafe fn __subsecond_wasm_patch(table: wasm_bindgen::JsValue) { +// let table = serde_wasm_bindgen::from_value::(table).unwrap_throw(); +// run_wasm_patch(table).await.unwrap_throw(); +// } + +pub async fn run_wasm_patch(table: JumpTable) -> Result<(), wasm_bindgen::JsValue> { + use js_sys::Reflect; + use js_sys::Uint32Array; + use subsecond_types::AddressMap; + use wasm_bindgen::prelude::*; + use wasm_bindgen::JsValue; + use wasm_bindgen_futures::JsFuture; + + const WASM_PAGE_LENGTH: u32 = 65536; + + let funcs: WebAssembly::Table = wasm_bindgen::function_table().unchecked_into(); + let memory: WebAssembly::Memory = wasm_bindgen::memory().unchecked_into(); + let m: WebAssembly::Module = wasm_bindgen::module().unchecked_into(); + let exports: Object = wasm_bindgen::exports().unchecked_into(); + let buffer: Uint8Array = memory.buffer().unchecked_into(); + + let data_start = memory.grow(3) * WASM_PAGE_LENGTH; + let func_start = funcs.grow(2000)?; + let bss_start = memory.grow(3) * WASM_PAGE_LENGTH; + + let imports = Object::new(); + let download = web_sys::window() + .unwrap_throw() + .fetch_with_str(&table.lib.to_str().unwrap_throw()); + + let env = Object::new(); + + // Move exports over + for key in Object::keys(&exports) { + Reflect::set(&env, &key, &Reflect::get(&exports, &key)?)?; + } + + // Set the memory and table in the imports + for (name, value) in [ + ("__BSS_DATA_START", 0), + ("__RO_DATA_START", 0), + ("__DATA_OFFSET", 0), + ("__IFUNC_OFFSET", 0), + ] { + let 
descripor = Object::new(); + Reflect::set(&descripor, &"value".into(), &"i32".into())?; + Reflect::set(&descripor, &"mutable".into(), &false.into())?; + let value = WebAssembly::Global::new(&descripor, &0.into())?; + Reflect::set(&env, &name.into(), &value)?; + } + + // Set the memory and table in the imports + let imports = Object::new(); + Reflect::set(&imports, &"env".into(), &env)?; + + let module = JsFuture::from(WebAssembly::instantiate_streaming(&download, &imports)).await?; + + // let mut idx = 0; + // for _ in 0..pointers.length() { + // let left = pointers.get_index(idx); + // let right = pointers.get_index(idx + 1); + // table.map.insert(left as u64, right as u64); + // idx += 2 + // } + + // window.patch = patch; + + // // We're going to match up export to export and then ifunc entry to ifunc entry + // // We're going to build a map of old -> new ifunc entries + // const patchExports = patch.instance.exports; + + // let nameToNativeMain = Object.fromEntries( + // Object.keys(wasmExports).map((key) => [key, wasmExports[key].name]).filter(([key, name]) => name !== undefined) + // ); + + // let nameToNativePatch = Object.fromEntries( + // Object.keys(patchExports).map((key) => [key, patchExports[key].name]).filter(([key, name]) => name !== undefined) + // ); + + // let nativeToIndex = Object.fromEntries( + // [...Array(wasmExports.__indirect_function_table.length).keys()].map((i) => { + // let entry = wasmExports.__indirect_function_table.get(i); + // if (entry === null) { + // return ["abcbac", 0]; + // } + // if (entry.name === undefined) { + // return ["abcbac", 0]; + // } + // return [entry.name, i]; + // }) + // ); + + // let jumpTable = Object.fromEntries( + // Object.entries(nameToNativePatch) + // .map(([fnName, nativeName]) => { + // let oldIndex = nativeToIndex[nameToNativeMain[fnName]]; + // let newIndex = nativeToIndex[nativeName]; + // return [fnName, [oldIndex, newIndex]]; + // }) + // .filter(([name, [oldIndex, newIndex]]) => + // oldIndex 
!== undefined && newIndex !== undefined + // ) + // ); + + // window.jumpTable = jumpTable; + + // let patchList = Object.keys(patchExports).flatMap((key) => { + // let entry = jumpTable[key]; + // if (entry === undefined) { + // return []; + // } + // let a = entry[0]; + // let b = entry[1]; + + // if (a === undefined || b === undefined) { + // return []; + // } + + // // console.log("Patching", key, "from", a, "to", b); + + // return [a, b]; + // }); + // console.log("Patching: ", patchList); + // base["__subsecond_wasm_patch"](patchList); + + // unsafe { apply_patch(table) } + todo!() +} + +/// A trait that enables types to be hot-patched. +/// +/// This trait is only implemented for FnMut types which naturally includes function pointers and +/// closures that can be re-ran. FnOnce closures are currently not supported since the hot-patching +/// system we use implies that the function can be called multiple times. +pub trait HotFunction { + /// The return type of the function. + type Return; + + /// The real function type. This is meant to be a function pointer. + /// When we call `call_as_ptr`, we will transmute the function to this type and call it. + type Real; + + /// Call the HotFunction with the given arguments. + /// + /// + /// # Why + /// + /// "rust-call" isn't stable, so we wrap the underyling call with our own, giving it a stable vtable entry. + /// This is more important than it seems since this function becomes "real" and can be hot-patched. + fn call_it(&mut self, args: Args) -> Self::Return; + + /// Call the HotFunction as if it were a function pointer. + /// + /// # Safety + /// + /// This is only safe if the underyling type is a function (function pointer or virtual/fat pointer). + /// Using this will use the JumpTable to find the patched function and call it. + unsafe fn call_as_ptr(&mut self, _args: Args) -> Self::Return; +} + +macro_rules! 
impl_hot_function { + ( + $( + ($marker:ident, $($arg:ident),*) + ),* + ) => { + $( + /// A marker type for the function. + /// This is hidden with the intention to seal this trait. + #[doc(hidden)] + pub struct $marker; + + impl HotFunction<($($arg,)*), $marker> for T + where + T: FnMut($($arg),*) -> R, + { + type Return = R; + type Real = fn($($arg),*) -> R; + + fn call_it(&mut self, args: ($($arg,)*)) -> Self::Return { + #[allow(non_snake_case)] + let ( $($arg,)* ) = args; + self($($arg),*) + } + + unsafe fn call_as_ptr(&mut self, args: ($($arg,)*)) -> Self::Return { + unsafe { + if let Some(jump_table) = APP_JUMP_TABLE.as_ref() { + let real = std::mem::transmute_copy::(&self) as *const (); + + // Android implements MTE / pointer tagging and we need to preserve the tag. + // If we leave the tag, then indexing our jump table will fail and patching won't work (or crash!) + // This is only implemented on 64-bit platforms since pointer tagging is not available on 32-bit platforms + // In dev, Dioxus disables MTE to work around this issue, but we still handle it anyways. 
+ #[cfg(target_pointer_width = "64")] let nibble = real as u64 & 0xFF00_0000_0000_0000; + #[cfg(target_pointer_width = "64")] let real = real as u64 & 0x00FFF_FFF_FFFF_FFFF; + + #[cfg(target_pointer_width = "64")] let real = real as u64; + + // No nibble on 32-bit platforms, but we still need to assume u64 since the host always writes 64-bit pointers + #[cfg(target_pointer_width = "32")] let real = real as u64; + + if let Some(ptr) = jump_table.map.get(&real).cloned() { + // Re-apply the nibble - though this might not be required (we aren't calling malloc for a new pointer) + // #[cfg(target_pointer_width = "64")] let ptr: u64 = ptr | nibble; + + #[cfg(target_pointer_width = "64")] let ptr: u64 = ptr; + #[cfg(target_pointer_width = "32")] let ptr: u32 = ptr as u32; + + // Macro-rules requires unpacking the tuple before we call it + #[allow(non_snake_case)] + let ( $($arg,)* ) = args; + return std::mem::transmute::<_, Self::Real>(ptr)($($arg),*); + } + } + + self.call_it(args) + } + } + } + )* + }; +} + +impl_hot_function!( + (Fn0Marker,), + (Fn1Marker, A), + (Fn2Marker, A, B), + (Fn3Marker, A, B, C), + (Fn4Marker, A, B, C, D), + (Fn5Marker, A, B, C, D, E), + (Fn6Marker, A, B, C, D, E, F), + (Fn7Marker, A, B, C, D, E, F, G), + (Fn8Marker, A, B, C, D, E, F, G, H), + (Fn9Marker, A, B, C, D, E, F, G, H, I) +); diff --git a/packages/web/Cargo.toml b/packages/web/Cargo.toml index 9c37dc5090..0f8d998485 100644 --- a/packages/web/Cargo.toml +++ b/packages/web/Cargo.toml @@ -41,6 +41,7 @@ serde-wasm-bindgen = { version = "0.5.0", optional = true } ciborium = { workspace = true, optional = true } async-trait = { version = "0.1.58", optional = true } +subsecond = { workspace = true } [dependencies.web-sys] version = "0.3.70" diff --git a/packages/web/src/devtools.rs b/packages/web/src/devtools.rs index 606866c804..e032eae2dc 100644 --- a/packages/web/src/devtools.rs +++ b/packages/web/src/devtools.rs @@ -9,12 +9,12 @@ use std::time::Duration; use 
dioxus_core::prelude::RuntimeGuard; use dioxus_core::{Runtime, ScopeId}; -use dioxus_devtools::{DevserverMsg, HotReloadMsg}; +use dioxus_devtools::{ClientMsg, DevserverMsg, HotReloadMsg}; use dioxus_document::eval; use futures_channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; use js_sys::JsString; -use wasm_bindgen::JsCast; use wasm_bindgen::{closure::Closure, JsValue}; +use wasm_bindgen::{prelude::wasm_bindgen, JsCast}; use web_sys::{window, CloseEvent, MessageEvent, WebSocket}; const POLL_INTERVAL_MIN: i32 = 250; @@ -59,6 +59,7 @@ fn make_ws( // Set the onmessage handler to bounce messages off to the main dioxus loop let tx_ = tx.clone(); let runtime_ = runtime.clone(); + let ws_tx = ws.clone(); ws.set_onmessage(Some( Closure::::new(move |e: MessageEvent| { let Ok(text) = e.data().dyn_into::() else { @@ -67,7 +68,7 @@ fn make_ws( // The devserver messages have some &'static strs in them, so we need to leak the source string let string: String = text.into(); - // let leaked: &'static str = Box::leak(Box::new(string)); + let string = Box::leak(string.into_boxed_str()); match serde_json::from_str::(&string) { Ok(DevserverMsg::HotReload(hr)) => _ = tx_.unbounded_send(hr), @@ -162,7 +163,14 @@ fn make_ws( ws.set_onopen(Some( Closure::::new(move |_evt| { if reload { - window().unwrap().location().reload().unwrap() + window().unwrap().location().reload().unwrap(); + } else { + ws_tx.send_with_str( + &serde_json::to_string(&ClientMsg::Initialize { + aslr_reference: subsecond::aslr_reference() as _, + }) + .unwrap(), + ); } }) .into_js_value() @@ -218,6 +226,11 @@ fn show_toast( false => "showDXToast", }; + // #[wasm_bindgen::prelude::wasm_bindgen(inline_js = r#" + // console.log("hello"); + // "#)] + // pub fn show_test() {} + // Create the guard before running eval which uses the global runtime context let _guard = RuntimeGuard::new(runtime); ScopeId::ROOT.in_runtime(|| { diff --git a/packages/web/src/document.rs b/packages/web/src/document.rs index 
b97402e7e5..d7e9d2c5c4 100644 --- a/packages/web/src/document.rs +++ b/packages/web/src/document.rs @@ -31,6 +31,16 @@ impl JSOwner { } } } +#[wasm_bindgen::prelude::wasm_bindgen(module = "/src/js/eval.js")] +extern "C" { + pub type WeakDioxusChannel; + + #[wasm_bindgen(method, js_name = "rustSend")] + pub fn rust_send(this: &WeakDioxusChannel, value: wasm_bindgen::JsValue); + + #[wasm_bindgen(method, js_name = "rustRecv")] + pub async fn rust_recv(this: &WeakDioxusChannel) -> wasm_bindgen::JsValue; +} #[wasm_bindgen::prelude::wasm_bindgen(module = "/src/js/eval.js")] extern "C" { @@ -54,13 +64,6 @@ extern "C" { #[wasm_bindgen(method)] pub fn weak(this: &WebDioxusChannel) -> WeakDioxusChannel; - pub type WeakDioxusChannel; - - #[wasm_bindgen(method, js_name = "rustSend")] - pub fn rust_send(this: &WeakDioxusChannel, value: wasm_bindgen::JsValue); - - #[wasm_bindgen(method, js_name = "rustRecv")] - pub async fn rust_recv(this: &WeakDioxusChannel) -> wasm_bindgen::JsValue; } /// Provides the Document through [`ScopeId::provide_context`]. 
diff --git a/packages/web/src/js/eval.js b/packages/web/src/js/eval.js index 111158c3ea..7da39ed47c 100644 --- a/packages/web/src/js/eval.js +++ b/packages/web/src/js/eval.js @@ -1 +1 @@ -class Channel{pending;waiting;constructor(){this.pending=[],this.waiting=[]}send(data){if(this.waiting.length>0){this.waiting.shift()(data);return}this.pending.push(data)}async recv(){return new Promise((resolve,_reject)=>{if(this.pending.length>0){resolve(this.pending.shift());return}this.waiting.push(resolve)})}}class WeakDioxusChannel{inner;constructor(channel){this.inner=new WeakRef(channel)}rustSend(data){let channel=this.inner.deref();if(channel)channel.rustSend(data)}async rustRecv(){let channel=this.inner.deref();if(channel)return await channel.rustRecv()}}class DioxusChannel{weak(){return new WeakDioxusChannel(this)}}class WebDioxusChannel extends DioxusChannel{js_to_rust;rust_to_js;owner;constructor(owner){super();this.owner=owner,this.js_to_rust=new Channel,this.rust_to_js=new Channel}weak(){return new WeakDioxusChannel(this)}async recv(){return await this.rust_to_js.recv()}send(data){this.js_to_rust.send(data)}rustSend(data){this.rust_to_js.send(data)}async rustRecv(){return await this.js_to_rust.recv()}}export{WebDioxusChannel}; +class Channel{pending;waiting;constructor(){this.pending=[],this.waiting=[]}send(data){if(this.waiting.length>0){this.waiting.shift()(data);return}this.pending.push(data)}async recv(){return new Promise((resolve,_reject)=>{if(this.pending.length>0){resolve(this.pending.shift());return}this.waiting.push(resolve)})}}class WeakDioxusChannel{inner;constructor(channel){this.inner=new WeakRef(channel)}rustSend(data){let channel=this.inner.deref();if(channel)channel.rustSend(data)}async rustRecv(){let channel=this.inner.deref();if(channel)return await channel.rustRecv()}}class DioxusChannel{weak(){return new WeakDioxusChannel(this)}}class WebDioxusChannel extends 
DioxusChannel{js_to_rust;rust_to_js;owner;constructor(owner){super();this.owner=owner,this.js_to_rust=new Channel,this.rust_to_js=new Channel}weak(){return new WeakDioxusChannel(this)}async recv(){return await this.rust_to_js.recv()}send(data){this.js_to_rust.send(data)}rustSend(data){this.rust_to_js.send(data)}async rustRecv(){return await this.js_to_rust.recv()}}export{WebDioxusChannel,WeakDioxusChannel}; diff --git a/packages/web/src/js/hash.txt b/packages/web/src/js/hash.txt index 5002e6c9aa..ff6c0119db 100644 --- a/packages/web/src/js/hash.txt +++ b/packages/web/src/js/hash.txt @@ -1 +1 @@ -[1614426347475783279] +[3447431072648601413] \ No newline at end of file diff --git a/packages/web/src/ts/eval.ts b/packages/web/src/ts/eval.ts index 51396b63a6..9fcf63a870 100644 --- a/packages/web/src/ts/eval.ts +++ b/packages/web/src/ts/eval.ts @@ -4,6 +4,7 @@ import { WeakDioxusChannel, } from "../../../document/src/ts/eval"; +export { WeakDioxusChannel }; export class WebDioxusChannel extends DioxusChannel { js_to_rust: Channel; rust_to_js: Channel;