diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 49ba0900..efb0ccad 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -111,4 +111,4 @@ jobs:
       - name: cargo check
         run: |
           rm -f Cargo.lock
-          cargo +nightly check -Z minimal-versions --workspace --all-features --lib --bins
+          cargo +nightly check -Z minimal-versions -p quic-rpc -p quic-rpc-derive --all-features --lib --bins
diff --git a/CHANGELOG.md b/CHANGELOG.md
deleted file mode 100644
index 64f9d3b4..00000000
--- a/CHANGELOG.md
+++ /dev/null
@@ -1,296 +0,0 @@
-# Changelog
-
-All notable changes to quic-rpc will be documented in this file.
-
-## [unreleased]
-
-### ⛰️ Features
-
-- Update to iroh@0.29.0 - ([02b2559](https://github.com/n0-computer/iroh/commit/02b25594bbfd210acb29b65c59d80c3063565193))
-- Update to iroh@0.29.0 ([#125](https://github.com/n0-computer/iroh/issues/125)) - ([07f1335](https://github.com/n0-computer/iroh/commit/07f1335f1616359b93ee60277a80a07df7552e18))
-
-## [0.16.0](https://github.com/n0-computer/iroh/compare/v0.15.1..v0.16.0) - 2024-12-02
-
-### ⛰️ Features
-
-- Use postcard encoding for all transports that require serialization ([#114](https://github.com/n0-computer/iroh/issues/114)) - ([badb606](https://github.com/n0-computer/iroh/commit/badb6068db23e6262c183ef8981228fd8ca1ef61))
-
-### 🚜 Refactor
-
-- Rename iroh-net transport to iroh-transport - ([7ee875b](https://github.com/n0-computer/iroh/commit/7ee875b5a1cdf2bbfa377564d7a3c1792876f780))
-- Rename iroh-net transport to iroh-transport ([#123](https://github.com/n0-computer/iroh/issues/123)) - ([69e7c4a](https://github.com/n0-computer/iroh/commit/69e7c4a4f1c90533db61c32eb7145073f0bb1659))
-
-### ⚙️ Miscellaneous Tasks
-
-- Update rcgen - ([27287e1](https://github.com/n0-computer/iroh/commit/27287e13fa125234898d9aabd7d9d640aba92a36))
-- Update rcgen ([#118](https://github.com/n0-computer/iroh/issues/118)) - ([2e1daa9](https://github.com/n0-computer/iroh/commit/2e1daa91552f99c448f6508fb55630f2933ee705))
-- Prune some deps ([#119](https://github.com/n0-computer/iroh/issues/119)) - ([dc75b95](https://github.com/n0-computer/iroh/commit/dc75b951bcd6b3b2239ab7a71e2fedcd12152853))
-- Remove `cc` version pinning - ([6da6783](https://github.com/n0-computer/iroh/commit/6da6783ca95f90e38f22091d0d5c8e6b13f6a3ec))
-- Remove `cc` version pinning ([#122](https://github.com/n0-computer/iroh/issues/122)) - ([a5606c2](https://github.com/n0-computer/iroh/commit/a5606c260275d433f00c3aed2fb57ed082900c38))
-- Release 0.16.0 ([#124](https://github.com/n0-computer/iroh/issues/124)) - ([a19ce1b](https://github.com/n0-computer/iroh/commit/a19ce1be542382a65ca7c56843de49a108bf21db))
-
-## [0.15.1](https://github.com/n0-computer/iroh/compare/v0.15.0..v0.15.1) - 2024-11-14
-
-### ⛰️ Features
-
-- Accept handler ([#116](https://github.com/n0-computer/iroh/issues/116)) - ([32d5bc1](https://github.com/n0-computer/iroh/commit/32d5bc1a08609f4f0b5650980088f07d81971a55))
-
-### ⚙️ Miscellaneous Tasks
-
-- Consistently format imports... - ([33fb08b](https://github.com/n0-computer/iroh/commit/33fb08b417874bfdce79c8a5d3972aee1ca7ba8b))
-- Consistently format imports...
([#113](https://github.com/n0-computer/iroh/issues/113)) - ([08750c5](https://github.com/n0-computer/iroh/commit/08750c54cd82295e5819eeefd18d9f904fc51a02)) -- Introduce a .rustfmt.toml file with configs for automatic formatting ([#115](https://github.com/n0-computer/iroh/issues/115)) - ([a949899](https://github.com/n0-computer/iroh/commit/a949899deac2f626c03452028a86f9420bc93530)) - -## [0.15.0](https://github.com/n0-computer/iroh/compare/v0.14.0..v0.15.0) - 2024-11-06 - -### โš™๏ธ Miscellaneous Tasks - -- Release - ([be04be1](https://github.com/n0-computer/iroh/commit/be04be152a2be26cc6752e76e947b50f3b5da958)) - -## [0.14.0](https://github.com/n0-computer/iroh/compare/v0.12.0..v0.14.0) - 2024-11-04 - -### โ›ฐ๏ธ Features - -- Upgrade iroh-quinn to 0.12.0 ([#109](https://github.com/n0-computer/iroh/issues/109)) - ([a5fecdc](https://github.com/n0-computer/iroh/commit/a5fecdcd3b60d581328106dc79246a4b2b609709)) - -### ๐Ÿ› Bug Fixes - -- Transport:quinn only spawn when tokio is available ([#95](https://github.com/n0-computer/iroh/issues/95)) - ([baa4f83](https://github.com/n0-computer/iroh/commit/baa4f837f161ecd80c3a6d46fad1788aadf06049)) - -### โš™๏ธ Miscellaneous Tasks - -- Release - ([10a16f7](https://github.com/n0-computer/iroh/commit/10a16f72a3aca254259879bba611d099f9e9edf5)) -- Release ([#96](https://github.com/n0-computer/iroh/issues/96)) - ([277cde1](https://github.com/n0-computer/iroh/commit/277cde1fec1a341f35ed9b1ad5d9eda252c6ee9d)) -- New version for quic-rpc-derive as well ([#104](https://github.com/n0-computer/iroh/issues/104)) - ([39f5b20](https://github.com/n0-computer/iroh/commit/39f5b2014d48300f4308a6451cb378725c9926c0)) -- Release - ([64e0a7d](https://github.com/n0-computer/iroh/commit/64e0a7d1a9e9127fe8b6449964af10984097f186)) - -### Deps - -- Remove direct rustls dependency - ([f67c218](https://github.com/n0-computer/iroh/commit/f67c2189c1c8a8b8fa9f902877a74d64a38e994c)) -- Remove direct rustls dependency ([#94](https://github.com/n0-computer/iroh/issues/94)) - ([fe08b15](https://github.com/n0-computer/iroh/commit/fe08b157ae162ec71ca8ad77efea300132abce77)) - -## [0.12.0](https://github.com/n0-computer/iroh/compare/v0.10.1..v0.12.0) - 2024-08-15 - -### โ›ฐ๏ธ Features - -- WIP First somewhat working version of attribute macro to declare services - ([bab7fe0](https://github.com/n0-computer/iroh/commit/bab7fe083fdaf6ee81c4f17f2b96a5e1f9d18011)) - -### ๐Ÿšœ Refactor - -- Use two stage accept - ([ac8f358](https://github.com/n0-computer/iroh/commit/ac8f358e0ea623a9906402639a0882794b0a06d0)) -- Use two stage everywhere - ([b3c37ff](https://github.com/n0-computer/iroh/commit/b3c37ff88de533c6edb9c457a8c5ddd1f713bbf9)) -- Use two stage accept ([#87](https://github.com/n0-computer/iroh/issues/87)) - ([c2520b8](https://github.com/n0-computer/iroh/commit/c2520b85f5fd37b78bd0fc4f87c2989605209bac)) -- Remove the interprocess transport - ([bd72cdc](https://github.com/n0-computer/iroh/commit/bd72cdcceba82fe9953413e050d86e2f528f93ea)) - -### โš™๏ธ Miscellaneous Tasks - -- Release - ([e0b50a1](https://github.com/n0-computer/iroh/commit/e0b50a1d1e1f0b5865b1aff01d43ac4920ae3b44)) -- Release ([#82](https://github.com/n0-computer/iroh/issues/82)) - ([3b01e85](https://github.com/n0-computer/iroh/commit/3b01e856822aca75126fbdd8ae640f2a440f80e8)) -- Upgrade to Quinn 0.11 and Rustls 0.23 - ([220aa35](https://github.com/n0-computer/iroh/commit/220aa35b69e178361c53567f37416c0852593cbd)) -- Upgrade to Quinn 0.11 and Rustls 0.23 - 
([1c7e3c6](https://github.com/n0-computer/iroh/commit/1c7e3c6d98d38d92a13253486b565260b89686ba)) -- Upgrade to Quinn 0.11 and Rustls 0.23 - ([2221339](https://github.com/n0-computer/iroh/commit/2221339d5e98cb2f8952b303c5ead24c8030f1f8)) -- Release - ([50dde15](https://github.com/n0-computer/iroh/commit/50dde1542ad12b24154eadb5c8bf7d713d79495f)) -- Release ([#93](https://github.com/n0-computer/iroh/issues/93)) - ([9066a40](https://github.com/n0-computer/iroh/commit/9066a403feed8277503d6f2a512a834b7fcafef7)) - -### Deps - -- Upgrade to Quinn 0.11 and Rustls 0.23 ([#92](https://github.com/n0-computer/iroh/issues/92)) - ([93e64ab](https://github.com/n0-computer/iroh/commit/93e64ab904922a1879f6dcf58dca763b4d038070)) - -## [0.10.1](https://github.com/n0-computer/iroh/compare/v0.10.0..v0.10.1) - 2024-05-24 - -### โ›ฐ๏ธ Features - -- Update and cleanup deps ([#80](https://github.com/n0-computer/iroh/issues/80)) - ([eba3a06](https://github.com/n0-computer/iroh/commit/eba3a06a1c8a8ac2f74b1e6dde02220f935e9be7)) - -### โš™๏ธ Miscellaneous Tasks - -- Release - ([af9272b](https://github.com/n0-computer/iroh/commit/af9272b17a896056d5f02565e4090ecfdbe000b8)) - -## [0.10.0](https://github.com/n0-computer/iroh/compare/v0.8.0..v0.10.0) - 2024-05-21 - -### ๐Ÿ› Bug Fixes - -- Downgrade derive_more to non beta - ([9235fdf](https://github.com/n0-computer/iroh/commit/9235fdfe0efc4cbcd9694c248d1f112f32000666)) - -### ๐Ÿšœ Refactor - -- Fix hyper and combined transports - ([a56099e](https://github.com/n0-computer/iroh/commit/a56099e8e557776d0dc41f6c45006f879fb88f05)) -- [**breaking**] Use `Service` generic in transport connections ([#76](https://github.com/n0-computer/iroh/issues/76)) - ([64ed5ef](https://github.com/n0-computer/iroh/commit/64ed5efea314a785ed9890fb78f857dadba3dc85)) - -### โš™๏ธ Miscellaneous Tasks - -- Clippy - ([38601de](https://github.com/n0-computer/iroh/commit/38601de6e19e52723b30a200d1c22621c1d772f2)) -- Update derive-more - ([78a3250](https://github.com/n0-computer/iroh/commit/78a32506214cfa564c06fac4f468952c22734c0c)) -- Fix merge issue - ([4e8deef](https://github.com/n0-computer/iroh/commit/4e8deef7c02d0e1b3a8dbfc1d5484636eadd9a21)) -- Update Cargo.lock - ([89f1b65](https://github.com/n0-computer/iroh/commit/89f1b65718b00f8a3880bc6f799d308693c88507)) -- Release - ([92f472b](https://github.com/n0-computer/iroh/commit/92f472bf34611cff39f238e7b0f45ca65287b9f8)) - -### Deps - -- Depend on iroh-quinn and bump version ([#75](https://github.com/n0-computer/iroh/issues/75)) - ([df54382](https://github.com/n0-computer/iroh/commit/df5438284ec30b5b5b527932b39a48659e4518e5)) - -### Wip - -- Easier generics - ([6d710b7](https://github.com/n0-computer/iroh/commit/6d710b74cf45d8a04cbbcd9d803718b9aceb8012)) - -## [0.8.0] - 2024-04-24 - -### โ›ฐ๏ธ Features - -- *(ci)* Add minimal crates version check - ([00b6d12](https://github.com/n0-computer/iroh/commit/00b6d1203e31a57c63f21fc59f8358fc3cdbfd42)) -- *(http2)* Shut down the hyper server on drop. 
- ([124591a](https://github.com/n0-computer/iroh/commit/124591a336df5399cb102da99cfb213159947317)) -- *(http2)* Shut down the hyper server on drop - ([cd86839](https://github.com/n0-computer/iroh/commit/cd868396ffd73bda310971ff59624e3d68ba8a3e)) -- *(http2)* Move serialization and deserialization closer to the user - ([c1742a5](https://github.com/n0-computer/iroh/commit/c1742a5cb6698e8067844a6944fb5d07bfd3585d)) -- *(http2)* Add config to http2 channel - ([8a98ba7](https://github.com/n0-computer/iroh/commit/8a98ba7cb4db3e258ae65f0c568e8524fc26fb4a)) -- Add optional macros to reduce boilerplate - ([0010a98](https://github.com/n0-computer/iroh/commit/0010a9861e956a31fe609e84d2da3569c535eb42)) -- Allow to split RPC types into a seperate crate. - ([1564ad8](https://github.com/n0-computer/iroh/commit/1564ad81ec2333732ce7268970ca76ced2ea59a5)) -- Generate code only if needed and add client - ([e4d6d91](https://github.com/n0-computer/iroh/commit/e4d6d91d3006666ef0e733b8cf97bc8c51d1bb79)) -- Better docs for macro-generated items - ([bbf8e97](https://github.com/n0-computer/iroh/commit/bbf8e97e9aafbe74eb1ee60b0fe3bfd997394bf3)) -- Add convenience function for handling errors in rpc calls - ([26283b1](https://github.com/n0-computer/iroh/commit/26283b1ebc2c68d0e4847856d1b07ae55fd7036e)) -- Lazy connections, take 2 - ([3ed491e](https://github.com/n0-computer/iroh/commit/3ed491ed3857e9ae495144df696b968d342ac91e)) -- More efficient channel replacement - ([6d5808d](https://github.com/n0-computer/iroh/commit/6d5808df40a7ec008d79ab7f8553cc0253a31c33)) -- Allow lazy connect in client of split example - ([97b7e1a](https://github.com/n0-computer/iroh/commit/97b7e1af7c517a70c270418a4d9c18bd58440461)) -- Implement std::error::Error for combined channel errors - ([d7828e3](https://github.com/n0-computer/iroh/commit/d7828e31395dd9a096c4afc128eae03357c4db2e)) -- Add minimal tracing support - ([850bde1](https://github.com/n0-computer/iroh/commit/850bde116a93d881294e7d80ac007137d075ba61)) -- Add minimal tracing support - ([2de50d5](https://github.com/n0-computer/iroh/commit/2de50d52dcefd3fb1e5d557f7ef8f170481038e8)) -- Make channels configurable - ([11d3071](https://github.com/n0-computer/iroh/commit/11d30715fc7912ba08e755a57138bc36cf675bfc)) -- Expose local address of ServerChannel - ([b67dcdb](https://github.com/n0-computer/iroh/commit/b67dcdb87f50a349a9977c1dc662d321f3ac68d2)) -- Add dummy endpoint - ([48cf5db](https://github.com/n0-computer/iroh/commit/48cf5db8af9d21418bc3b306d9b27dd27c6e8146)) -- Add ability to creaete a RpcClient from a single connection - ([7a4f40f](https://github.com/n0-computer/iroh/commit/7a4f40f2ddaea0e858c12a882a19302c9c3ca853)) -- Add AsRef and into_inner for RpcClient and RpcServer - ([ea8e119](https://github.com/n0-computer/iroh/commit/ea8e1195fda510b470feaf638f519f185151e8d6)) -- Update quinn and rustls - ([20679f9](https://github.com/n0-computer/iroh/commit/20679f938d1c257cb51d64f46ba39cc46c580a73)) -- Make wasm compatible ([#49](https://github.com/n0-computer/iroh/issues/49)) - ([6cbf62b](https://github.com/n0-computer/iroh/commit/6cbf62b2fdf150dca6a261dfdb16e338c7bd7cd0)) -- Add additional `Sync` bounds to allow for better reuse of streams - ([54c4ade](https://github.com/n0-computer/iroh/commit/54c4adeef2c851cbe2e6ac542d221b6f020a430c)) -- Add additional `Sync` bounds to allow for better reuse of streams ([#68](https://github.com/n0-computer/iroh/issues/68)) - ([bc589b7](https://github.com/n0-computer/iroh/commit/bc589b7a49e277b6847cb01675e92984152d033f)) -- Allow to compose RPC 
services ([#67](https://github.com/n0-computer/iroh/issues/67)) - ([77785a2](https://github.com/n0-computer/iroh/commit/77785a21babe4e56d28541d1b3ba401dcf366441)) - -### ๐Ÿ› Bug Fixes - -- *(ci)* Cancel stale repeat jobs ([#64](https://github.com/n0-computer/iroh/issues/64)) - ([d9b385c](https://github.com/n0-computer/iroh/commit/d9b385ce8ba66430c6a2744c0f595a74a9e3d578)) -- *(http2)* Don't log normal occurrences as errors - ([a4e76da](https://github.com/n0-computer/iroh/commit/a4e76da0fbc53381a9d97fbaf3df336b2bc04e0d)) -- Consistent naming - ([a23fc78](https://github.com/n0-computer/iroh/commit/a23fc784f30a9eae6314f4b2cc6151dcfae7b215)) -- Add docs to macro - ([c868bc8](https://github.com/n0-computer/iroh/commit/c868bc8c297ae9bb3cae5ba467e703a8bf1b135f)) -- Improve docs - ([1957794](https://github.com/n0-computer/iroh/commit/19577949db0becd2da2dc0d206bbb1a91daa5722)) -- Docs typos - ([a965fce](https://github.com/n0-computer/iroh/commit/a965fceba8558e144f15fb338d1d7586453fbfb5)) -- Derive Debug for generated client - ([9e72faf](https://github.com/n0-computer/iroh/commit/9e72faf76746f7e7cdc3e76d04a7c87c3bf03a8c)) -- Rename macro to rpc_service - ([bdb71c1](https://github.com/n0-computer/iroh/commit/bdb71c1659a58a41f11393e1c18f58e8f74e6206)) -- Hide the exports also behind feature flags - ([4cff83d](https://github.com/n0-computer/iroh/commit/4cff83dd9741e4fd3ae8982f96e203839dd17ec4)) -- Get rid of get rid of channel!!! - ([e0b504d](https://github.com/n0-computer/iroh/commit/e0b504de6c81054996aa6271f1bae319046d405f)) -- Add additional framing to messages - ([8a6038e](https://github.com/n0-computer/iroh/commit/8a6038e6f478f7782158c7a0e771672b9bf9722b)) -- Do buffering in the forwarder - ([4e3d9fd](https://github.com/n0-computer/iroh/commit/4e3d9fd841396aaf0cb4024017605cef19b2cacd)) -- Add flume dependency to quinn-transport - ([e64ba0b](https://github.com/n0-computer/iroh/commit/e64ba0b4e6725d4b1800fdd2c4b12bd1b39a97f8)) -- Call wait_idle on endpoint drop to allow for time for the close msg to be sent - ([7ba3bee](https://github.com/n0-computer/iroh/commit/7ba3bee8f6fcc92f761964582f684072fb8a1bd0)) -- Update MSRV to 1.65 - ([3cb7870](https://github.com/n0-computer/iroh/commit/3cb7870ffc1783290d536c74fe7d5481aa935a8b)) -- Explicitly close channel when client is dropped - ([2c81d23](https://github.com/n0-computer/iroh/commit/2c81d2307d994790ca67a7758953b748133c6655)) -- Do not use macros in tests - ([596a426](https://github.com/n0-computer/iroh/commit/596a426e20fd1e5ada2eebc2c2256d202700dc5d)) -- Do two forwarder tasks - ([2e334f3](https://github.com/n0-computer/iroh/commit/2e334f345d5d2a82bb425c993478772e63d11dfe)) -- Add explicit proc-macro dependency so the minimal-versions test works - ([cf2045c](https://github.com/n0-computer/iroh/commit/cf2045c6b7f25ddf84c1e99776cfcec60eae5778)) -- Make socket name os comaptible - ([ec3314c](https://github.com/n0-computer/iroh/commit/ec3314c8410a07aa66cf2b1792262902b94caa95)) -- Nightly failures ([#66](https://github.com/n0-computer/iroh/issues/66)) - ([865622e](https://github.com/n0-computer/iroh/commit/865622e99c0618dca16e530b5a44fe3f1f2bdced)) -- Rpc client concurrently waits for requests and new connections ([#62](https://github.com/n0-computer/iroh/issues/62)) - ([3323574](https://github.com/n0-computer/iroh/commit/3323574c972dbdf4dc4e9ae81ab8f32d27b7f3c2)) -- Try to make client streaming and bidi streaming work - ([2bb27d0](https://github.com/n0-computer/iroh/commit/2bb27d0b9ae912203f3292c527863e8203bbc619)) - -### ๐Ÿšœ Refactor - -- *(http2)* Use a 
slice instead of a vec for the local addr - ([0d79990](https://github.com/n0-computer/iroh/commit/0d79990aacf48c1083aed30a3718ea35fb3225be)) -- *(mem)* Return &[LocalAddr::Mem] directly - ([2004c46](https://github.com/n0-computer/iroh/commit/2004c461cfbbc6c3c4b0caf8c953f810accb10b1)) -- Move more code out of the macro - ([796dccd](https://github.com/n0-computer/iroh/commit/796dccd183904659c8b2bfacf0ee33dae4967790)) -- One less &mut - ([53af0f9](https://github.com/n0-computer/iroh/commit/53af0f90cd9288a6a27d25558495b9f1091698d3)) -- Add some mut again - ([aab91dc](https://github.com/n0-computer/iroh/commit/aab91dc99efcbc472c405ad9e7862fc24723b8ee)) -- Error remodeling - ([6bad622](https://github.com/n0-computer/iroh/commit/6bad6228be19c111e052ea30f4334c469173414b)) -- Make lazy client a separate example - ([e92771e](https://github.com/n0-computer/iroh/commit/e92771ecbb7aad5c9442396bb351984f86ee94fa)) -- Make the lazy example simpler - ([86c2b94](https://github.com/n0-computer/iroh/commit/86c2b942c8328a0edd6154574f22936518aabed1)) -- Round and round we go - ([37a5703](https://github.com/n0-computer/iroh/commit/37a5703f09a36fa5587bb7a92e82133a588b41bf)) -- Remove dead code - ([7d7ac15](https://github.com/n0-computer/iroh/commit/7d7ac154e5c7149318628e41887cd07838a52438)) -- Add ClientChannelInner - ([8aafe34](https://github.com/n0-computer/iroh/commit/8aafe347c5390fb82f4e1033d389e3408a3d3ef9)) -- WIP make both server and client side take quinn endpoints - ([4d99d71](https://github.com/n0-computer/iroh/commit/4d99d712448ef17719255f2bd9c762c4a4e8b2f4)) -- WIP make benches work - ([639883c](https://github.com/n0-computer/iroh/commit/639883c7866d8de2149c932c62ab0e9638ed28e8)) -- Make the par bench work without the multithreaded executor - ([ee62f93](https://github.com/n0-computer/iroh/commit/ee62f9383c98f3705e50a34cd52b1053cc33d366)) -- WIP add substream source - ([6070939](https://github.com/n0-computer/iroh/commit/6070939fb36d9ed5a5dea9de5f301bc44f5e930d)) -- WIP channel source - ([45fb792](https://github.com/n0-computer/iroh/commit/45fb792fbd226ee3dc851d8202d0b6df371adb2b)) -- Combined has won - ([b743bf1](https://github.com/n0-computer/iroh/commit/b743bf138ad0ac600556fff692386ed4fce78610)) -- Move some things around - ([3ceade3](https://github.com/n0-computer/iroh/commit/3ceade3d4025b2d3f257a955f998e3892962f066)) -- Get rid of some boxes - ([ff786ab](https://github.com/n0-computer/iroh/commit/ff786ab152812a85b0907a5ea8504b6eda292ccb)) -- Rename transports to the crate they wrap - ([bd65fe6](https://github.com/n0-computer/iroh/commit/bd65fe6b3f23dc336150af817c821c9548f4c1e7)) -- Get rid of the lifetimes - ([6ea4862](https://github.com/n0-computer/iroh/commit/6ea486296647ae6d7eb13c784fc2afc931e99849)) -- Rename the macros to declare_... 
- ([ae24dd1](https://github.com/n0-computer/iroh/commit/ae24dd11e44ce8a09cc8617c47a563c52be08e6b)) -- Make macros optional, and also revert the trait name changes - ([884ceed](https://github.com/n0-computer/iroh/commit/884ceed646231a413c1f011068d3bcbb415b18fc)) -- Remove all transports from the defaults - ([3c02ee5](https://github.com/n0-computer/iroh/commit/3c02ee5f3058539365bd7d588c1610fb7d8ea050)) -- Use spans - ([15be738](https://github.com/n0-computer/iroh/commit/15be73800c511180834e8712c24121bd473e5edd)) -- Add mapped methods for client and server - ([58b029e](https://github.com/n0-computer/iroh/commit/58b029ed2022040e0eb924d80884f521ed76c195)) -- Better generics with helper trait - ([b41c76a](https://github.com/n0-computer/iroh/commit/b41c76ae2948341a37d97a2296fb4b9dc421a9a9)) -- Naming - ([ee5272a](https://github.com/n0-computer/iroh/commit/ee5272af1040223152e2750d8680a0d128b1afd6)) -- No more futures crate ([#73](https://github.com/n0-computer/iroh/issues/73)) - ([403fab0](https://github.com/n0-computer/iroh/commit/403fab014dea45b5d58978d9d4b8a9c80e145c1f)) - -### ๐Ÿ“š Documentation - -- *(http2)* Add comments for the new error cases - ([103b8f4](https://github.com/n0-computer/iroh/commit/103b8f400c39369710f110cbf9463813858708ff)) -- Better comments - ([c7de505](https://github.com/n0-computer/iroh/commit/c7de505a8730e8cd213d20f6d072d9d7fa61e0f7)) -- Yet another badge - ([c0c1ac3](https://github.com/n0-computer/iroh/commit/c0c1ac3a740ac2b5e28a8173329f8a7a2790f57f)) -- Fix github badge - ([60c511f](https://github.com/n0-computer/iroh/commit/60c511f8d8afefaa35c5fff1011a3030bc1db7c0)) -- Update todo comments and made some other comments nicer - ([311307c](https://github.com/n0-computer/iroh/commit/311307cb8acb9a7ca5a4c97adb1e7193a3c17c95)) -- Update docs to match implementation - ([7b6bf32](https://github.com/n0-computer/iroh/commit/7b6bf325884c7f6843fd7656b269aa9b2506b2b0)) -- Add some more text to the readme about why this thing exists in the first place - ([a512de5](https://github.com/n0-computer/iroh/commit/a512de5e80e2e7f19e14cfaf01873407247237aa)) -- Better docs for the declare macros - ([ffc934c](https://github.com/n0-computer/iroh/commit/ffc934c7f2b79cf76565f52e135d14e3c0d637ac)) - -### โšก Performance - -- Avoid a copy - ([b57564f](https://github.com/n0-computer/iroh/commit/b57564f3ee9cdbf9fcc1a70965484c40dfae2a40)) -- Preallocate small buffer - ([e306eba](https://github.com/n0-computer/iroh/commit/e306ebaf46e69e6b0f5ed086559900ff4b64dc4b)) - -### ๐ŸŽจ Styling - -- Fmt - ([0152170](https://github.com/n0-computer/iroh/commit/01521701db70a14f11ba2e5937cd30a72d3e3b12)) - -### ๐Ÿงช Testing - -- *(http2)* Add some tests for the not so happy path - ([c04cf77](https://github.com/n0-computer/iroh/commit/c04cf7790ed92ca68129c95f26cae0c3136333a6)) -- Adapt examples - ([80f4921](https://github.com/n0-computer/iroh/commit/80f4921f959b9363204443c39aa81ba83a875c6e)) - -### โš™๏ธ Miscellaneous Tasks - -- *(docs)* Enable all features for docs.rs builds - ([d3f55ce](https://github.com/n0-computer/iroh/commit/d3f55ced941448de4e3b571ae6bdf6bc3942d4fb)) -- *(docs)* Enable all features for docs.rs builds ([#60](https://github.com/n0-computer/iroh/issues/60)) - ([e063747](https://github.com/n0-computer/iroh/commit/e063747f5eb47cde022845e1b2cecb5426b823c1)) -- Fmt - ([20bb7a0](https://github.com/n0-computer/iroh/commit/20bb7a01cfb7c019bd91106e51ffb6e5acc0ad88)) -- Rename main structs to include type - ([d61bf8d](https://github.com/n0-computer/iroh/commit/d61bf8d09f51d2df4728d4f76dbe69626ca9d0ac)) 
-- Configure rust version and check during CI - ([da6f282](https://github.com/n0-computer/iroh/commit/da6f2827229514946a4c800c085c956e195fec44)) -- Add more up to date n0 ci workflow - ([7adeaec](https://github.com/n0-computer/iroh/commit/7adeaec832ebf0e1da46f963d29f9cf16854c518)) -- Clippy ([#61](https://github.com/n0-computer/iroh/issues/61)) - ([b25d30d](https://github.com/n0-computer/iroh/commit/b25d30d6749c7508cf3e2e425703be53fd52c49e)) -- Fmt - ([63bc8d8](https://github.com/n0-computer/iroh/commit/63bc8d882453f45ee51187bca9e1399a928d417a)) -- Fix feature flags for tests - ([9c4a7e6](https://github.com/n0-computer/iroh/commit/9c4a7e69b186c84d16464de55b4d80baab73c41b)) -- Clippy - ([1652d5f](https://github.com/n0-computer/iroh/commit/1652d5fb39e464ad8929861614ad1a3153d3feea)) -- Fix feature flags for tests ([#69](https://github.com/n0-computer/iroh/issues/69)) - ([488bb8c](https://github.com/n0-computer/iroh/commit/488bb8c62850bd1cc74eac7303d820f24c0a9151)) - -### Fix - -- Typos - ([b39a1ac](https://github.com/n0-computer/iroh/commit/b39a1ac757add613787a6d66174733bc2f168251)) - -### Change - -- Improve split macro example - ([7d5dc82](https://github.com/n0-computer/iroh/commit/7d5dc82da29933fd922343e0722bab17e1011f5a)) - -### Cleanup - -- Move socket name generation into the lib - ([4f40732](https://github.com/n0-computer/iroh/commit/4f40732e33e812d40b97c3d031c3230096bd9ff9)) - -### Deps - -- Update flume - ([637f9f2](https://github.com/n0-computer/iroh/commit/637f9f28a917b01a6db1459042466a3bdb3dde66)) -- Update flume - ([c966283](https://github.com/n0-computer/iroh/commit/c96628305b6463f31db64ab6943317f2ca58c976)) - -### Pr - -- *(http2)* Log remote addr again - ([5388bb2](https://github.com/n0-computer/iroh/commit/5388bb2daf69aaeac4b1038c3f7a40937eec16dc)) -- Make ChannelConfigError a proper error - ([e4a548b](https://github.com/n0-computer/iroh/commit/e4a548b64fb97935a607d90f7da98d64ed89d8c5)) -- Rename extra constructors and add some comments - ([2c9a08b](https://github.com/n0-computer/iroh/commit/2c9a08bf5b0d2fcd62d93374862932a5c717af2f)) - -### Ref - -- Rename main structs not conflict with trait names - ([6fba32a](https://github.com/n0-computer/iroh/commit/6fba32a4ecf67cb4700f96005c4519f3a9d5bd5b)) - -### Wip - -- Modularize example - ([1782411](https://github.com/n0-computer/iroh/commit/17824114e5ddd50c3bfd590e884e977f401e6e0b)) -- Better approach - setup - ([92b9b60](https://github.com/n0-computer/iroh/commit/92b9b60beeb933bc85a8f977242336a504b735f6)) - - diff --git a/Cargo.lock b/Cargo.lock index 6bb08af6..7aaa7ebd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "asn1-rs" @@ -88,7 +88,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] @@ -100,7 +100,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -111,40 +111,18 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", -] - -[[package]] -name = "async-stream" -version = 
"0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" -dependencies = [ - "async-stream-impl", - "futures-core", - "pin-project-lite", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "async-trait" -version = "0.1.86" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -181,9 +159,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "backon" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49fef586913a57ff189f25c9b3d034356a5bf6b3fa9a7f067588fe1698ba1f5d" +checksum = "970d91570c01a8a5959b36ad7dd1c30642df24b6b3068710066f6809f7033bb7" dependencies = [ "fastrand", "gloo-timers", @@ -219,9 +197,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" [[package]] name = "bitflags" @@ -231,9 +209,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "block-buffer" @@ -264,15 +242,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.2.14" +version = "1.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3d1b2e905a3a7b00a6141adb0e4c0bb941d11caf55349d863942a1cc44e3c9" +checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" dependencies = [ "shlex", ] @@ -308,15 +286,15 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -330,20 +308,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "client" -version = "0.1.0" -dependencies = [ - "anyhow", - "futures", - "iroh-quinn", - "quic-rpc", - "rustls", - "tokio", - 
"tracing-subscriber", - "types", -] - [[package]] name = "cobs" version = "0.2.3" @@ -534,7 +498,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -577,14 +541,14 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" dependencies = [ "powerfmt", ] @@ -595,7 +559,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ - "derive_more-impl", + "derive_more-impl 1.0.0", +] + +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl 2.0.1", ] [[package]] @@ -606,10 +579,21 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "unicode-xid", ] +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "diatomic-waker" version = "0.2.3" @@ -635,7 +619,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -651,18 +635,18 @@ dependencies = [ [[package]] name = "document-features" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6969eaabd2421f8a2775cfd2471a2b634372b4a25d41e3bd647b79912850a0" +checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" dependencies = [ "litrs", ] [[package]] name = "dtoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" [[package]] name = "ed25519" @@ -711,7 +695,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -731,7 +715,7 @@ checksum = "fc4caf64a58d7a6d65ab00639b046ff54399a39f5f2554728895ace4b297cd79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -755,16 +739,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a02a5d186d7bf1cb21f1f95e1a9cfa5c1f2dcd803a47aad454423ceec13525c5" -[[package]] -name = "errno" -version = "0.3.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" -dependencies = [ - "libc", - "windows-sys 0.59.0", -] - [[package]] name = "fallible-iterator" version = "0.3.0" @@ -803,9 +777,9 @@ checksum = 
"3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "form_urlencoded" @@ -833,14 +807,15 @@ dependencies = [ [[package]] name = "futures-buffered" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34acda8ae8b63fbe0b2195c998b180cff89a8212fb2622a78b572a9f1c6f7684" +checksum = "fe940397c8b744b9c2c974791c2c08bca2c3242ce0290393249e98f215a00472" dependencies = [ "cordyceps", "diatomic-waker", "futures-core", "pin-project-lite", + "spin", ] [[package]] @@ -897,7 +872,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -982,14 +957,16 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] @@ -1018,35 +995,16 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 0.2.12", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "h2" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.2.0", + "http 1.3.1", "indexmap", "slab", "tokio", @@ -1124,7 +1082,7 @@ dependencies = [ "ipnet", "once_cell", "rand 0.9.0", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tokio", "tracing", @@ -1147,7 +1105,7 @@ dependencies = [ "rand 0.9.0", "resolv-conf", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", ] @@ -1179,13 +1137,13 @@ checksum = "4a8575493d277c9092b988c780c94737fb9fd8651a1001e16bee3eccfc1baedb" [[package]] name = "hostname" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" +checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ + "cfg-if", "libc", - "match_cfg", - "winapi", + "windows 0.52.0", ] [[package]] @@ -1207,26 +1165,15 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", "itoa", ] -[[package]] -name = 
"http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.1" @@ -1234,27 +1181,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.2.0", + "http 1.3.1", ] [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", - "http 1.2.0", - "http-body 1.0.1", + "futures-core", + "http 1.3.1", + "http-body", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1262,30 +1209,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "hyper" -version = "0.14.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.6.0" @@ -1295,9 +1218,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.7", - "http 1.2.0", - "http-body 1.0.1", + "h2", + "http 1.3.1", + "http-body", "httparse", "httpdate", "itoa", @@ -1314,8 +1237,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ "futures-util", - "http 1.2.0", - "hyper 1.6.0", + "http 1.3.1", + "hyper", "hyper-util", "rustls", "rustls-pki-types", @@ -1334,9 +1257,9 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.2.0", - "http-body 1.0.1", - "hyper 1.6.0", + "http 1.3.1", + "http-body", + "hyper", "pin-project-lite", "socket2", "tokio", @@ -1346,14 +1269,15 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "b2fd658b06e56721792c5df4475705b6cda790e9298d19d2f8af083457bcd127" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", "windows-core 0.52.0", ] @@ -1408,9 +1332,9 @@ dependencies = [ [[package]] name = "icu_locid_transform_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = 
"7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -1432,9 +1356,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -1453,9 +1377,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -1482,7 +1406,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1516,9 +1440,9 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.2.0", + "http 1.3.1", "http-body-util", - "hyper 1.6.0", + "hyper", "hyper-util", "log", "rand 0.8.5", @@ -1529,9 +1453,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -1539,9 +1463,9 @@ dependencies = [ [[package]] name = "inout" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ "generic-array", ] @@ -1592,11 +1516,11 @@ dependencies = [ "crypto_box", "data-encoding", "der", - "derive_more", + "derive_more 1.0.0", "ed25519-dalek", "futures-util", "hickory-resolver", - "http 1.2.0", + "http 1.3.1", "igd-next", "instant", "iroh-base", @@ -1608,7 +1532,7 @@ dependencies = [ "iroh-relay", "n0-future", "netdev", - "netwatch 0.4.0", + "netwatch", "pin-project", "pkarr", "portmapper", @@ -1617,12 +1541,12 @@ dependencies = [ "reqwest", "ring", "rustls", - "rustls-webpki", + "rustls-webpki 0.102.8", "serde", "smallvec", "strum", "stun-rs", - "thiserror 2.0.11", + "thiserror 2.0.12", "time", "tokio", "tokio-stream", @@ -1643,11 +1567,11 @@ checksum = "02bf2374c0f1d01cde6e60de7505e42a604acda1a1bb3f7be19806e466055517" dependencies = [ "curve25519-dalek", "data-encoding", - "derive_more", + "derive_more 1.0.0", "ed25519-dalek", "rand_core 0.6.4", "serde", - "thiserror 2.0.11", + "thiserror 2.0.12", "url", ] @@ -1661,7 +1585,7 @@ dependencies = [ "prometheus-client", "serde", "struct_iterable", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", ] @@ -1674,20 +1598,20 @@ dependencies = [ "anyhow", "bytes", "cfg_aliases", - "derive_more", + "derive_more 1.0.0", "hickory-resolver", "iroh-base", "iroh-metrics", "iroh-quinn", "iroh-relay", "n0-future", - "netwatch 0.4.0", + "netwatch", "portmapper", "rand 0.8.5", "reqwest", "rustls", "surge-ping", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tokio-util", "tracing", @@ -1708,7 +1632,7 @@ dependencies = [ "rustc-hash", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", 
"web-time", @@ -1729,7 +1653,7 @@ dependencies = [ "rustls-pki-types", "rustls-platform-verifier", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time", @@ -1759,11 +1683,11 @@ dependencies = [ "bytes", "cfg_aliases", "data-encoding", - "derive_more", + "derive_more 1.0.0", "hickory-resolver", - "http 1.2.0", + "http 1.3.1", "http-body-util", - "hyper 1.6.0", + "hyper", "hyper-util", "iroh-base", "iroh-metrics", @@ -1778,11 +1702,11 @@ dependencies = [ "rand 0.8.5", "reqwest", "rustls", - "rustls-webpki", + "rustls-webpki 0.102.8", "serde", "strum", "stun-rs", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tokio-rustls", "tokio-tungstenite-wasm", @@ -1795,9 +1719,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jni" @@ -1839,21 +1763,15 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" - -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "litrs" @@ -1873,9 +1791,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "loom" @@ -1912,12 +1830,6 @@ dependencies = [ "hashbrown 0.15.2", ] -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = "0.1.0" @@ -1953,9 +1865,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b1c9bd4fe1f0f8b387f6eb9eb3b4a1aa26185e5750efb9140301703f62cd1b" +checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", ] @@ -1997,7 +1909,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "399e11dc3b0e8d9d65b27170d22f5d779d52d9bed888db70d7e0c2c7ce3dfc52" dependencies = [ "cfg_aliases", - "derive_more", + "derive_more 1.0.0", "futures-buffered", "futures-lite", "futures-util", @@ -2020,18 +1932,6 @@ dependencies = [ "getrandom 0.2.15", ] -[[package]] -name = "nested_enum_utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f256ef99e7ac37428ef98c89bef9d84b590172de4bbfbe81b68a4cd3abadb32" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "netdev" version = "0.31.0" @@ -2111,7 +2011,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -2127,39 +2027,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "netwatch" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64da82edf903649e6cb6a77b5a6f7fe01387d8865065d411d139018510880302" -dependencies = [ - "anyhow", - "atomic-waker", - "bytes", - "derive_more", - "futures-lite", - "futures-sink", - "futures-util", - "iroh-quinn-udp", - "libc", - "netdev", - "netlink-packet-core", - "netlink-packet-route 0.19.0", - "netlink-sys", - "once_cell", - "rtnetlink 0.13.1", - "rtnetlink 0.14.1", - "serde", - "socket2", - "thiserror 2.0.11", - "time", - "tokio", - "tokio-util", - "tracing", - "windows 0.58.0", - "wmi", -] - [[package]] name = "netwatch" version = "0.4.0" @@ -2169,7 +2036,7 @@ dependencies = [ "atomic-waker", "bytes", "cfg_aliases", - "derive_more", + "derive_more 1.0.0", "iroh-quinn-udp", "js-sys", "libc", @@ -2182,14 +2049,14 @@ dependencies = [ "rtnetlink 0.14.1", "serde", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "time", "tokio", "tokio-util", "tracing", "web-sys", "windows 0.59.0", - "windows-result 0.3.0", + "windows-result 0.3.2", "wmi", ] @@ -2210,7 +2077,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "libc", ] @@ -2293,7 +2160,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2316,9 +2183,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "d75b0bedcc4fe52caa0e03d9f1151a323e4aa5e2d78ba3580400cd3c9e2bc4bc" [[package]] name = "opaque-debug" @@ -2375,9 +2242,9 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pem" -version = "3.0.4" +version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" dependencies = [ "base64", "serde", @@ -2400,20 +2267,20 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.15" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +checksum = "198db74531d58c70a361c42201efde7e2591e976d518caf7662a47dc5720e7b6" dependencies = [ "memchr", - "thiserror 2.0.11", + "thiserror 2.0.12", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.7.15" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "816518421cfc6887a0d62bf441b6ffb4536fcc926395a69e1a85852d4363f57e" +checksum = "d725d9cfd79e87dccc9341a2ef39d1b6f6353d68c4b33c177febbe1a402c97c5" dependencies = [ "pest", "pest_generator", @@ -2421,22 +2288,22 @@ dependencies = [ [[package]] name = 
"pest_generator" -version = "2.7.15" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d1396fd3a870fc7838768d171b4616d5c91f6cc25e377b673d714567d99377b" +checksum = "db7d01726be8ab66ab32f9df467ae8b1148906685bbe75c82d1e65d7f5b3f841" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "pest_meta" -version = "2.7.15" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1e58089ea25d717bfd31fb534e4f3afcc2cc569c70de3e239778991ea3b7dea" +checksum = "7f9f832470494906d1fca5329f8ab5791cc60beb230c74815dff541cbd2b5ca0" dependencies = [ "once_cell", "pest", @@ -2445,22 +2312,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2490,7 +2357,7 @@ dependencies = [ "lru", "self_cell", "simple-dns", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", "ureq", "wasm-bindgen", @@ -2527,7 +2394,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2564,31 +2431,31 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" [[package]] name = "portmapper" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b715da165f399be093fecb2ca774b00713a3b32f6b27e0752fbf255e3be622af" +checksum = "247dcb75747c53cc433d6d8963a064187eec4a676ba13ea33143f1c9100e754f" dependencies = [ "base64", "bytes", - "derive_more", + "derive_more 1.0.0", "futures-lite", "futures-util", "igd-next", "iroh-metrics", "libc", - "netwatch 0.3.0", + "netwatch", "num_enum", "rand 0.8.5", "serde", "smallvec", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "time", "tokio", "tokio-util", @@ -2629,11 +2496,11 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -2672,18 +2539,18 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] [[package]] name = 
"proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -2708,43 +2575,27 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "quic-rpc" -version = "0.19.0" +version = "0.50.0" dependencies = [ "anyhow", - "async-stream", - "bytes", - "derive_more", - "document-features", - "flume", - "futures", + "derive_more 2.0.1", "futures-buffered", - "futures-lite", - "futures-sink", - "futures-util", - "hyper 0.14.32", - "iroh", "iroh-quinn", - "nested_enum_utils", - "pin-project", + "n0-future", "postcard", - "proc-macro2", - "rand 0.8.5", + "quic-rpc-derive", "rcgen", "rustls", "serde", - "slab", "smallvec", - "tempfile", - "testresult", + "thiserror 2.0.12", "thousands", - "time", "tokio", - "tokio-serde", "tokio-util", "tracing", "tracing-subscriber", @@ -2752,56 +2603,69 @@ dependencies = [ [[package]] name = "quic-rpc-derive" -version = "0.19.0" +version = "0.50.0" dependencies = [ - "derive_more", + "derive_more 2.0.1", "proc-macro2", "quic-rpc", "quote", "serde", "syn 1.0.109", + "tracing", "trybuild", ] [[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +name = "quic-rpc-iroh" +version = "0.1.0" +dependencies = [ + "anyhow", + "iroh", + "n0-future", + "postcard", + "quic-rpc", + "quic-rpc-derive", + "serde", + "tokio", + "tracing", + "tracing-subscriber", +] [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", + "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.2", + "rand 0.9.0", "ring", "rustc-hash", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time", @@ -2809,9 +2673,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" dependencies = [ "cfg_aliases", "libc", @@ -2823,9 +2687,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" 
dependencies = [ "proc-macro2", ] @@ -2840,6 +2704,12 @@ dependencies = [ "pest_derive", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "rand" version = "0.8.5" @@ -2858,8 +2728,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.0", - "zerocopy 0.8.18", + "rand_core 0.9.3", + "zerocopy", ] [[package]] @@ -2879,7 +2749,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.0", + "rand_core 0.9.3", ] [[package]] @@ -2893,12 +2763,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.1", - "zerocopy 0.8.18", + "getrandom 0.3.2", ] [[package]] @@ -2916,11 +2785,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -2975,18 +2844,18 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.12" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies = [ "base64", "bytes", "futures-core", "futures-util", - "http 1.2.0", - "http-body 1.0.1", + "http 1.3.1", + "http-body", "http-body-util", - "hyper 1.6.0", + "hyper", "hyper-rustls", "hyper-util", "ipnet", @@ -3020,19 +2889,18 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" dependencies = [ "hostname", - "quick-error", ] [[package]] name = "ring" -version = "0.17.9" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75ec5e92c4d8aede845126adc388046234541629e76029599ed35a003c7ed24" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -3108,30 +2976,17 @@ dependencies = [ "nom", ] -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.8.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.59.0", -] - [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.103.1", "subtle", "zeroize", ] @@ -3168,9 +3023,9 @@ dependencies = [ [[package]] name = "rustls-platform-verifier" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e012c45844a1790332c9386ed4ca3a06def221092eda277e6f079728f8ea99da" +checksum = "4a5467026f437b4cb2a533865eaa73eb840019a0916f4b9ec563c6e617e086c9" dependencies = [ "core-foundation 0.10.0", "core-foundation-sys", @@ -3180,11 +3035,11 @@ dependencies = [ "rustls", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki", + "rustls-webpki 0.103.1", "security-framework", "security-framework-sys", "webpki-root-certs", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3204,17 +3059,28 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustls-webpki" +version = "0.103.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "salsa20" @@ -3261,7 +3127,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -3286,9 +3152,9 @@ checksum = "c2fdfc24bc566f839a2da4c4295b82db7d25a24253867d5c64355abb5799bdbe" [[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "send_wrapper" @@ -3298,29 +3164,29 @@ checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" [[package]] name = "serde" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.140" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -3359,20 +3225,6 @@ dependencies = [ "serde", ] -[[package]] -name = "server" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-stream", - "futures", - "iroh-quinn", - "quic-rpc", - "tokio", - "tracing-subscriber", - "types", -] - [[package]] name = "sha1" version = "0.10.6" @@ -3434,7 +3286,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dee851d0e5e7af3721faea1843e8015e820a234f81fda3dea9247e15bac9a86a" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3448,9 +3300,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" [[package]] name = "socket2" @@ -3507,7 +3359,7 @@ dependencies = [ "proc-macro2", "quote", "struct_iterable_internal", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3535,14 +3387,14 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "stun-rs" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b79cc624c9a747353810310af44f1f03f71eb4561284a894acc0396e6d0de76e" +checksum = "c6a47cab181e04277c2ceebe9d4ae102f6a50049b1855fd64546923581665492" dependencies = [ "base64", "bounded-integer", @@ -3559,7 +3411,7 @@ dependencies = [ "precis-core", "precis-profiles", "quoted-string-parser", - "rand 0.8.5", + "rand 0.9.0", ] [[package]] @@ -3570,14 +3422,14 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "surge-ping" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbf95ce4c7c5b311d2ce3f088af2b93edef0f09727fa50fbe03c7a979afce77" +checksum = "6fda78103d8016bb25c331ddc54af634e801806463682cc3e549d335df644d95" dependencies = [ "hex", "parking_lot", "pnet_packet", - "rand 0.8.5", + "rand 0.9.0", "socket2", "thiserror 1.0.69", "tokio", @@ -3597,9 +3449,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -3623,7 +3475,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3632,7 +3484,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -3655,23 +3507,9 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" [[package]] name = "target-triple" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"42a4d50cdb458045afc8131fd91b64904da29548bcb63c7236e0844936c13078" - -[[package]] -name = "tempfile" -version = "3.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" -dependencies = [ - "cfg-if", - "fastrand", - "getrandom 0.3.1", - "once_cell", - "rustix", - "windows-sys 0.59.0", -] +checksum = "1ac9aa371f599d22256307c24a9d748c041e548cbf599f35d890f9d365361790" [[package]] name = "termcolor" @@ -3682,12 +3520,6 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "testresult" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614b328ff036a4ef882c61570f72918f7e9c5bee1da33f8e7f91e01daee7e56c" - [[package]] name = "thiserror" version = "1.0.69" @@ -3699,11 +3531,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -3714,18 +3546,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3746,9 +3578,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -3762,15 +3594,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -3788,9 +3620,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -3803,9 +3635,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = 
"f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" dependencies = [ "backtrace", "bytes", @@ -3827,31 +3659,19 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", ] -[[package]] -name = "tokio-serde" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf600e7036b17782571dd44fa0a5cea3c82f60db5137f774a325a76a0d6852b" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project", -] - [[package]] name = "tokio-stream" version = "0.1.17" @@ -3884,7 +3704,7 @@ checksum = "e21a5c399399c3db9f08d8297ac12b500e86bca82e930253fdc62eaf9c0de6ae" dependencies = [ "futures-channel", "futures-util", - "http 1.2.0", + "http 1.3.1", "httparse", "js-sys", "thiserror 1.0.69", @@ -3896,9 +3716,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -3990,7 +3810,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4040,9 +3860,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.103" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b812699e0c4f813b872b373a4471717d9eb550da14b311058a4d9cf4173cbca6" +checksum = "6ae08be68c056db96f0e6c6dd820727cca756ced9e1f4cc7fdd20e2a55e23898" dependencies = [ "glob", "serde", @@ -4062,7 +3882,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.2.0", + "http 1.3.1", "httparse", "log", "rand 0.8.5", @@ -4073,18 +3893,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - -[[package]] -name = "types" -version = "0.1.0" -dependencies = [ - "derive_more", - "quic-rpc", - "serde", -] +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "ucd-parse" @@ -4103,9 +3914,9 @@ checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" [[package]] name = "unicode-ident" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-normalization" @@ -4185,11 +3996,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.13.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" 
+checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", ] [[package]] @@ -4231,9 +4042,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] @@ -4260,7 +4071,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -4295,7 +4106,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4362,9 +4173,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" [[package]] name = "winapi" @@ -4406,6 +4217,16 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +dependencies = [ + "windows-core 0.52.0", + "windows-targets 0.52.6", +] + [[package]] name = "windows" version = "0.58.0" @@ -4455,9 +4276,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "810ce18ed2112484b0d4e15d022e5f598113e220c53e373fb31e67e21670c1ce" dependencies = [ "windows-implement 0.59.0", - "windows-interface 0.59.0", - "windows-result 0.3.0", - "windows-strings 0.3.0", + "windows-interface 0.59.1", + "windows-result 0.3.2", + "windows-strings 0.3.1", "windows-targets 0.53.0", ] @@ -4469,7 +4290,7 @@ checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4480,7 +4301,7 @@ checksum = "83577b051e2f49a058c308f17f273b570a6a758386fc291b5f6a934dd84e48c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4491,29 +4312,35 @@ checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "windows-interface" -version = "0.59.0" +version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb26fd936d991781ea39e87c3a27285081e3c0da5ca0fcbc02d368cc6f52ff01" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + [[package]] name = "windows-registry" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ - 
"windows-result 0.2.0", - "windows-strings 0.1.0", - "windows-targets 0.52.6", + "windows-result 0.3.2", + "windows-strings 0.3.1", + "windows-targets 0.53.0", ] [[package]] @@ -4527,11 +4354,11 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.3.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d08106ce80268c4067c0571ca55a9b4e9516518eaa1a1fe9b37ca403ae1d1a34" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" dependencies = [ - "windows-targets 0.53.0", + "windows-link", ] [[package]] @@ -4546,11 +4373,11 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b888f919960b42ea4e11c2f408fadb55f78a9f236d5eef084103c8ce52893491" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" dependencies = [ - "windows-targets 0.53.0", + "windows-link", ] [[package]] @@ -4833,9 +4660,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59690dea168f2198d1a3b0cac23b8063efcd11012f10ae4698f284808c8ef603" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] @@ -4852,11 +4679,11 @@ dependencies = [ [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -4869,7 +4696,7 @@ dependencies = [ "futures", "log", "serde", - "thiserror 2.0.11", + "thiserror 2.0.12", "windows 0.59.0", "windows-core 0.59.0", ] @@ -4947,7 +4774,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] @@ -4959,63 +4786,42 @@ checksum = "2164e798d9e3d84ee2c91139ace54638059a3b23e361f5c11781c2c6459bde0f" [[package]] name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "byteorder", - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79386d31a42a4996e3336b0919ddb90f81112af416270cff95b5f5af22b839c2" -dependencies = [ - "zerocopy-derive 0.8.18", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.98", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.18" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76331675d372f91bf8d17e13afbd5fe639200b73d01f0fc748bb059f9cca2db7" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] 
[[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] @@ -5044,5 +4850,5 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] diff --git a/Cargo.toml b/Cargo.toml index 9a840d05..ff65bce5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "quic-rpc" -version = "0.19.0" +version = "0.50.0" edition = "2021" authors = ["Rรผdiger Klaehn ", "n0 team"] keywords = ["api", "protocol", "network", "rpc"] @@ -13,68 +13,55 @@ description = "A streaming rpc system based on quic" rust-version = "1.76" [dependencies] -bytes = { version = "1", optional = true } -flume = { version = "0.11", optional = true } -futures-lite = "2.3.0" -futures-sink = "0.3.30" -futures-util = { version = "0.3.30", features = ["sink"] } -hyper = { version = "0.14.16", features = ["full"], optional = true } -iroh = { version = "0.34", optional = true } -pin-project = "1" -quinn = { package = "iroh-quinn", version = "0.13", optional = true } -serde = { version = "1", features = ["derive"] } -tokio = { version = "1", default-features = false, features = ["macros", "sync"] } -tokio-serde = { version = "0.9", features = [], optional = true } -tokio-util = { version = "0.7", features = ["rt"] } -postcard = { version = "1", features = ["use-std"], optional = true } -tracing = "0.1" -futures = { version = "0.3.30", optional = true } -anyhow = "1" -document-features = "0.2" -# for test-utils -rcgen = { version = "0.13", optional = true } -# for test-utils -rustls = { version = "0.23", default-features = false, features = ["ring"], optional = true } +# we require serde even in non-rpc mode +serde = { version = "1", default-features = false } +# just for the oneshot and mpsc queues +tokio = { version = "1.44", features = ["sync"], default-features = false } +# for PollSender (which for some reason is not available in the main tokio api) +tokio-util = { version = "0.7.14", default-features = false } -# Indirect dependencies, is needed to make the minimal crates versions work -slab = "0.4.9" # iroh-quinn -smallvec = "1.13.2" -time = "0.3.36" # serde +# used in the endpoint handler code when using rpc +tracing = { version = "0.1.41", optional = true } +# used to ser/de messages when using rpc +postcard = { version = "1.1.1", features = ["alloc", "use-std"], optional = true } +# currently only transport when using rpc +quinn = { version = "0.13.0", package = "iroh-quinn", optional = true } +# used as a buffer for serialization when using rpc +smallvec = { version = "1.14.0", features = ["write"], optional = true } +# used in the test utils to generate quinn endpoints +rustls = { version = "0.23.5", default-features = false, features = ["std"], optional = true } +# used in the test utils to generate quinn endpoints +rcgen = { version = "0.13.2", optional = true } +# 
used in the test utils to generate quinn endpoints +anyhow = { version = "1.0.66", optional = true } +# used in the benches +futures-buffered ={ version = "0.2.9", optional = true } +thiserror = "2.0.12" [dev-dependencies] -anyhow = "1" -async-stream = "0.3.3" -derive_more = { version = "1", features = ["from", "try_into", "display"] } -rand = "0.8" - -serde = { version = "1", features = ["derive"] } +tracing-subscriber = { version = "0.3.19", features = ["fmt"] } +# used in the derive example. This must not be a main crate dep or else it will be circular! +quic-rpc-derive = { path = "quic-rpc-derive" } +# just convenient for the enum definitions +derive_more = { version = "2", features = ["from"] } +# we need full for example main etc. tokio = { version = "1", features = ["full"] } -quinn = { package = "iroh-quinn", version = "0.13", features = ["ring"] } -rcgen = "0.13" +# formatting thousands = "0.2.0" -tracing-subscriber = "0.3.16" -tempfile = "3.5.0" -proc-macro2 = "1.0.66" -futures-buffered = "0.2.4" -testresult = "0.4.1" -nested_enum_utils = "0.1.0" -tokio-util = { version = "0.7", features = ["rt"] } +# for AbortOnDropHandle +n0-future = { version = "0.1.2" } [features] -## HTTP transport using the `hyper` crate -hyper-transport = ["dep:flume", "dep:hyper", "dep:postcard", "dep:bytes", "dep:tokio-serde", "tokio-util/codec"] -## QUIC transport using the `iroh-quinn` crate -quinn-transport = ["dep:flume", "dep:quinn", "dep:postcard", "dep:bytes", "dep:tokio-serde", "tokio-util/codec"] -## In memory transport using the `flume` crate -flume-transport = ["dep:flume"] -## p2p QUIC transport using the `iroh` crate -iroh-transport = ["dep:iroh", "dep:flume", "dep:postcard", "dep:tokio-serde", "tokio-util/codec"] -## Macros for creating request handlers -macros = [] -## Utilities for testing -test-utils = ["dep:rcgen", "dep:rustls"] -## Default, includes the memory transport -default = ["flume-transport"] +# enable the remote transport +rpc = ["dep:quinn", "dep:postcard", "dep:anyhow", "dep:smallvec", "dep:tracing", "tokio/io-util"] +# add test utilities +quinn_endpoint_setup = ["rpc", "dep:rustls", "dep:rcgen", "dep:anyhow", "dep:futures-buffered"] +# pick up parent span when creating channel messages +message_spans = [] +default = ["rpc", "quinn_endpoint_setup", "message_spans"] + +[workspace] +members = ["quic-rpc-derive", "quic-rpc-iroh"] [package.metadata.docs.rs] all-features = true @@ -82,22 +69,3 @@ rustdoc-args = ["--cfg", "quicrpc_docsrs"] [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ["cfg(quicrpc_docsrs)"] } - -[[example]] -name = "errors" -required-features = ["flume-transport"] - -[[example]] -name = "macro" -required-features = ["flume-transport", "macros"] - -[[example]] -name = "store" -required-features = ["flume-transport", "macros"] - -[[example]] -name = "modularize" -required-features = ["flume-transport"] - -[workspace] -members = ["examples/split/types", "examples/split/server", "examples/split/client", "quic-rpc-derive"] diff --git a/DOCS.md b/DOCS.md index ca1ec920..021b18b5 100644 --- a/DOCS.md +++ b/DOCS.md @@ -1,5 +1,5 @@ -Building docs for this crate is a bit complex. There are lots of feature flags, -so we want feature flag markers in the docs, especially for the transports. +Building docs for this crate is a bit complex. There are some feature flags, +so we want feature flag markers in the docs. There is an experimental cargo doc feature that adds feature flag markers. 
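Those markers hang off the custom `quicrpc_docsrs` cfg set up in the Cargo.toml changes above: `[package.metadata.docs.rs]` passes `--cfg quicrpc_docsrs` to rustdoc, and the `[lints.rust]` table whitelists that cfg. As a rough illustration only (the module name and attribute placement below are assumptions, not part of this patch), the source side of that wiring typically looks like:

```rust
// lib.rs sketch (not from this patch): the nightly doc_cfg feature is
// enabled only when rustdoc runs with `--cfg quicrpc_docsrs`, so a normal
// `cargo build` on stable is unaffected.
#![cfg_attr(quicrpc_docsrs, feature(doc_cfg))]

// An item gated behind a cargo feature gets an "available on crate
// feature `rpc` only" marker on docs.rs via doc(cfg(..)).
#[cfg(feature = "rpc")]
#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))]
pub mod rpc {
    // remote (QUIC) transport entry points would live here
}
```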
To get those, run docs with this command line: diff --git a/README.md b/README.md index 062c4374..64116b7d 100644 --- a/README.md +++ b/README.md @@ -13,87 +13,9 @@ A streaming rpc system based on quic [status link]: https://github.com/n0-computer/quic-rpc/actions/workflows/rust.yml [repo link]: https://github.com/n0-computer/quic-rpc -## Goals +# Goals -### Interaction patterns - -Provide not just request/response RPC, but also streaming in both directions, similar to [grpc]. - -- 1 req -> 1 res -- 1 req, update stream -> 1 res -- 1 req -> res stream -- 1 req, update stream -> res stream - -It is still a RPC system in the sense that interactions get initiated by the client. - -### Transports - -- memory transport with very low overhead. In particular, no ser/deser, currently using [flume] -- quic transport via the [quinn] crate -- transparent combination of the above - -### API - -- The API should be similar to the quinn api. Basically "quinn with types". - -## Non-Goals - -- Cross language interop. This is for talking from rust to rust -- Any kind of versioning. You have to do this yourself -- Making remote message passing look like local async function calls -- Being runtime agnostic. This is for tokio - -## Example - -[computation service](https://github.com/n0-computer/quic-rpc/blob/main/tests/math.rs) - -## Why? - -The purpose of quic-rpc is to serve as an *optional* rpc framework. One of the -main goals is to be able to use it as an *in process* way to have well specified -protocols and boundaries between subsystems, including an async boundary. - -It should not have noticeable overhead compared to what you would do anyway to -isolate subsystems in a complex single process app, but should have the *option* -to also send messages over a process boundary via one of the non mem transports. - -What do you usually do in rust to have isolation between subsystems, e.g. -between a database and a networking layer? You have some kind of -channel between the systems and define messages flowing back and forth over that -channel. For almost all interactions these messages itself will again contain -(oneshot or mpsc) channels for independent async communication between the -subsystems. - -Quic-rpc with the mem channel does exactly the same thing, except that it hides -the details and allows you to specify a clean high level interaction protocol -in the rust type system. - -Instead of having a message that explicitly contains some data and the send side -of a oneshot or mpsc channel for the response, it creates a pair of flume -channels internally and sends one end of them to the server. This has some slight -overhead (2 flume channels vs. 1 oneshot channel) for a RPC interaction. But -for streaming interactions the overhead is negligible. - -For the case where you have a process boundary, the overhead is very low for -transports that already have a concept of cheap substreams (http2, quic, ...). -Quic is the poster child of a network transport that has built in cheap -substreams including per substream backpressure. However, I found that for raw -data transfer http2/tcp has still superior performance. This is why the http2 -transport exists. - -Currently you would use the quinn transport for cases where you want to have -connections to many different peers and can't accept a large per connection -overhead, or where you want low latency for small messages. 
- -You would use the hyper transport for cases where you have a small number of -connections, so per connection overhead does not matter that much, and where -you want maximum throughput at the expense of some latency. - -This may change in the future as quic implementations get more optimized. - -[quinn]: https://docs.rs/quinn/ -[flume]: https://docs.rs/flume/ -[grpc]: https://grpc.io/ +See the [module docs](https://docs.rs/quic-rpc/latest/quic_rpc/). # Docs diff --git a/cliff.toml b/cliff.toml deleted file mode 100644 index 32033c41..00000000 --- a/cliff.toml +++ /dev/null @@ -1,64 +0,0 @@ -[changelog] -# changelog header -header = """ -# Changelog\n -All notable changes to quic-rpc will be documented in this file.\n -""" - -body = """ -{% if version %}\ - {% if previous.version %}\ - ## [{{ version | trim_start_matches(pat="v") }}](/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }} - {% else %}\ - ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} - {% endif %}\ -{% else %}\ - ## [unreleased] -{% endif %}\ - -{% macro commit(commit) -%} - - {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}\ - {{ commit.message | upper_first }} - ([{{ commit.id | truncate(length=7, end="") }}](/commit/{{ commit.id }}))\ -{% endmacro -%} - -{% for group, commits in commits | group_by(attribute="group") %} - ### {{ group | striptags | trim | upper_first }} - {% for commit in commits - | filter(attribute="scope") - | sort(attribute="scope") %} - {{ self::commit(commit=commit) }} - {%- endfor -%} - {% raw %}\n{% endraw %}\ - {%- for commit in commits %} - {%- if not commit.scope -%} - {{ self::commit(commit=commit) }} - {% endif -%} - {% endfor -%} -{% endfor %}\n -""" - -footer = "" -postprocessors = [ - { pattern = '', replace = "https://github.com/n0-computer/iroh" }, - { pattern = "\\(#([0-9]+)\\)", replace = "([#${1}](https://github.com/n0-computer/iroh/issues/${1}))"} -] - - -[git] -# regex for parsing and grouping commits -commit_parsers = [ - { message = "^feat", group = "โ›ฐ๏ธ Features" }, - { message = "^fix", group = "๐Ÿ› Bug Fixes" }, - { message = "^doc", group = "๐Ÿ“š Documentation" }, - { message = "^perf", group = "โšก Performance" }, - { message = "^refactor", group = "๐Ÿšœ Refactor" }, - { message = "^style", group = "๐ŸŽจ Styling" }, - { message = "^test", group = "๐Ÿงช Testing" }, - { message = "^chore\\(release\\)", skip = true }, - { message = "^chore\\(deps\\)", skip = true }, - { message = "^chore\\(pr\\)", skip = true }, - { message = "^chore\\(pull\\)", skip = true }, - { message = "^chore|ci", group = "โš™๏ธ Miscellaneous Tasks" }, - { body = ".*security", group = "๐Ÿ›ก๏ธ Security" }, - { message = "^revert", group = "โ—€๏ธ Revert" }, -] \ No newline at end of file diff --git a/examples/compute.rs b/examples/compute.rs new file mode 100644 index 00000000..1aecf200 --- /dev/null +++ b/examples/compute.rs @@ -0,0 +1,482 @@ +use std::{ + io::{self, Write}, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, +}; + +use anyhow::bail; +use futures_buffered::BufferedStreamExt; +use n0_future::{ + stream::StreamExt, + task::{self, AbortOnDropHandle}, +}; +use quic_rpc::{ + channel::{oneshot, spsc}, + rpc::{listen, Handler}, + util::{make_client_endpoint, make_server_endpoint}, + Client, LocalSender, Request, Service, WithChannels, +}; +use quic_rpc_derive::rpc_requests; +use serde::{Deserialize, Serialize}; +use thousands::Separable; +use 
tracing::trace; + +// Define the ComputeService +#[derive(Debug, Clone, Copy)] +struct ComputeService; + +impl Service for ComputeService {} + +// Define ComputeRequest sub-messages +#[derive(Debug, Serialize, Deserialize)] +struct Sqr { + num: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +struct Sum; + +#[derive(Debug, Serialize, Deserialize)] +struct Fibonacci { + max: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +struct Multiply { + initial: u64, +} + +// Define ComputeRequest enum +#[derive(Debug, Serialize, Deserialize)] +enum ComputeRequest { + Sqr(Sqr), + Sum(Sum), + Fibonacci(Fibonacci), + Multiply(Multiply), +} + +// Define the protocol and message enums using the macro +#[rpc_requests(ComputeService, ComputeMessage)] +#[derive(Serialize, Deserialize)] +enum ComputeProtocol { + #[rpc(tx=oneshot::Sender)] + Sqr(Sqr), + #[rpc(rx=spsc::Receiver, tx=oneshot::Sender)] + Sum(Sum), + #[rpc(tx=spsc::Sender)] + Fibonacci(Fibonacci), + #[rpc(rx=spsc::Receiver, tx=spsc::Sender)] + Multiply(Multiply), +} + +// The actor that processes requests +struct ComputeActor { + recv: tokio::sync::mpsc::Receiver, +} + +impl ComputeActor { + pub fn local() -> ComputeApi { + let (tx, rx) = tokio::sync::mpsc::channel(128); + let actor = Self { recv: rx }; + n0_future::task::spawn(actor.run()); + let local = LocalSender::::from(tx); + ComputeApi { + inner: local.into(), + } + } + + async fn run(mut self) { + while let Some(msg) = self.recv.recv().await { + n0_future::task::spawn(async move { + if let Err(cause) = Self::handle(msg).await { + eprintln!("Error: {}", cause); + } + }); + } + } + + async fn handle(msg: ComputeMessage) -> io::Result<()> { + match msg { + ComputeMessage::Sqr(sqr) => { + trace!("sqr {:?}", sqr); + let WithChannels { + tx, inner, span, .. + } = sqr; + let _entered = span.enter(); + let result = (inner.num as u128) * (inner.num as u128); + tx.send(result).await?; + } + ComputeMessage::Sum(sum) => { + trace!("sum {:?}", sum); + let WithChannels { rx, tx, span, .. } = sum; + let _entered = span.enter(); + let mut receiver = rx; + let mut total = 0; + while let Some(num) = receiver.recv().await? { + total += num; + } + tx.send(total).await?; + } + ComputeMessage::Fibonacci(fib) => { + trace!("fibonacci {:?}", fib); + let WithChannels { + tx, inner, span, .. + } = fib; + let _entered = span.enter(); + let mut sender = tx; + let mut a = 0u64; + let mut b = 1u64; + while a <= inner.max { + sender.send(a).await?; + let next = a + b; + a = b; + b = next; + } + } + ComputeMessage::Multiply(mult) => { + trace!("multiply {:?}", mult); + let WithChannels { + rx, + tx, + inner, + span, + .. + } = mult; + let _entered = span.enter(); + let mut receiver = rx; + let mut sender = tx; + let multiplier = inner.initial; + while let Some(num) = receiver.recv().await? 
{ + sender.send(multiplier * num).await?; + } + } + } + Ok(()) + } +} +// The API for interacting with the ComputeService +#[derive(Clone)] +struct ComputeApi { + inner: Client, +} + +impl ComputeApi { + pub fn connect(endpoint: quinn::Endpoint, addr: SocketAddr) -> anyhow::Result { + Ok(ComputeApi { + inner: Client::quinn(endpoint, addr), + }) + } + + pub fn listen(&self, endpoint: quinn::Endpoint) -> anyhow::Result> { + let Some(local) = self.inner.local() else { + bail!("cannot listen on a remote service"); + }; + let handler: Handler = Arc::new(move |msg, rx, tx| { + let local = local.clone(); + Box::pin(match msg { + ComputeProtocol::Sqr(msg) => local.send((msg, tx)), + ComputeProtocol::Sum(msg) => local.send((msg, tx, rx)), + ComputeProtocol::Fibonacci(msg) => local.send((msg, tx)), + ComputeProtocol::Multiply(msg) => local.send((msg, tx, rx)), + }) + }); + Ok(AbortOnDropHandle::new(task::spawn(listen( + endpoint, handler, + )))) + } + + pub async fn sqr(&self, num: u64) -> anyhow::Result> { + let msg = Sqr { num }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = oneshot::channel(); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn sum(&self) -> anyhow::Result<(spsc::Sender, oneshot::Receiver)> { + let msg = Sum; + match self.inner.request().await? { + Request::Local(request) => { + let (num_tx, num_rx) = spsc::channel(10); + let (sum_tx, sum_rx) = oneshot::channel(); + request.send((msg, sum_tx, num_rx)).await?; + Ok((num_tx, sum_rx)) + } + Request::Remote(request) => { + let (tx, rx) = request.write(msg).await?; + Ok((tx.into(), rx.into())) + } + } + } + + pub async fn fibonacci(&self, max: u64) -> anyhow::Result> { + let msg = Fibonacci { max }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = spsc::channel(128); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn multiply( + &self, + initial: u64, + ) -> anyhow::Result<(spsc::Sender, spsc::Receiver)> { + let msg = Multiply { initial }; + match self.inner.request().await? { + Request::Local(request) => { + let (in_tx, in_rx) = spsc::channel(128); + let (out_tx, out_rx) = spsc::channel(128); + request.send((msg, out_tx, in_rx)).await?; + Ok((in_tx, out_rx)) + } + Request::Remote(request) => { + let (tx, rx) = request.write(msg).await?; + Ok((tx.into(), rx.into())) + } + } + } +} + +// Local usage example +async fn local() -> anyhow::Result<()> { + let api = ComputeActor::local(); + + // Test Sqr + let rx = api.sqr(5).await?; + println!("Local: 5^2 = {}", rx.await?); + + // Test Sum + let (mut tx, rx) = api.sum().await?; + tx.send(1).await?; + tx.send(2).await?; + tx.send(3).await?; + drop(tx); + println!("Local: sum of [1, 2, 3] = {}", rx.await?); + + // Test Fibonacci + let mut rx = api.fibonacci(10).await?; + print!("Local: Fibonacci up to 10 = "); + while let Some(num) = rx.recv().await? { + print!("{} ", num); + } + println!(); + + // Test Multiply + let (mut in_tx, mut out_rx) = api.multiply(3).await?; + in_tx.send(2).await?; + in_tx.send(4).await?; + in_tx.send(6).await?; + drop(in_tx); + print!("Local: 3 * [2, 4, 6] = "); + while let Some(num) = out_rx.recv().await? 
{ + print!("{} ", num); + } + println!(); + + Ok(()) +} + +// Remote usage example +async fn remote() -> anyhow::Result<()> { + let port = 10114; + let (server, cert) = + make_server_endpoint(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port).into())?; + let client = + make_client_endpoint(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0).into(), &[&cert])?; + let compute = ComputeActor::local(); + let handle = compute.listen(server)?; + let api = ComputeApi::connect(client, SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into())?; + + // Test Sqr + let rx = api.sqr(4).await?; + println!("Remote: 4^2 = {}", rx.await?); + + // Test Sum + let (mut tx, rx) = api.sum().await?; + tx.send(4).await?; + tx.send(5).await?; + tx.send(6).await?; + drop(tx); + println!("Remote: sum of [4, 5, 6] = {}", rx.await?); + + // Test Fibonacci + let mut rx = api.fibonacci(20).await?; + print!("Remote: Fibonacci up to 20 = "); + while let Some(num) = rx.recv().await? { + print!("{} ", num); + } + println!(); + + // Test Multiply + let (mut in_tx, mut out_rx) = api.multiply(5).await?; + in_tx.send(1).await?; + in_tx.send(2).await?; + in_tx.send(3).await?; + drop(in_tx); + print!("Remote: 5 * [1, 2, 3] = "); + while let Some(num) = out_rx.recv().await? { + print!("{} ", num); + } + println!(); + + drop(handle); + Ok(()) +} + +// Benchmark function using the new ComputeApi +async fn bench(api: ComputeApi, n: u64) -> anyhow::Result<()> { + // Individual RPCs (sequential) + { + let mut sum = 0; + let t0 = std::time::Instant::now(); + for i in 0..n { + sum += api.sqr(i).await?.await?; + if i % 10000 == 0 { + print!("."); + io::stdout().flush()?; + } + } + let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round() as u64; + assert_eq!(sum, sum_of_squares(n)); + clear_line()?; + println!("RPC seq {} rps", rps.separate_with_underscores()); + } + + // Parallel RPCs + { + let t0 = std::time::Instant::now(); + let api = api.clone(); + let reqs = n0_future::stream::iter((0..n).map(move |i| { + let api = api.clone(); + async move { anyhow::Ok(api.sqr(i).await?.await?) } + })); + let resp: Vec<_> = reqs.buffered_unordered(32).try_collect().await?; + let sum = resp.into_iter().sum::(); + let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round() as u64; + assert_eq!(sum, sum_of_squares(n)); + clear_line()?; + println!("RPC par {} rps", rps.separate_with_underscores()); + } + + // Sequential streaming (using Multiply instead of MultiplyUpdate) + { + let t0 = std::time::Instant::now(); + let (mut send, mut recv) = api.multiply(2).await?; + let handle = tokio::task::spawn(async move { + for i in 0..n { + send.send(i).await?; + } + Ok::<(), io::Error>(()) + }); + let mut sum = 0; + let mut i = 0; + while let Some(res) = recv.recv().await? 
{ + sum += res; + if i % 10000 == 0 { + print!("."); + io::stdout().flush()?; + } + i += 1; + } + let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round() as u64; + assert_eq!(sum, (0..n).map(|x| x * 2).sum::()); + clear_line()?; + println!("bidi seq {} rps", rps.separate_with_underscores()); + handle.await??; + } + + Ok(()) +} + +// Helper function to compute the sum of squares +fn sum_of_squares(n: u64) -> u128 { + (0..n).map(|x| (x * x) as u128).sum() +} + +// Helper function to clear the current line +fn clear_line() -> io::Result<()> { + io::stdout().write_all(b"\r\x1b[K")?; + io::stdout().flush()?; + Ok(()) +} + +// Simple benchmark sending oneshot senders via an mpsc channel +pub async fn reference_bench(n: u64) -> anyhow::Result<()> { + // Create an mpsc channel to send oneshot senders + let (tx, mut rx) = tokio::sync::mpsc::channel::>(32); + + // Spawn a task to respond to all oneshot senders + tokio::spawn(async move { + while let Some(sender) = rx.recv().await { + // Immediately send a fixed response (42) back through the oneshot sender + sender.send(42).ok(); + } + Ok::<(), io::Error>(()) + }); + + // Sequential oneshot sends + { + let mut sum = 0; + let t0 = std::time::Instant::now(); + for i in 0..n { + let (send, recv) = tokio::sync::oneshot::channel(); + tx.send(send).await?; + sum += recv.await?; + if i % 10000 == 0 { + print!("."); + io::stdout().flush()?; + } + } + let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round() as u64; + assert_eq!(sum, 42 * n); // Each response is 42 + clear_line()?; + println!("Reference seq {} rps", rps.separate_with_underscores()); + } + + // Parallel oneshot sends + { + let t0 = std::time::Instant::now(); + let reqs = n0_future::stream::iter((0..n).map(|_| async { + let (send, recv) = tokio::sync::oneshot::channel(); + tx.send(send).await?; + anyhow::Ok(recv.await?) 
+ })); + let resp: Vec<_> = reqs.buffered_unordered(32).try_collect().await?; + let sum = resp.into_iter().sum::(); + let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round() as u64; + assert_eq!(sum, 42 * n); // Each response is 42 + clear_line()?; + println!("Reference par {} rps", rps.separate_with_underscores()); + } + + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt().init(); + println!("Local use"); + local().await?; + println!("Remote use"); + remote().await?; + + let api = ComputeActor::local(); + bench(api, 1000000).await?; + + reference_bench(1000000).await?; + Ok(()) +} diff --git a/examples/derive.rs b/examples/derive.rs new file mode 100644 index 00000000..842dbe65 --- /dev/null +++ b/examples/derive.rs @@ -0,0 +1,225 @@ +use std::{ + collections::BTreeMap, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, +}; + +use anyhow::bail; +use n0_future::task::{self, AbortOnDropHandle}; +use quic_rpc::{ + channel::{oneshot, spsc}, + rpc::{listen, Handler}, + util::{make_client_endpoint, make_server_endpoint}, + Client, LocalSender, Request, Service, WithChannels, +}; +// Import the macro +use quic_rpc_derive::rpc_requests; +use serde::{Deserialize, Serialize}; +use tracing::info; + +/// A simple storage service, just to try it out +#[derive(Debug, Clone, Copy)] +struct StorageService; + +impl Service for StorageService {} + +#[derive(Debug, Serialize, Deserialize)] +struct Get { + key: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct List; + +#[derive(Debug, Serialize, Deserialize)] +struct Set { + key: String, + value: String, +} + +// Use the macro to generate both the StorageProtocol and StorageMessage enums +// plus implement Channels for each type +#[rpc_requests(StorageService, StorageMessage)] +#[derive(Serialize, Deserialize)] +enum StorageProtocol { + #[rpc(tx=oneshot::Sender>)] + Get(Get), + #[rpc(tx=oneshot::Sender<()>)] + Set(Set), + #[rpc(tx=spsc::Sender)] + List(List), +} + +struct StorageActor { + recv: tokio::sync::mpsc::Receiver, + state: BTreeMap, +} + +impl StorageActor { + pub fn local() -> StorageApi { + let (tx, rx) = tokio::sync::mpsc::channel(1); + let actor = Self { + recv: rx, + state: BTreeMap::new(), + }; + n0_future::task::spawn(actor.run()); + let local = LocalSender::::from(tx); + StorageApi { + inner: local.into(), + } + } + + async fn run(mut self) { + while let Some(msg) = self.recv.recv().await { + self.handle(msg).await; + } + } + + async fn handle(&mut self, msg: StorageMessage) { + match msg { + StorageMessage::Get(get) => { + info!("get {:?}", get); + let WithChannels { tx, inner, .. } = get; + tx.send(self.state.get(&inner.key).cloned()).await.ok(); + } + StorageMessage::Set(set) => { + info!("set {:?}", set); + let WithChannels { tx, inner, .. } = set; + self.state.insert(inner.key, inner.value); + tx.send(()).await.ok(); + } + StorageMessage::List(list) => { + info!("list {:?}", list); + let WithChannels { mut tx, .. 
} = list; + for (key, value) in &self.state { + if tx.send(format!("{key}={value}")).await.is_err() { + break; + } + } + } + } + } +} + +struct StorageApi { + inner: Client, +} + +impl StorageApi { + pub fn connect(endpoint: quinn::Endpoint, addr: SocketAddr) -> anyhow::Result { + Ok(StorageApi { + inner: Client::quinn(endpoint, addr), + }) + } + + pub fn listen(&self, endpoint: quinn::Endpoint) -> anyhow::Result> { + let Some(local) = self.inner.local() else { + bail!("cannot listen on a remote service"); + }; + let handler: Handler = Arc::new(move |msg, _, tx| { + let local = local.clone(); + Box::pin(match msg { + StorageProtocol::Get(msg) => local.send((msg, tx)), + StorageProtocol::Set(msg) => local.send((msg, tx)), + StorageProtocol::List(msg) => local.send((msg, tx)), + }) + }); + Ok(AbortOnDropHandle::new(task::spawn(listen( + endpoint, handler, + )))) + } + + pub async fn get(&self, key: String) -> anyhow::Result>> { + let msg = Get { key }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = oneshot::channel(); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn list(&self) -> anyhow::Result> { + let msg = List; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = spsc::channel(10); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn set(&self, key: String, value: String) -> anyhow::Result> { + let msg = Set { key, value }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = oneshot::channel(); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } +} + +async fn local() -> anyhow::Result<()> { + let api = StorageActor::local(); + api.set("hello".to_string(), "world".to_string()) + .await? + .await?; + let value = api.get("hello".to_string()).await?.await?; + let mut list = api.list().await?; + while let Some(value) = list.recv().await? { + println!("list value = {:?}", value); + } + println!("value = {:?}", value); + Ok(()) +} + +async fn remote() -> anyhow::Result<()> { + let port = 10113; + let (server, cert) = + make_server_endpoint(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port).into())?; + let client = + make_client_endpoint(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0).into(), &[&cert])?; + let store = StorageActor::local(); + let handle = store.listen(server)?; + let api = StorageApi::connect(client, SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into())?; + api.set("hello".to_string(), "world".to_string()) + .await? + .await?; + api.set("goodbye".to_string(), "world".to_string()) + .await? + .await?; + let value = api.get("hello".to_string()).await?.await?; + println!("value = {:?}", value); + let mut list = api.list().await?; + while let Some(value) = list.recv().await? 
{ + println!("list value = {:?}", value); + } + drop(handle); + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt().init(); + println!("Local use"); + local().await?; + println!("Remote use"); + remote().await?; + Ok(()) +} diff --git a/examples/errors.rs b/examples/errors.rs deleted file mode 100644 index a8e015e0..00000000 --- a/examples/errors.rs +++ /dev/null @@ -1,76 +0,0 @@ -use std::result; - -use derive_more::{Display, From, TryInto}; -use quic_rpc::{message::RpcMsg, RpcClient, RpcServer, Service}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Serialize, Deserialize)] -struct WriteRequest(String, Vec); - -#[derive(Debug, Serialize, Deserialize, From, TryInto)] -enum IoRequest { - Write(WriteRequest), -} - -/// Serializable wire error type. There has to be a From instance from the convenience error type. -/// -/// The RPC client sees this type directly. -#[derive(Debug, Display, Serialize, Deserialize)] -struct WriteError(String); - -impl std::error::Error for WriteError {} - -impl From for WriteError { - fn from(e: anyhow::Error) -> Self { - WriteError(format!("{e:?}")) - } -} - -#[derive(Debug, Serialize, Deserialize, From, TryInto)] -enum IoResponse { - Write(result::Result<(), WriteError>), -} - -#[derive(Debug, Clone)] -struct IoService; - -impl Service for IoService { - type Req = IoRequest; - type Res = IoResponse; -} - -impl RpcMsg for WriteRequest { - type Response = result::Result<(), WriteError>; -} - -#[derive(Debug, Clone, Copy)] -struct Fs; - -impl Fs { - /// write a file, returning the convenient anyhow::Result - async fn write(self, _req: WriteRequest) -> anyhow::Result<()> { - Ok(()) - } -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let fs = Fs; - let (server, client) = quic_rpc::transport::flume::channel(1); - let client = RpcClient::::new(client); - let server = RpcServer::new(server); - let handle = tokio::task::spawn(async move { - for _ in 0..1 { - let (req, chan) = server.accept().await?.read_first().await?; - match req { - IoRequest::Write(req) => chan.rpc_map_err(req, fs, Fs::write).await, - }? - } - anyhow::Ok(()) - }); - client - .rpc(WriteRequest("hello".to_string(), vec![0u8; 32])) - .await??; - handle.await??; - Ok(()) -} diff --git a/examples/macro.rs b/examples/macro.rs deleted file mode 100644 index 1a5f2b6c..00000000 --- a/examples/macro.rs +++ /dev/null @@ -1,158 +0,0 @@ -mod store_rpc { - use std::fmt::Debug; - - use quic_rpc::rpc_service; - use serde::{Deserialize, Serialize}; - - pub type Cid = [u8; 32]; - - #[derive(Debug, Serialize, Deserialize)] - pub struct Put(pub Vec); - #[derive(Debug, Serialize, Deserialize)] - pub struct PutResponse(pub Cid); - - #[derive(Debug, Serialize, Deserialize)] - pub struct Get(pub Cid); - #[derive(Debug, Serialize, Deserialize)] - pub struct GetResponse(pub Vec); - - #[derive(Debug, Serialize, Deserialize)] - pub struct PutFile; - #[derive(Debug, Serialize, Deserialize)] - pub struct PutFileUpdate(pub Vec); - #[derive(Debug, Serialize, Deserialize)] - pub struct PutFileResponse(pub Cid); - - #[derive(Debug, Serialize, Deserialize)] - pub struct GetFile(pub Cid); - #[derive(Debug, Serialize, Deserialize)] - pub struct GetFileResponse(pub Vec); - - #[derive(Debug, Serialize, Deserialize)] - pub struct ConvertFile; - #[derive(Debug, Serialize, Deserialize)] - pub struct ConvertFileUpdate(pub Vec); - #[derive(Debug, Serialize, Deserialize)] - pub struct ConvertFileResponse(pub Vec); - - rpc_service! 
{ - Request = StoreRequest; - Response = StoreResponse; - Service = StoreService; - CreateDispatch = create_store_dispatch; - - Rpc put = Put, _ -> PutResponse; - Rpc get = Get, _ -> GetResponse; - ClientStreaming put_file = PutFile, PutFileUpdate -> PutFileResponse; - ServerStreaming get_file = GetFile, _ -> GetFileResponse; - BidiStreaming convert_file = ConvertFile, ConvertFileUpdate -> ConvertFileResponse; - } -} - -use async_stream::stream; -use futures_lite::{Stream, StreamExt}; -use futures_util::SinkExt; -use quic_rpc::{client::RpcClient, server::run_server_loop, transport::flume}; -use store_rpc::*; - -#[derive(Clone)] -pub struct Store; - -impl Store { - async fn put(self, _put: Put) -> PutResponse { - PutResponse([0; 32]) - } - - async fn get(self, _get: Get) -> GetResponse { - GetResponse(vec![]) - } - - async fn put_file( - self, - _put: PutFile, - updates: impl Stream, - ) -> PutFileResponse { - tokio::pin!(updates); - while let Some(_update) = updates.next().await {} - PutFileResponse([0; 32]) - } - - fn get_file(self, _get: GetFile) -> impl Stream + Send + 'static { - stream! { - for i in 0..3 { - yield GetFileResponse(vec![i]); - } - } - } - - fn convert_file( - self, - _convert: ConvertFile, - updates: impl Stream + Send + 'static, - ) -> impl Stream + Send + 'static { - stream! { - tokio::pin!(updates); - while let Some(msg) = updates.next().await { - yield ConvertFileResponse(msg.0); - } - } - } -} - -create_store_dispatch!(Store, dispatch_store_request); -// create_store_client!(StoreClient); - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - let (server, client) = flume::channel(1); - let server_handle = tokio::task::spawn(async move { - let target = Store; - run_server_loop(StoreService, server, target, dispatch_store_request).await - }); - let client = RpcClient::::new(client); - - // a rpc call - for i in 0..3 { - println!("a rpc call [{i}]"); - let client = client.clone(); - tokio::task::spawn(async move { - let res = client.rpc(Get([0u8; 32])).await; - println!("rpc res [{i}]: {res:?}"); - }); - } - - // server streaming call - println!("a server streaming call"); - let mut s = client.server_streaming(GetFile([0u8; 32])).await?; - while let Some(res) = s.next().await { - println!("streaming res: {res:?}"); - } - - // client streaming call - println!("a client streaming call"); - let (mut send, recv) = client.client_streaming(PutFile).await?; - tokio::task::spawn(async move { - for i in 0..3 { - send.send(PutFileUpdate(vec![i])).await.unwrap(); - } - }); - let res = recv.await?; - println!("client stremaing res: {res:?}"); - - // bidi streaming call - println!("a bidi streaming call"); - let (mut send, mut recv) = client.bidi(ConvertFile).await?; - tokio::task::spawn(async move { - for i in 0..3 { - send.send(ConvertFileUpdate(vec![i])).await.unwrap(); - } - }); - while let Some(res) = recv.next().await { - println!("bidi res: {res:?}"); - } - - // dropping the client will cause the server to terminate - drop(client); - server_handle.await??; - Ok(()) -} diff --git a/examples/modularize.rs b/examples/modularize.rs deleted file mode 100644 index 4143b3e5..00000000 --- a/examples/modularize.rs +++ /dev/null @@ -1,507 +0,0 @@ -//! This example shows how an RPC service can be modularized, even between different crates. -//! -//! * `app` module is the top level. it composes `iroh` plus one handler of the app itself -//! * `iroh` module composes two other services, `calc` and `clock` -//! -//! 
The [`calc`] and [`clock`] modules both expose a [`quic_rpc::Service`] in a regular fashion. -//! They do not `use` anything from `super` or `app` so they could live in their own crates -//! unchanged. - -use anyhow::Result; -use app::AppService; -use futures_lite::StreamExt; -use futures_util::SinkExt; -use quic_rpc::{client::BoxedConnector, transport::flume, Listener, RpcClient, RpcServer}; - -#[tokio::main] -async fn main() -> Result<()> { - // Spawn an inmemory connection. - // Could use quic equally (all code in this example is generic over the transport) - let (server_conn, client_conn) = flume::channel(1); - - // spawn the server - let handler = app::Handler::default(); - tokio::task::spawn(run_server(server_conn, handler)); - - // run a client demo - client_demo(BoxedConnector::::new(client_conn)).await?; - - Ok(()) -} - -async fn run_server>(server_conn: C, handler: app::Handler) { - let server = RpcServer::::new(server_conn); - server - .accept_loop(move |req, chan| handler.clone().handle_rpc_request(req, chan)) - .await -} - -pub async fn client_demo(conn: BoxedConnector) -> Result<()> { - let rpc_client = RpcClient::::new(conn); - let client = app::Client::new(rpc_client.clone()); - - // call a method from the top-level app client - let res = client.app_version().await?; - println!("app_version: {res:?}"); - - // call a method from the wrapped iroh.calc client - let res = client.iroh.calc.add(40, 2).await?; - println!("iroh.calc.add: {res:?}"); - - // can also do "raw" calls without using the wrapped clients - let res = rpc_client - .clone() - .map::() - .map::() - .rpc(calc::AddRequest(19, 4)) - .await?; - println!("iroh.calc.add (raw): {res:?}"); - - let (mut sink, res) = rpc_client - .map::() - .map::() - .client_streaming(calc::SumRequest) - .await?; - sink.send(calc::SumUpdate(4)).await.unwrap(); - sink.send(calc::SumUpdate(8)).await.unwrap(); - sink.send(calc::SumUpdate(30)).await.unwrap(); - drop(sink); - let res = res.await?; - println!("iroh.calc.sum (raw): {res:?}"); - - // call a server-streaming method from the wrapped iroh.clock client - let mut stream = client.iroh.clock.tick().await?; - while let Some(tick) = stream.try_next().await? { - println!("iroh.clock.tick: {tick}"); - } - Ok(()) -} - -mod app { - //! This is the app-specific code. - //! - //! It composes all of `iroh` (which internally composes two other modules) and adds an - //! application specific RPC. - //! - //! It could also easily compose services from other crates or internal modules. 
- - use anyhow::Result; - use derive_more::{From, TryInto}; - use quic_rpc::{message::RpcMsg, server::RpcChannel, Listener, RpcClient, Service}; - use serde::{Deserialize, Serialize}; - - use super::iroh; - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Request { - Iroh(iroh::Request), - AppVersion(AppVersionRequest), - } - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Response { - Iroh(iroh::Response), - AppVersion(AppVersionResponse), - } - - #[derive(Debug, Serialize, Deserialize)] - pub struct AppVersionRequest; - - impl RpcMsg for AppVersionRequest { - type Response = AppVersionResponse; - } - - #[derive(Debug, Serialize, Deserialize)] - pub struct AppVersionResponse(pub String); - - #[derive(Copy, Clone, Debug)] - pub struct AppService; - impl Service for AppService { - type Req = Request; - type Res = Response; - } - - #[derive(Clone)] - pub struct Handler { - iroh: iroh::Handler, - app_version: String, - } - - impl Default for Handler { - fn default() -> Self { - Self { - iroh: iroh::Handler::default(), - app_version: "v0.1-alpha".to_string(), - } - } - } - - impl Handler { - pub async fn handle_rpc_request>( - self, - req: Request, - chan: RpcChannel, - ) -> Result<()> { - match req { - Request::Iroh(req) => { - self.iroh - .handle_rpc_request(req, chan.map().boxed()) - .await? - } - Request::AppVersion(req) => chan.rpc(req, self, Self::on_version).await?, - }; - Ok(()) - } - - pub async fn on_version(self, _req: AppVersionRequest) -> AppVersionResponse { - AppVersionResponse(self.app_version.clone()) - } - } - - #[derive(Debug, Clone)] - pub struct Client { - pub iroh: iroh::Client, - client: RpcClient, - } - - impl Client { - pub fn new(client: RpcClient) -> Self { - Self { - client: client.clone(), - iroh: iroh::Client::new(client.map().boxed()), - } - } - - pub async fn app_version(&self) -> Result { - let res = self.client.rpc(AppVersionRequest).await?; - Ok(res.0) - } - } -} - -mod iroh { - //! This module composes two sub-services. Think `iroh` crate which exposes services and - //! clients for iroh-bytes and iroh-gossip or so. - //! It uses only the `calc` and `clock` modules and nothing else. - - use anyhow::Result; - use derive_more::{From, TryInto}; - use quic_rpc::{server::RpcChannel, RpcClient, Service}; - use serde::{Deserialize, Serialize}; - - use super::{calc, clock}; - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Request { - Calc(calc::Request), - Clock(clock::Request), - } - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Response { - Calc(calc::Response), - Clock(clock::Response), - } - - #[derive(Copy, Clone, Debug)] - pub struct IrohService; - impl Service for IrohService { - type Req = Request; - type Res = Response; - } - - #[derive(Clone, Default)] - pub struct Handler { - calc: calc::Handler, - clock: clock::Handler, - } - - impl Handler { - pub async fn handle_rpc_request( - self, - req: Request, - chan: RpcChannel, - ) -> Result<()> { - match req { - Request::Calc(req) => { - self.calc - .handle_rpc_request(req, chan.map().boxed()) - .await? - } - Request::Clock(req) => { - self.clock - .handle_rpc_request(req, chan.map().boxed()) - .await? 
- } - } - Ok(()) - } - } - - #[derive(Debug, Clone)] - pub struct Client { - pub calc: calc::Client, - pub clock: clock::Client, - } - - impl Client { - pub fn new(client: RpcClient) -> Self { - Self { - calc: calc::Client::new(client.clone().map().boxed()), - clock: clock::Client::new(client.clone().map().boxed()), - } - } - } -} - -mod calc { - //! This is a library providing a service, and a client. E.g. iroh-bytes or iroh-hypermerge. - //! It does not use any `super` imports, it is completely decoupled. - - use std::fmt::Debug; - - use anyhow::{bail, Result}; - use derive_more::{From, TryInto}; - use futures_lite::{Stream, StreamExt}; - use quic_rpc::{ - message::{ClientStreaming, ClientStreamingMsg, Msg, RpcMsg}, - server::RpcChannel, - RpcClient, Service, - }; - use serde::{Deserialize, Serialize}; - - #[derive(Debug, Serialize, Deserialize)] - pub struct AddRequest(pub i64, pub i64); - - impl RpcMsg for AddRequest { - type Response = AddResponse; - } - - #[derive(Debug, Serialize, Deserialize)] - pub struct AddResponse(pub i64); - - #[derive(Debug, Serialize, Deserialize)] - pub struct SumRequest; - - #[derive(Debug, Serialize, Deserialize)] - pub struct SumUpdate(pub i64); - - impl Msg for SumRequest { - type Pattern = ClientStreaming; - } - - impl ClientStreamingMsg for SumRequest { - type Update = SumUpdate; - type Response = SumResponse; - } - - #[derive(Debug, Serialize, Deserialize)] - pub struct SumResponse(pub i64); - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Request { - Add(AddRequest), - Sum(SumRequest), - SumUpdate(SumUpdate), - } - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Response { - Add(AddResponse), - Sum(SumResponse), - } - - #[derive(Copy, Clone, Debug)] - pub struct CalcService; - impl Service for CalcService { - type Req = Request; - type Res = Response; - } - - #[derive(Clone, Default)] - pub struct Handler; - - impl Handler { - pub async fn handle_rpc_request( - self, - req: Request, - chan: RpcChannel, - ) -> Result<()> { - match req { - Request::Add(req) => chan.rpc(req, self, Self::on_add).await?, - Request::Sum(req) => chan.client_streaming(req, self, Self::on_sum).await?, - Request::SumUpdate(_) => bail!("Unexpected update message at start of request"), - } - Ok(()) - } - - pub async fn on_add(self, req: AddRequest) -> AddResponse { - AddResponse(req.0 + req.1) - } - - pub async fn on_sum( - self, - _req: SumRequest, - updates: impl Stream, - ) -> SumResponse { - let mut sum = 0i64; - tokio::pin!(updates); - while let Some(SumUpdate(n)) = updates.next().await { - sum += n; - } - SumResponse(sum) - } - } - - #[derive(Debug, Clone)] - pub struct Client { - client: RpcClient, - } - - impl Client { - pub fn new(client: RpcClient) -> Self { - Self { client } - } - pub async fn add(&self, a: i64, b: i64) -> anyhow::Result { - let res = self.client.rpc(AddRequest(a, b)).await?; - Ok(res.0) - } - } -} - -mod clock { - //! This is a library providing a service, and a client. E.g. iroh-bytes or iroh-hypermerge. - //! It does not use any `super` imports, it is completely decoupled. 
- - use std::{ - fmt::Debug, - sync::{Arc, RwLock}, - time::Duration, - }; - - use anyhow::Result; - use derive_more::{From, TryInto}; - use futures_lite::{stream::Boxed as BoxStream, Stream, StreamExt}; - use futures_util::TryStreamExt; - use quic_rpc::{ - message::{Msg, ServerStreaming, ServerStreamingMsg}, - server::RpcChannel, - RpcClient, Service, - }; - use serde::{Deserialize, Serialize}; - use tokio::sync::Notify; - - #[derive(Debug, Serialize, Deserialize)] - pub struct TickRequest; - - impl Msg for TickRequest { - type Pattern = ServerStreaming; - } - - impl ServerStreamingMsg for TickRequest { - type Response = TickResponse; - } - - #[derive(Debug, Serialize, Deserialize)] - pub struct TickResponse { - tick: usize, - } - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Request { - Tick(TickRequest), - } - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - pub enum Response { - Tick(TickResponse), - } - - #[derive(Copy, Clone, Debug)] - pub struct ClockService; - impl Service for ClockService { - type Req = Request; - type Res = Response; - } - - #[derive(Clone)] - pub struct Handler { - tick: Arc>, - ontick: Arc, - } - - impl Default for Handler { - fn default() -> Self { - Self::new(Duration::from_secs(1)) - } - } - - impl Handler { - pub fn new(tick_duration: Duration) -> Self { - let h = Handler { - tick: Default::default(), - ontick: Default::default(), - }; - let h2 = h.clone(); - tokio::task::spawn(async move { - loop { - tokio::time::sleep(tick_duration).await; - *h2.tick.write().unwrap() += 1; - h2.ontick.notify_waiters(); - } - }); - h - } - - pub async fn handle_rpc_request( - self, - req: Request, - chan: RpcChannel, - ) -> Result<()> { - match req { - Request::Tick(req) => chan.server_streaming(req, self, Self::on_tick).await?, - } - Ok(()) - } - - pub fn on_tick( - self, - req: TickRequest, - ) -> impl Stream + Send + 'static { - let (tx, rx) = flume::bounded(2); - tokio::task::spawn(async move { - if let Err(err) = self.on_tick0(req, tx).await { - tracing::warn!(?err, "on_tick RPC handler failed"); - } - }); - rx.into_stream() - } - - pub async fn on_tick0( - self, - _req: TickRequest, - tx: flume::Sender, - ) -> Result<()> { - loop { - let tick = *self.tick.read().unwrap(); - tx.send_async(TickResponse { tick }).await?; - self.ontick.notified().await; - } - } - } - - #[derive(Debug, Clone)] - pub struct Client { - client: RpcClient, - } - - impl Client { - pub fn new(client: RpcClient) -> Self { - Self { client } - } - pub async fn tick(&self) -> Result>> { - let res = self.client.server_streaming(TickRequest).await?; - Ok(res.map_ok(|r| r.tick).map_err(anyhow::Error::from).boxed()) - } - } -} diff --git a/examples/split/client/Cargo.toml b/examples/split/client/Cargo.toml deleted file mode 100644 index e0cbab55..00000000 --- a/examples/split/client/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "client" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -anyhow = "1.0.14" -futures = "0.3.26" -quic-rpc = { path = "../../..", features = ["quinn-transport", "macros", "test-utils"] } -quinn = { package = "iroh-quinn", version = "0.13" } -rustls = { version = "0.23", default-features = false, features = ["ring"] } -tracing-subscriber = "0.3.16" -tokio = { version = "1", features = ["full"] } -types = { path = "../types" } diff --git a/examples/split/client/src/main.rs b/examples/split/client/src/main.rs deleted file mode 
100644 index b7379aef..00000000 --- a/examples/split/client/src/main.rs +++ /dev/null @@ -1,66 +0,0 @@ -#![allow(unknown_lints, non_local_definitions)] - -use std::net::SocketAddr; - -use futures::{sink::SinkExt, stream::StreamExt}; -use quic_rpc::{ - transport::quinn::{make_insecure_client_endpoint, QuinnConnector}, - RpcClient, -}; -use types::compute::*; - -// types::create_compute_client!(ComputeClient); - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - tracing_subscriber::fmt::init(); - let server_addr: SocketAddr = "127.0.0.1:12345".parse()?; - let endpoint = make_insecure_client_endpoint("0.0.0.0:0".parse()?)?; - let client = QuinnConnector::new(endpoint, server_addr, "localhost".to_string()); - let client = RpcClient::new(client); - // let mut client = ComputeClient(client); - - // a rpc call - for i in 0..3 { - let client = client.clone(); - tokio::task::spawn(async move { - println!("rpc call: square([{i}])"); - let res = client.rpc(Sqr(i)).await; - println!("rpc res: square({i}) = {:?}", res.unwrap()); - }); - } - - // client streaming call - println!("client streaming call: sum()"); - let (mut send, recv) = client.client_streaming(Sum).await?; - tokio::task::spawn(async move { - for i in 2..4 { - println!("client streaming update: {i}"); - send.send(SumUpdate(i)).await.unwrap(); - } - }); - let res = recv.await?; - println!("client streaming res: {res:?}"); - - // server streaming call - println!("server streaming call: fibonacci(10)"); - let mut s = client.server_streaming(Fibonacci(10)).await?; - while let Some(res) = s.next().await { - println!("server streaming res: {:?}", res?); - } - - // bidi streaming call - println!("bidi streaming call: multiply(2)"); - let (mut send, mut recv) = client.bidi(Multiply(2)).await?; - tokio::task::spawn(async move { - for i in 1..3 { - println!("bidi streaming update: {i}"); - send.send(MultiplyUpdate(i)).await.unwrap(); - } - }); - while let Some(res) = recv.next().await { - println!("bidi streaming res: {:?}", res?); - } - - Ok(()) -} diff --git a/examples/split/server/Cargo.toml b/examples/split/server/Cargo.toml deleted file mode 100644 index 5264bdb6..00000000 --- a/examples/split/server/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "server" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -anyhow = "1.0.14" -async-stream = "0.3.3" -futures = "0.3.26" -tracing-subscriber = "0.3.16" -quic-rpc = { path = "../../..", features = ["quinn-transport", "macros", "test-utils"] } -quinn = { package = "iroh-quinn", version = "0.13" } -tokio = { version = "1", features = ["full"] } -types = { path = "../types" } diff --git a/examples/split/server/src/main.rs b/examples/split/server/src/main.rs deleted file mode 100644 index 90bdfcde..00000000 --- a/examples/split/server/src/main.rs +++ /dev/null @@ -1,75 +0,0 @@ -use std::net::SocketAddr; - -use async_stream::stream; -use futures::stream::{Stream, StreamExt}; -use quic_rpc::{ - server::run_server_loop, - transport::quinn::{make_server_endpoint, QuinnListener}, -}; -use types::compute::*; - -#[derive(Clone)] -pub struct Compute; - -types::create_compute_dispatch!(Compute, dispatch_compute_request); - -impl Compute { - async fn square(self, req: Sqr) -> SqrResponse { - SqrResponse(req.0 as u128 * req.0 as u128) - } - - async fn sum(self, _req: Sum, updates: impl Stream) -> SumResponse { - let mut sum = 0u128; - tokio::pin!(updates); - while let Some(SumUpdate(n)) = 
updates.next().await { - sum += n as u128; - } - SumResponse(sum) - } - - fn fibonacci(self, req: Fibonacci) -> impl Stream { - let mut a = 0u128; - let mut b = 1u128; - let mut n = req.0; - stream! { - while n > 0 { - yield FibonacciResponse(a); - let c = a + b; - a = b; - b = c; - n -= 1; - } - } - } - - fn multiply( - self, - req: Multiply, - updates: impl Stream, - ) -> impl Stream { - let product = req.0 as u128; - stream! { - tokio::pin!(updates); - while let Some(MultiplyUpdate(n)) = updates.next().await { - yield MultiplyResponse(product * n as u128); - } - } - } -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - tracing_subscriber::fmt::init(); - let server_addr: SocketAddr = "127.0.0.1:12345".parse()?; - let (server, _server_certs) = make_server_endpoint(server_addr)?; - let channel = QuinnListener::new(server)?; - let target = Compute; - run_server_loop( - ComputeService, - channel.clone(), - target, - dispatch_compute_request, - ) - .await?; - Ok(()) -} diff --git a/examples/split/types/Cargo.toml b/examples/split/types/Cargo.toml deleted file mode 100644 index 81b8eb16..00000000 --- a/examples/split/types/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "types" -version = "0.1.0" -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -quic-rpc = { path = "../../..", features = ["macros"] } -serde = { version = "1", features = ["derive"] } -derive_more = { version = "1", features = ["from", "try_into"] } diff --git a/examples/split/types/src/lib.rs b/examples/split/types/src/lib.rs deleted file mode 100644 index a25a6fdd..00000000 --- a/examples/split/types/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -pub mod compute { - use std::fmt::Debug; - - use quic_rpc::rpc_service; - use serde::{Deserialize, Serialize}; - - /// compute the square of a number - #[derive(Debug, Serialize, Deserialize)] - pub struct Sqr(pub u64); - #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] - pub struct SqrResponse(pub u128); - - /// sum a stream of numbers - #[derive(Debug, Serialize, Deserialize)] - pub struct Sum; - #[derive(Debug, Serialize, Deserialize)] - pub struct SumUpdate(pub u64); - #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] - pub struct SumResponse(pub u128); - - /// compute the fibonacci sequence as a stream - #[derive(Debug, Serialize, Deserialize)] - pub struct Fibonacci(pub u64); - #[derive(Debug, Serialize, Deserialize)] - pub struct FibonacciResponse(pub u128); - - /// multiply a stream of numbers, returning a stream - #[derive(Debug, Serialize, Deserialize)] - pub struct Multiply(pub u64); - #[derive(Debug, Serialize, Deserialize)] - pub struct MultiplyUpdate(pub u64); - #[derive(Debug, Serialize, Deserialize)] - pub struct MultiplyResponse(pub u128); - - rpc_service! 
{ - Request = ComputeRequest; - Response = ComputeResponse; - Service = ComputeService; - CreateDispatch = create_compute_dispatch; - - Rpc square = Sqr, _ -> SqrResponse; - ClientStreaming sum = Sum, SumUpdate -> SumResponse; - ServerStreaming fibonacci = Fibonacci, _ -> FibonacciResponse; - BidiStreaming multiply = Multiply, MultiplyUpdate -> MultiplyResponse; - } -} diff --git a/examples/storage.rs b/examples/storage.rs new file mode 100644 index 00000000..50295cbb --- /dev/null +++ b/examples/storage.rs @@ -0,0 +1,238 @@ +use std::{ + collections::BTreeMap, + net::{Ipv4Addr, SocketAddr, SocketAddrV4}, + sync::Arc, +}; + +use anyhow::bail; +use n0_future::task::{self, AbortOnDropHandle}; +use quic_rpc::{ + channel::{none::NoReceiver, oneshot, spsc}, + rpc::{listen, Handler}, + util::{make_client_endpoint, make_server_endpoint}, + Channels, Client, LocalSender, Request, Service, WithChannels, +}; +use serde::{Deserialize, Serialize}; +use tracing::info; + +/// A simple storage service, just to try it out +#[derive(Debug, Clone, Copy)] +struct StorageService; + +impl Service for StorageService {} + +#[derive(Debug, Serialize, Deserialize)] +struct Get { + key: String, +} + +impl Channels for Get { + type Rx = NoReceiver; + type Tx = oneshot::Sender>; +} + +#[derive(Debug, Serialize, Deserialize)] +struct List; + +impl Channels for List { + type Rx = NoReceiver; + type Tx = spsc::Sender; +} + +#[derive(Debug, Serialize, Deserialize)] +struct Set { + key: String, + value: String, +} + +impl Channels for Set { + type Rx = NoReceiver; + type Tx = oneshot::Sender<()>; +} + +#[derive(derive_more::From, Serialize, Deserialize)] +enum StorageProtocol { + Get(Get), + Set(Set), + List(List), +} + +#[derive(derive_more::From)] +enum StorageMessage { + Get(WithChannels), + Set(WithChannels), + List(WithChannels), +} + +struct StorageActor { + recv: tokio::sync::mpsc::Receiver, + state: BTreeMap, +} + +impl StorageActor { + pub fn local() -> StorageApi { + let (tx, rx) = tokio::sync::mpsc::channel(1); + let actor = Self { + recv: rx, + state: BTreeMap::new(), + }; + n0_future::task::spawn(actor.run()); + let local = LocalSender::::from(tx); + StorageApi { + inner: local.into(), + } + } + + async fn run(mut self) { + while let Some(msg) = self.recv.recv().await { + self.handle(msg).await; + } + } + + async fn handle(&mut self, msg: StorageMessage) { + match msg { + StorageMessage::Get(get) => { + info!("get {:?}", get); + let WithChannels { tx, inner, .. } = get; + tx.send(self.state.get(&inner.key).cloned()).await.ok(); + } + StorageMessage::Set(set) => { + info!("set {:?}", set); + let WithChannels { tx, inner, .. } = set; + self.state.insert(inner.key, inner.value); + tx.send(()).await.ok(); + } + StorageMessage::List(list) => { + info!("list {:?}", list); + let WithChannels { mut tx, .. 
} = list; + for (key, value) in &self.state { + if tx.send(format!("{key}={value}")).await.is_err() { + break; + } + } + } + } + } +} +struct StorageApi { + inner: Client, +} + +impl StorageApi { + pub fn connect(endpoint: quinn::Endpoint, addr: SocketAddr) -> anyhow::Result { + Ok(StorageApi { + inner: Client::quinn(endpoint, addr), + }) + } + + pub fn listen(&self, endpoint: quinn::Endpoint) -> anyhow::Result> { + let Some(local) = self.inner.local() else { + bail!("cannot listen on a remote service"); + }; + let handler: Handler = Arc::new(move |msg, _rx, tx| { + let local = local.clone(); + Box::pin(match msg { + StorageProtocol::Get(msg) => local.send((msg, tx)), + StorageProtocol::Set(msg) => local.send((msg, tx)), + StorageProtocol::List(msg) => local.send((msg, tx)), + }) + }); + Ok(AbortOnDropHandle::new(task::spawn(listen( + endpoint, handler, + )))) + } + + pub async fn get(&self, key: String) -> anyhow::Result>> { + let msg = Get { key }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = oneshot::channel(); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn list(&self) -> anyhow::Result> { + let msg = List; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = spsc::channel(10); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn set(&self, key: String, value: String) -> anyhow::Result> { + let msg = Set { key, value }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = oneshot::channel(); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } +} + +async fn local() -> anyhow::Result<()> { + let api = StorageActor::local(); + api.set("hello".to_string(), "world".to_string()) + .await? + .await?; + let value = api.get("hello".to_string()).await?.await?; + let mut list = api.list().await?; + while let Some(value) = list.recv().await? { + println!("list value = {:?}", value); + } + println!("value = {:?}", value); + Ok(()) +} + +async fn remote() -> anyhow::Result<()> { + let port = 10113; + let (server, cert) = + make_server_endpoint(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, port).into())?; + let client = + make_client_endpoint(SocketAddrV4::new(Ipv4Addr::UNSPECIFIED, 0).into(), &[&cert])?; + let store = StorageActor::local(); + let handle = store.listen(server)?; + let api = StorageApi::connect(client, SocketAddrV4::new(Ipv4Addr::LOCALHOST, port).into())?; + api.set("hello".to_string(), "world".to_string()) + .await? + .await?; + api.set("goodbye".to_string(), "world".to_string()) + .await? + .await?; + let value = api.get("hello".to_string()).await?.await?; + println!("value = {:?}", value); + let mut list = api.list().await?; + while let Some(value) = list.recv().await? 
{ + println!("list value = {:?}", value); + } + drop(handle); + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt().init(); + println!("Local use"); + local().await?; + println!("Remote use"); + remote().await?; + Ok(()) +} diff --git a/examples/store.rs b/examples/store.rs deleted file mode 100644 index b99edeac..00000000 --- a/examples/store.rs +++ /dev/null @@ -1,267 +0,0 @@ -#![allow(clippy::enum_variant_names)] -use std::{fmt::Debug, result}; - -use async_stream::stream; -use derive_more::{From, TryInto}; -use futures_lite::{Stream, StreamExt}; -use futures_util::SinkExt; -use quic_rpc::{ - server::RpcServerError, - transport::{flume, Connector}, - *, -}; -use serde::{Deserialize, Serialize}; - -type Cid = [u8; 32]; -#[derive(Debug, Serialize, Deserialize)] -struct Put(Vec); -#[derive(Debug, Serialize, Deserialize)] -struct Get(Cid); -#[derive(Debug, Serialize, Deserialize)] -struct PutResponse(Cid); -#[derive(Debug, Serialize, Deserialize)] -struct GetResponse(Vec); - -#[derive(Debug, Serialize, Deserialize)] -struct PutFile; - -#[derive(Debug, Serialize, Deserialize)] -struct PutFileUpdate(Vec); - -#[derive(Debug, Serialize, Deserialize)] -struct PutFileResponse(Cid); - -#[derive(Debug, Serialize, Deserialize)] -struct GetFile(Cid); - -#[derive(Debug, Serialize, Deserialize)] -struct GetFileResponse(Vec); - -#[derive(Debug, Serialize, Deserialize)] -struct ConvertFile; - -#[derive(Debug, Serialize, Deserialize)] -struct ConvertFileUpdate(Vec); - -#[derive(Debug, Serialize, Deserialize)] -struct ConvertFileResponse(Vec); - -macro_rules! request_enum { - // User entry points. - ($enum_name:ident { $variant_name:ident $($tt:tt)* }) => { - request_enum!(@ {[$enum_name] [$variant_name]} $($tt)*); - }; - - // Internal rules to categorize each value - (@ {[$enum_name:ident] [$($agg:ident)*]} $(,)? $variant_name:ident $($tt:tt)*) => { - request_enum!(@ {[$enum_name] [$($agg)* $variant_name]} $($tt)*); - }; - - // Final internal rule that generates the enum from the categorized input - (@ {[$enum_name:ident] [$($n:ident)*]} $(,)?) => { - #[derive(::std::fmt::Debug, ::derive_more::From, ::derive_more::TryInto, ::serde::Serialize, ::serde::Deserialize)] - enum $enum_name { - $($n($n),)* - } - }; -} - -request_enum! 
{ - StoreRequest2 { - Put, - Get, - PutFile, PutFileUpdate, - GetFile, - ConvertFile, ConvertFileUpdate, - } -} - -#[derive(Debug, From, TryInto, Serialize, Deserialize)] -enum StoreRequest { - Put(Put), - - Get(Get), - - PutFile(PutFile), - PutFileUpdate(PutFileUpdate), - - GetFile(GetFile), - - ConvertFile(ConvertFile), - ConvertFileUpdate(ConvertFileUpdate), -} - -#[derive(Debug, From, TryInto, Serialize, Deserialize)] -enum StoreResponse { - PutResponse(PutResponse), - GetResponse(GetResponse), - PutFileResponse(PutFileResponse), - GetFileResponse(GetFileResponse), - ConvertFileResponse(ConvertFileResponse), -} - -#[derive(Debug, Clone)] -struct StoreService; -impl Service for StoreService { - type Req = StoreRequest; - type Res = StoreResponse; -} - -declare_rpc!(StoreService, Get, GetResponse); -declare_rpc!(StoreService, Put, PutResponse); -declare_client_streaming!(StoreService, PutFile, PutFileUpdate, PutFileResponse); -declare_server_streaming!(StoreService, GetFile, GetFileResponse); -declare_bidi_streaming!( - StoreService, - ConvertFile, - ConvertFileUpdate, - ConvertFileResponse -); - -#[derive(Clone)] -struct Store; -impl Store { - async fn put(self, _put: Put) -> PutResponse { - PutResponse([0; 32]) - } - - async fn get(self, _get: Get) -> GetResponse { - GetResponse(vec![]) - } - - async fn put_file( - self, - _put: PutFile, - updates: impl Stream, - ) -> PutFileResponse { - tokio::pin!(updates); - while let Some(_update) = updates.next().await {} - PutFileResponse([0; 32]) - } - - fn get_file(self, _get: GetFile) -> impl Stream + Send + 'static { - stream! { - for i in 0..3 { - yield GetFileResponse(vec![i]); - } - } - } - - fn convert_file( - self, - _convert: ConvertFile, - updates: impl Stream + Send + 'static, - ) -> impl Stream + Send + 'static { - stream! 
{ - tokio::pin!(updates); - while let Some(msg) = updates.next().await { - yield ConvertFileResponse(msg.0); - } - } - } -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - async fn server_future>( - server: RpcServer, - ) -> result::Result<(), RpcServerError> { - let s = server; - let store = Store; - loop { - let (req, chan) = s.accept().await?.read_first().await?; - use StoreRequest::*; - let store = store.clone(); - #[rustfmt::skip] - match req { - Put(msg) => chan.rpc(msg, store, Store::put).await, - Get(msg) => chan.rpc(msg, store, Store::get).await, - PutFile(msg) => chan.client_streaming(msg, store, Store::put_file).await, - GetFile(msg) => chan.server_streaming(msg, store, Store::get_file).await, - ConvertFile(msg) => chan.bidi_streaming(msg, store, Store::convert_file).await, - PutFileUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - ConvertFileUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - }?; - } - } - - let (server, client) = flume::channel(1); - let client = RpcClient::::new(client); - let server = RpcServer::::new(server); - let server_handle = tokio::task::spawn(server_future(server)); - - // a rpc call - println!("a rpc call"); - let res = client.rpc(Get([0u8; 32])).await?; - println!("{res:?}"); - - // server streaming call - println!("a server streaming call"); - let mut s = client.server_streaming(GetFile([0u8; 32])).await?; - while let Some(res) = s.next().await { - println!("{res:?}"); - } - - // client streaming call - println!("a client streaming call"); - let (mut send, recv) = client.client_streaming(PutFile).await?; - tokio::task::spawn(async move { - for i in 0..3 { - send.send(PutFileUpdate(vec![i])).await.unwrap(); - } - }); - let res = recv.await?; - println!("{res:?}"); - - // bidi streaming call - println!("a bidi streaming call"); - let (mut send, mut recv) = client.bidi(ConvertFile).await?; - tokio::task::spawn(async move { - for i in 0..3 { - send.send(ConvertFileUpdate(vec![i])).await.unwrap(); - } - }); - while let Some(res) = recv.next().await { - println!("{res:?}"); - } - - // dropping the client will cause the server to terminate - drop(client); - server_handle.await??; - Ok(()) -} - -async fn _main_unsugared() -> anyhow::Result<()> { - use transport::Listener; - #[derive(Clone, Debug)] - struct Service; - impl crate::Service for Service { - type Req = u64; - type Res = String; - } - let (server, client) = flume::channel::(1); - let to_string_service = tokio::spawn(async move { - let (mut send, mut recv) = server.accept().await?; - while let Some(item) = recv.next().await { - let item = item?; - println!("server got: {item:?}"); - send.send(item.to_string()).await?; - } - anyhow::Ok(()) - }); - let (mut send, mut recv) = client.open().await?; - let print_result_service = tokio::spawn(async move { - while let Some(item) = recv.next().await { - let item = item?; - println!("got result: {item}"); - } - anyhow::Ok(()) - }); - for i in 0..100 { - send.send(i).await?; - } - drop(send); - to_string_service.await??; - print_result_service.await??; - Ok(()) -} diff --git a/quic-rpc-derive/Cargo.toml b/quic-rpc-derive/Cargo.toml index 80d7828f..02aa1002 100644 --- a/quic-rpc-derive/Cargo.toml +++ b/quic-rpc-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "quic-rpc-derive" -version = "0.19.0" +version = "0.50.0" edition = "2021" authors = ["Rรผdiger Klaehn "] keywords = ["api", "protocol", "network", "rpc", "macro"] @@ -16,9 +16,11 @@ proc-macro = true syn = { version = "1", features = ["full"] } quote = "1" proc-macro2 
= "1" -quic-rpc = { version = "0.19", path = ".." } [dev-dependencies] -derive_more = { version = "1", features = ["from", "try_into", "display"] } +derive_more = { version = "2", features = ["from"] } serde = { version = "1", features = ["serde_derive"] } trybuild = "1.0" +quic-rpc = { path = ".." } +tracing = "0.1.41" + diff --git a/quic-rpc-derive/src/lib.rs b/quic-rpc-derive/src/lib.rs index fee10960..32fe2ec5 100644 --- a/quic-rpc-derive/src/lib.rs +++ b/quic-rpc-derive/src/lib.rs @@ -10,183 +10,229 @@ use syn::{ Data, DeriveInput, Fields, Ident, Token, Type, }; -const SERVER_STREAMING: &str = "server_streaming"; -const CLIENT_STREAMING: &str = "client_streaming"; -const BIDI_STREAMING: &str = "bidi_streaming"; -const RPC: &str = "rpc"; -const TRY_SERVER_STREAMING: &str = "try_server_streaming"; -const IDENTS: [&str; 5] = [ - SERVER_STREAMING, - CLIENT_STREAMING, - BIDI_STREAMING, - RPC, - TRY_SERVER_STREAMING, -]; - -fn generate_rpc_impls( - pat: &str, - mut args: RpcArgs, +// Helper function for error reporting +fn error_tokens(span: Span, message: &str) -> TokenStream { + syn::Error::new(span, message).to_compile_error().into() +} + +/// The only attribute we care about +const ATTR_NAME: &str = "rpc"; +/// the tx type name +const TX_ATTR: &str = "tx"; +/// the rx type name +const RX_ATTR: &str = "rx"; +/// Fully qualified path to the default rx type +const DEFAULT_RX_TYPE: &str = "::quic_rpc::channel::none::NoReceiver"; + +fn generate_channels_impl( + mut args: NamedTypeArgs, service_name: &Ident, request_type: &Type, attr_span: Span, ) -> syn::Result { - let res = match pat { - RPC => { - let response = args.get("response", pat, attr_span)?; - quote! { - impl ::quic_rpc::pattern::rpc::RpcMsg<#service_name> for #request_type { - type Response = #response; - } - } - } - SERVER_STREAMING => { - let response = args.get("response", pat, attr_span)?; - quote! { - impl ::quic_rpc::message::Msg<#service_name> for #request_type { - type Pattern = ::quic_rpc::pattern::server_streaming::ServerStreaming; - } - impl ::quic_rpc::pattern::server_streaming::ServerStreamingMsg<#service_name> for #request_type { - type Response = #response; - } - } - } - BIDI_STREAMING => { - let update = args.get("update", pat, attr_span)?; - let response = args.get("response", pat, attr_span)?; - quote! { - impl ::quic_rpc::message::Msg<#service_name> for #request_type { - type Pattern = ::quic_rpc::pattern::bidi_streaming::BidiStreaming; - } - impl ::quic_rpc::pattern::bidi_streaming::BidiStreamingMsg<#service_name> for #request_type { - type Update = #update; - type Response = #response; - } - } + // Try to get rx, default to NoReceiver if not present + // Use unwrap_or_else for a cleaner default + let rx = args.types.remove(RX_ATTR).unwrap_or_else(|| { + // We can safely unwrap here because this is a known valid type + syn::parse_str::(DEFAULT_RX_TYPE).expect("Failed to parse default rx type") + }); + let tx = args.get(TX_ATTR, attr_span)?; + + let res = quote! { + impl ::quic_rpc::Channels<#service_name> for #request_type { + type Tx = #tx; + type Rx = #rx; } - CLIENT_STREAMING => { - let update = args.get("update", pat, attr_span)?; - let response = args.get("response", pat, attr_span)?; - quote! 
{ - impl ::quic_rpc::message::Msg<#service_name> for #request_type { - type Pattern = ::quic_rpc::pattern::client_streaming::ClientStreaming; - } - impl ::quic_rpc::pattern::client_streaming::ClientStreamingMsg<#service_name> for #request_type { - type Update = #update; - type Response = #response; + }; + + args.check_empty(attr_span)?; + Ok(res) +} +fn generate_from_impls( + message_enum_name: &Ident, + variants: &[(Ident, Type)], + service_name: &Ident, + original_enum_name: &Ident, + additional_items: &mut Vec, +) { + // Generate and add From impls for the message enum + for (variant_name, inner_type) in variants { + let message_impl = quote! { + impl From<::quic_rpc::WithChannels<#inner_type, #service_name>> for #message_enum_name { + fn from(value: ::quic_rpc::WithChannels<#inner_type, #service_name>) -> Self { + #message_enum_name::#variant_name(value) } } - } - TRY_SERVER_STREAMING => { - let create_error = args.get("create_error", pat, attr_span)?; - let item_error = args.get("item_error", pat, attr_span)?; - let item = args.get("item", pat, attr_span)?; - quote! { - impl ::quic_rpc::message::Msg<#service_name> for #request_type { - type Pattern = ::quic_rpc::pattern::try_server_streaming::TryServerStreaming; - } - impl ::quic_rpc::pattern::try_server_streaming::TryServerStreamingMsg<#service_name> for #request_type { - type CreateError = #create_error; - type ItemError = #item_error; - type Item = #item; + }; + additional_items.extend(message_impl); + + // Generate and add From impls for the original enum + let original_impl = quote! { + impl From<#inner_type> for #original_enum_name { + fn from(value: #inner_type) -> Self { + #original_enum_name::#variant_name(value) } } - } - _ => return Err(syn::Error::new(attr_span, "Unknown RPC pattern")), - }; - args.check_empty(attr_span)?; - - Ok(res) + }; + additional_items.extend(original_impl); + } } #[proc_macro_attribute] pub fn rpc_requests(attr: TokenStream, item: TokenStream) -> TokenStream { let mut input = parse_macro_input!(item as DeriveInput); - let service_name = parse_macro_input!(attr as Ident); + let MacroArgs { + service_name, + message_enum_name, + } = parse_macro_input!(attr as MacroArgs); let input_span = input.span(); let data_enum = match &mut input.data { Data::Enum(data_enum) => data_enum, - _ => { - return syn::Error::new(input.span(), "RpcRequests can only be applied to enums") - .to_compile_error() - .into() - } + _ => return error_tokens(input.span(), "RpcRequests can only be applied to enums"), }; + // builder for the trait impls let mut additional_items = Vec::new(); + // types to check for uniqueness let mut types = HashSet::new(); + // variant names and types + let mut variants = Vec::new(); for variant in &mut data_enum.variants { // Check field structure for every variant let request_type = match &variant.fields { Fields::Unnamed(fields) if fields.unnamed.len() == 1 => &fields.unnamed[0].ty, _ => { - return syn::Error::new( + return error_tokens( variant.span(), "Each variant must have exactly one unnamed field", ) - .to_compile_error() - .into() } }; + variants.push((variant.ident.clone(), request_type.clone())); if !types.insert(request_type.to_token_stream().to_string()) { - return syn::Error::new(input_span, "Each variant must have a unique request type") - .to_compile_error() - .into(); + return error_tokens(input_span, "Each variant must have a unique request type"); } + // Find and remove the rpc attribute + let mut rpc_attr = None; + let mut multiple_rpc_attrs = false; - // Extract and remove RPC 
attributes - let mut rpc_attr = Vec::new(); variant.attrs.retain(|attr| { - for ident in IDENTS { - if attr.path.is_ident(ident) { - rpc_attr.push((ident, attr.clone())); - return false; + if attr.path.is_ident(ATTR_NAME) { + if rpc_attr.is_some() { + multiple_rpc_attrs = true; + true // Keep this duplicate attribute + } else { + rpc_attr = Some(attr.clone()); + false // Remove this attribute } + } else { + true // Keep other attributes } - true }); - // Fail if there are multiple RPC patterns - if rpc_attr.len() > 1 { - return syn::Error::new(variant.span(), "Each variant can only have one RPC pattern") - .to_compile_error() - .into(); + // Check for multiple rpc attributes + if multiple_rpc_attrs { + return error_tokens( + variant.span(), + "Each variant can only have one rpc attribute", + ); } - if let Some((ident, attr)) = rpc_attr.pop() { - let args = match attr.parse_args::() { + // if there is no attr, the user has to impl Channels manually + if let Some(attr) = rpc_attr { + let args = match attr.parse_args::() { Ok(info) => info, Err(e) => return e.to_compile_error().into(), }; - match generate_rpc_impls(ident, args, &service_name, request_type, attr.span()) { + match generate_channels_impl(args, &service_name, request_type, attr.span()) { Ok(impls) => additional_items.extend(impls), Err(e) => return e.to_compile_error().into(), } } } + let message_variants = variants + .iter() + .map(|(variant_name, inner_type)| { + quote! { + #variant_name(::quic_rpc::WithChannels<#inner_type, #service_name>) + } + }) + .collect::>(); + + // Extract variant names for the match pattern + let variant_names = variants.iter().map(|(name, _)| name).collect::>(); + + let message_enum = quote! { + #[derive(Debug)] + pub enum #message_enum_name { + #(#message_variants),* + } + + impl #message_enum_name { + /// Get the parent span of the message + pub fn parent_span(&self) -> tracing::Span { + let span = match self { + #(#message_enum_name::#variant_names(inner) => inner.parent_span_opt()),* + }; + span.cloned().unwrap_or_else(|| ::tracing::Span::current()) + } + } + }; + + // Generate the From implementations + generate_from_impls( + &message_enum_name, + &variants, + &service_name, + &input.ident, + &mut additional_items, + ); + let output = quote! 
{ #input + #message_enum + #(#additional_items)* }; output.into() } -struct RpcArgs { +// Parse arguments in the format (ServiceType, MessageEnumName) +struct MacroArgs { + service_name: Ident, + message_enum_name: Ident, +} + +impl Parse for MacroArgs { + fn parse(input: ParseStream) -> syn::Result { + let service_name: Ident = input.parse()?; + let _: Token![,] = input.parse()?; + let message_enum_name: Ident = input.parse()?; + + Ok(MacroArgs { + service_name, + message_enum_name, + }) + } +} + +struct NamedTypeArgs { types: BTreeMap, } -impl RpcArgs { +impl NamedTypeArgs { /// Get and remove a type from the map, failing if it doesn't exist - fn get(&mut self, key: &str, kind: &str, span: Span) -> syn::Result { + fn get(&mut self, key: &str, span: Span) -> syn::Result { self.types .remove(key) - .ok_or_else(|| syn::Error::new(span, format!("{kind} requires a {key} type"))) + .ok_or_else(|| syn::Error::new(span, format!("rpc requires a {key} type"))) } /// Fail if there are any unknown arguments remaining @@ -206,7 +252,7 @@ impl RpcArgs { } /// Parse the rpc args as a comma separated list of name=type pairs -impl Parse for RpcArgs { +impl Parse for NamedTypeArgs { fn parse(input: ParseStream) -> syn::Result { let mut types = BTreeMap::new(); @@ -227,6 +273,6 @@ impl Parse for RpcArgs { let _: Token![,] = input.parse()?; } - Ok(RpcArgs { types }) + Ok(NamedTypeArgs { types }) } } diff --git a/quic-rpc-derive/tests/compile_fail/duplicate_type.rs b/quic-rpc-derive/tests/compile_fail/duplicate_type.rs index 45db3937..5d8710ab 100644 --- a/quic-rpc-derive/tests/compile_fail/duplicate_type.rs +++ b/quic-rpc-derive/tests/compile_fail/duplicate_type.rs @@ -1,6 +1,6 @@ use quic_rpc_derive::rpc_requests; -#[rpc_requests(Service)] +#[rpc_requests(Service, Msg)] enum Enum { A(u8), B(u8), diff --git a/quic-rpc-derive/tests/compile_fail/duplicate_type.stderr b/quic-rpc-derive/tests/compile_fail/duplicate_type.stderr index 71c5e956..68155813 100644 --- a/quic-rpc-derive/tests/compile_fail/duplicate_type.stderr +++ b/quic-rpc-derive/tests/compile_fail/duplicate_type.stderr @@ -1,5 +1,8 @@ error: Each variant must have a unique request type --> tests/compile_fail/duplicate_type.rs:4:1 | -4 | enum Enum { - | ^^^^ +4 | / enum Enum { +5 | | A(u8), +6 | | B(u8), +7 | | } + | |_^ diff --git a/quic-rpc-derive/tests/compile_fail/extra_attr_types.rs b/quic-rpc-derive/tests/compile_fail/extra_attr_types.rs index 7ca34a98..5639d458 100644 --- a/quic-rpc-derive/tests/compile_fail/extra_attr_types.rs +++ b/quic-rpc-derive/tests/compile_fail/extra_attr_types.rs @@ -1,8 +1,8 @@ use quic_rpc_derive::rpc_requests; -#[rpc_requests(Service)] +#[rpc_requests(Service, Msg)] enum Enum { - #[rpc(response = Bla, fnord = Foo)] + #[rpc(tx = NoSender, rx = NoReceiver, fnord = Foo)] A(u8), } diff --git a/quic-rpc-derive/tests/compile_fail/extra_attr_types.stderr b/quic-rpc-derive/tests/compile_fail/extra_attr_types.stderr index 2de36f70..c19048bd 100644 --- a/quic-rpc-derive/tests/compile_fail/extra_attr_types.stderr +++ b/quic-rpc-derive/tests/compile_fail/extra_attr_types.stderr @@ -1,5 +1,5 @@ error: Unknown arguments provided: ["fnord"] --> tests/compile_fail/extra_attr_types.rs:5:5 | -5 | #[rpc(response = Bla, fnord = Foo)] - | ^ +5 | #[rpc(tx = NoSender, rx = NoReceiver, fnord = Foo)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/quic-rpc-derive/tests/compile_fail/multiple_fields.rs b/quic-rpc-derive/tests/compile_fail/multiple_fields.rs index b4ec1ebc..ff2257dd 100644 --- 
a/quic-rpc-derive/tests/compile_fail/multiple_fields.rs +++ b/quic-rpc-derive/tests/compile_fail/multiple_fields.rs @@ -1,6 +1,6 @@ use quic_rpc_derive::rpc_requests; -#[rpc_requests(Service)] +#[rpc_requests(Service, Msg)] enum Enum { A(u8, u8), } diff --git a/quic-rpc-derive/tests/compile_fail/multiple_fields.stderr b/quic-rpc-derive/tests/compile_fail/multiple_fields.stderr index 80fc879e..c9aa8f40 100644 --- a/quic-rpc-derive/tests/compile_fail/multiple_fields.stderr +++ b/quic-rpc-derive/tests/compile_fail/multiple_fields.stderr @@ -2,4 +2,4 @@ error: Each variant must have exactly one unnamed field --> tests/compile_fail/multiple_fields.rs:5:5 | 5 | A(u8, u8), - | ^ + | ^^^^^^^^^ diff --git a/quic-rpc-derive/tests/compile_fail/named_enum.rs b/quic-rpc-derive/tests/compile_fail/named_enum.rs index 4bd442cc..9846410f 100644 --- a/quic-rpc-derive/tests/compile_fail/named_enum.rs +++ b/quic-rpc-derive/tests/compile_fail/named_enum.rs @@ -1,6 +1,6 @@ use quic_rpc_derive::rpc_requests; -#[rpc_requests(Service)] +#[rpc_requests(Service, Msg)] enum Enum { A { name: u8 }, } diff --git a/quic-rpc-derive/tests/compile_fail/named_enum.stderr b/quic-rpc-derive/tests/compile_fail/named_enum.stderr index f13da1dc..7aa3da27 100644 --- a/quic-rpc-derive/tests/compile_fail/named_enum.stderr +++ b/quic-rpc-derive/tests/compile_fail/named_enum.stderr @@ -2,4 +2,4 @@ error: Each variant must have exactly one unnamed field --> tests/compile_fail/named_enum.rs:5:5 | 5 | A { name: u8 }, - | ^ + | ^^^^^^^^^^^^^^ diff --git a/quic-rpc-derive/tests/compile_fail/non_enum.rs b/quic-rpc-derive/tests/compile_fail/non_enum.rs index e80782d6..0afa60ab 100644 --- a/quic-rpc-derive/tests/compile_fail/non_enum.rs +++ b/quic-rpc-derive/tests/compile_fail/non_enum.rs @@ -1,6 +1,6 @@ use quic_rpc_derive::rpc_requests; -#[rpc_requests(Service)] +#[rpc_requests(Service, Msg)] struct Foo; fn main() {} \ No newline at end of file diff --git a/quic-rpc-derive/tests/compile_fail/non_enum.stderr b/quic-rpc-derive/tests/compile_fail/non_enum.stderr index c0286efe..c6187062 100644 --- a/quic-rpc-derive/tests/compile_fail/non_enum.stderr +++ b/quic-rpc-derive/tests/compile_fail/non_enum.stderr @@ -2,4 +2,4 @@ error: RpcRequests can only be applied to enums --> tests/compile_fail/non_enum.rs:4:1 | 4 | struct Foo; - | ^^^^^^ + | ^^^^^^^^^^^ diff --git a/quic-rpc-derive/tests/compile_fail/wrong_attr_types.rs b/quic-rpc-derive/tests/compile_fail/wrong_attr_types.rs index 2daca8df..4d0618ff 100644 --- a/quic-rpc-derive/tests/compile_fail/wrong_attr_types.rs +++ b/quic-rpc-derive/tests/compile_fail/wrong_attr_types.rs @@ -1,6 +1,6 @@ use quic_rpc_derive::rpc_requests; -#[rpc_requests(Service)] +#[rpc_requests(Service, Msg)] enum Enum { #[rpc(fnord = Bla)] A(u8), diff --git a/quic-rpc-derive/tests/compile_fail/wrong_attr_types.stderr b/quic-rpc-derive/tests/compile_fail/wrong_attr_types.stderr index 4c81995c..f679dbd3 100644 --- a/quic-rpc-derive/tests/compile_fail/wrong_attr_types.stderr +++ b/quic-rpc-derive/tests/compile_fail/wrong_attr_types.stderr @@ -1,5 +1,5 @@ -error: rpc requires a response type +error: rpc requires a tx type --> tests/compile_fail/wrong_attr_types.rs:5:5 | 5 | #[rpc(fnord = Bla)] - | ^ + | ^^^^^^^^^^^^^^^^^^^ diff --git a/quic-rpc-derive/tests/smoke.rs b/quic-rpc-derive/tests/smoke.rs index b6fe5211..9302814c 100644 --- a/quic-rpc-derive/tests/smoke.rs +++ b/quic-rpc-derive/tests/smoke.rs @@ -1,3 +1,4 @@ +use quic_rpc::channel::{none::NoSender, oneshot}; use quic_rpc_derive::rpc_requests; use 
serde::{Deserialize, Serialize}; @@ -33,38 +34,23 @@ fn simple() { #[derive(Debug, Serialize, Deserialize)] struct Response4; - #[rpc_requests(Service)] - #[derive(Debug, Serialize, Deserialize, derive_more::From, derive_more::TryInto)] + #[rpc_requests(Service, RequestWithChannels)] + #[derive(Debug, Serialize, Deserialize)] enum Request { - #[rpc(response=Response1)] + #[rpc(tx=oneshot::Sender<()>)] Rpc(RpcRequest), - #[server_streaming(response=Response2)] + #[rpc(tx=NoSender)] ServerStreaming(ServerStreamingRequest), - #[bidi_streaming(update= Update1, response = Response3)] + #[rpc(tx=NoSender)] BidiStreaming(BidiStreamingRequest), - #[client_streaming(update = Update2, response = Response4)] + #[rpc(tx=NoSender)] ClientStreaming(ClientStreamingRequest), - Update1(Update1), - Update2(Update2), - } - - #[derive(Debug, Serialize, Deserialize, derive_more::From, derive_more::TryInto)] - enum Response { - Response1(Response1), - Response2(Response2), - Response3(Response3), - Response4(Response4), } #[derive(Debug, Clone)] struct Service; - impl quic_rpc::Service for Service { - type Req = Request; - type Res = Response; - } - - let _ = Service; + impl quic_rpc::Service for Service {} } /// Use @@ -73,6 +59,7 @@ fn simple() { /// /// to update the snapshots #[test] +#[ignore = "stupid diffs depending on rustc version"] fn compile_fail() { let t = trybuild::TestCases::new(); t.compile_fail("tests/compile_fail/*.rs"); diff --git a/quic-rpc-iroh/Cargo.toml b/quic-rpc-iroh/Cargo.toml new file mode 100644 index 00000000..7d8d4bfb --- /dev/null +++ b/quic-rpc-iroh/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "quic-rpc-iroh" +version = "0.1.0" +edition = "2024" + +[dependencies] +anyhow = "1.0.97" +iroh = "0.34.0" +quic-rpc = { path = ".." } +tokio = { version = "1.44.1", default-features = false, features = ["sync"] } +serde = { version = "1", default-features = false } +tracing = { version = "0.1.41" } +postcard = { version = "1.1.1", features = ["alloc", "use-std"] } + +[dev-dependencies] +n0-future = { version = "0.1.2", default-features = false } +quic-rpc-derive = { path = "../quic-rpc-derive" } +tracing-subscriber = { version = "0.3.19", features = ["fmt"] } diff --git a/quic-rpc-iroh/examples/derive.rs b/quic-rpc-iroh/examples/derive.rs new file mode 100644 index 00000000..1c9bd31c --- /dev/null +++ b/quic-rpc-iroh/examples/derive.rs @@ -0,0 +1,228 @@ +use std::{collections::BTreeMap, sync::Arc}; + +use anyhow::Context; +use n0_future::task::{self, AbortOnDropHandle}; +use quic_rpc::{ + Client, LocalSender, Request, Service, WithChannels, + channel::{oneshot, spsc}, + rpc::Handler, +}; +// Import the macro +use quic_rpc_derive::rpc_requests; +use quic_rpc_iroh::{IrohRemoteConnection, listen}; +use serde::{Deserialize, Serialize}; +use tracing::info; + +/// A simple storage service, just to try it out +#[derive(Debug, Clone, Copy)] +struct StorageService; + +impl Service for StorageService {} + +#[derive(Debug, Serialize, Deserialize)] +struct Get { + key: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct List; + +#[derive(Debug, Serialize, Deserialize)] +struct Set { + key: String, + value: String, +} + +// Use the macro to generate both the StorageProtocol and StorageMessage enums +// plus implement Channels for each type +#[rpc_requests(StorageService, StorageMessage)] +#[derive(Serialize, Deserialize)] +enum StorageProtocol { + #[rpc(tx=oneshot::Sender>)] + Get(Get), + #[rpc(tx=oneshot::Sender<()>)] + Set(Set), + #[rpc(tx=spsc::Sender)] + List(List), +} + +struct 
StorageActor { + recv: tokio::sync::mpsc::Receiver, + state: BTreeMap, +} + +impl StorageActor { + pub fn local() -> StorageApi { + let (tx, rx) = tokio::sync::mpsc::channel(1); + let actor = Self { + recv: rx, + state: BTreeMap::new(), + }; + n0_future::task::spawn(actor.run()); + let local = LocalSender::::from(tx); + StorageApi { + inner: local.into(), + } + } + + async fn run(mut self) { + while let Some(msg) = self.recv.recv().await { + self.handle(msg).await; + } + } + + async fn handle(&mut self, msg: StorageMessage) { + match msg { + StorageMessage::Get(get) => { + info!("get {:?}", get); + let WithChannels { tx, inner, .. } = get; + tx.send(self.state.get(&inner.key).cloned()).await.ok(); + } + StorageMessage::Set(set) => { + info!("set {:?}", set); + let WithChannels { tx, inner, .. } = set; + self.state.insert(inner.key, inner.value); + tx.send(()).await.ok(); + } + StorageMessage::List(list) => { + info!("list {:?}", list); + let WithChannels { mut tx, .. } = list; + for (key, value) in &self.state { + if tx.send(format!("{key}={value}")).await.is_err() { + break; + } + } + } + } + } +} + +struct StorageApi { + inner: Client, +} + +impl StorageApi { + pub fn connect(endpoint: iroh::Endpoint, addr: iroh::NodeAddr) -> anyhow::Result { + Ok(StorageApi { + inner: Client::boxed(IrohRemoteConnection::new( + endpoint, + addr, + b"RPC-Storage".to_vec(), + )), + }) + } + + pub fn listen(&self, endpoint: iroh::Endpoint) -> anyhow::Result> { + let local = self + .inner + .local() + .context("can not listen on remote service")?; + let handler: Handler = Arc::new(move |msg, _, tx| { + let local = local.clone(); + Box::pin(match msg { + StorageProtocol::Get(msg) => local.send((msg, tx)), + StorageProtocol::Set(msg) => local.send((msg, tx)), + StorageProtocol::List(msg) => local.send((msg, tx)), + }) + }); + Ok(AbortOnDropHandle::new(task::spawn(listen( + endpoint, handler, + )))) + } + + pub async fn get(&self, key: String) -> anyhow::Result>> { + let msg = Get { key }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = oneshot::channel(); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn list(&self) -> anyhow::Result> { + let msg = List; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = spsc::channel(10); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } + + pub async fn set(&self, key: String, value: String) -> anyhow::Result> { + let msg = Set { key, value }; + match self.inner.request().await? { + Request::Local(request) => { + let (tx, rx) = oneshot::channel(); + request.send((msg, tx)).await?; + Ok(rx) + } + Request::Remote(request) => { + let (_tx, rx) = request.write(msg).await?; + Ok(rx.into()) + } + } + } +} + +async fn local() -> anyhow::Result<()> { + let api = StorageActor::local(); + api.set("hello".to_string(), "world".to_string()) + .await? + .await?; + let value = api.get("hello".to_string()).await?.await?; + let mut list = api.list().await?; + while let Some(value) = list.recv().await? 
{ + println!("list value = {:?}", value); + } + println!("value = {:?}", value); + Ok(()) +} + +async fn remote() -> anyhow::Result<()> { + let server = iroh::Endpoint::builder() + .discovery_n0() + .alpns(vec![b"RPC-Storage".to_vec()]) + .bind() + .await?; + let client = iroh::Endpoint::builder().bind().await?; + let addr = server.node_addr().await?; + let store = StorageActor::local(); + let handle = store.listen(server)?; + let api = StorageApi::connect(client, addr)?; + api.set("hello".to_string(), "world".to_string()) + .await? + .await?; + api.set("goodbye".to_string(), "world".to_string()) + .await? + .await?; + let value = api.get("hello".to_string()).await?.await?; + println!("value = {:?}", value); + let mut list = api.list().await?; + while let Some(value) = list.recv().await? { + println!("list value = {:?}", value); + } + drop(handle); + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_subscriber::fmt().init(); + println!("Local use"); + local().await?; + println!("Remote use"); + remote().await?; + Ok(()) +} diff --git a/quic-rpc-iroh/src/lib.rs b/quic-rpc-iroh/src/lib.rs new file mode 100644 index 00000000..adb8ceaa --- /dev/null +++ b/quic-rpc-iroh/src/lib.rs @@ -0,0 +1,144 @@ +use std::{io, sync::Arc}; + +use iroh::endpoint::{ConnectionError, RecvStream, SendStream}; +use quic_rpc::{ + RequestError, + rpc::{Handler, RemoteConnection}, + util::AsyncReadVarintExt, +}; + +/// A connection to a remote service. +/// +/// Initially this does just have the endpoint and the address. Once a +/// connection is established, it will be stored. +#[derive(Debug, Clone)] +pub struct IrohRemoteConnection(Arc); + +#[derive(Debug)] +struct IrohRemoteConnectionInner { + endpoint: iroh::Endpoint, + addr: iroh::NodeAddr, + connection: tokio::sync::Mutex>, + alpn: Vec, +} + +impl IrohRemoteConnection { + pub fn new(endpoint: iroh::Endpoint, addr: iroh::NodeAddr, alpn: Vec) -> Self { + Self(Arc::new(IrohRemoteConnectionInner { + endpoint, + addr, + connection: Default::default(), + alpn, + })) + } +} + +impl RemoteConnection for IrohRemoteConnection { + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } + + fn open_bi(&self) -> BoxedFuture> { + let this = self.0.clone(); + Box::pin(async move { + let mut guard = this.connection.lock().await; + let pair = match guard.as_mut() { + Some(conn) => { + // try to reuse the connection + match conn.open_bi().await { + Ok(pair) => pair, + Err(_) => { + // try with a new connection, just once + *guard = None; + connect_and_open_bi(&this.endpoint, &this.addr, &this.alpn, guard) + .await + .map_err(RequestError::Other)? 
+ } + } + } + None => connect_and_open_bi(&this.endpoint, &this.addr, &this.alpn, guard) + .await + .map_err(RequestError::Other)?, + }; + Ok(pair) + }) + } +} + +async fn connect_and_open_bi( + endpoint: &iroh::Endpoint, + addr: &iroh::NodeAddr, + alpn: &[u8], + mut guard: tokio::sync::MutexGuard<'_, Option>, +) -> anyhow::Result<(SendStream, RecvStream)> { + let conn = endpoint.connect(addr.clone(), alpn).await?; + let (send, recv) = conn.open_bi().await?; + *guard = Some(conn); + Ok((send, recv)) +} + +mod wasm_browser { + #![allow(dead_code)] + pub(crate) type BoxedFuture<'a, T> = + std::pin::Pin + 'a>>; +} +mod multithreaded { + #![allow(dead_code)] + pub(crate) type BoxedFuture<'a, T> = + std::pin::Pin + Send + 'a>>; +} +#[cfg(not(all(target_family = "wasm", target_os = "unknown")))] +use multithreaded::*; +use serde::de::DeserializeOwned; +use tokio::task::JoinSet; +use tracing::{Instrument, trace, trace_span, warn}; +#[cfg(all(target_family = "wasm", target_os = "unknown"))] +use wasm_browser::*; + +/// Utility function to listen for incoming connections and handle them with the provided handler +pub async fn listen(endpoint: iroh::Endpoint, handler: Handler) { + let mut request_id = 0u64; + let mut tasks = JoinSet::new(); + while let Some(incoming) = endpoint.accept().await { + let handler = handler.clone(); + let fut = async move { + let connection = match incoming.await { + Ok(connection) => connection, + Err(cause) => { + warn!("failed to accept connection {cause:?}"); + return io::Result::Ok(()); + } + }; + loop { + let (send, mut recv) = match connection.accept_bi().await { + Ok((s, r)) => (s, r), + Err(ConnectionError::ApplicationClosed(cause)) + if cause.error_code.into_inner() == 0 => + { + trace!("remote side closed connection {cause:?}"); + return Ok(()); + } + Err(cause) => { + warn!("failed to accept bi stream {cause:?}"); + return Err(cause.into()); + } + }; + let size = recv.read_varint_u64().await?.ok_or_else(|| { + io::Error::new(io::ErrorKind::UnexpectedEof, "failed to read size") + })?; + let mut buf = vec![0; size as usize]; + recv.read_exact(&mut buf) + .await + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))?; + let msg: R = postcard::from_bytes(&buf) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + let rx = recv; + let tx = send; + handler(msg, rx, tx).await?; + } + }; + let span = trace_span!("rpc", id = request_id); + tasks.spawn(fut.instrument(span)); + request_id += 1; + } +} diff --git a/src/client.rs b/src/client.rs deleted file mode 100644 index 203eb052..00000000 --- a/src/client.rs +++ /dev/null @@ -1,195 +0,0 @@ -//! Client side api -//! -//! The main entry point is [RpcClient]. 
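The `open_bi` implementation above caches the connection behind a `tokio::sync::Mutex` and, if reusing it fails, clears the slot and reconnects exactly once. A minimal, self-contained sketch of that idiom, with a hypothetical `Conn` type standing in for `iroh::endpoint::Connection`:

```rust
use std::sync::Arc;

use tokio::sync::Mutex;

/// Hypothetical stand-in for `iroh::endpoint::Connection`.
#[derive(Clone)]
struct Conn;

impl Conn {
    /// Pretend to open a bidirectional stream on this connection.
    async fn open_bi(&self) -> anyhow::Result<()> {
        Ok(())
    }
}

/// Pretend to dial a new connection.
async fn connect() -> anyhow::Result<Conn> {
    Ok(Conn)
}

/// Reuse the cached connection if possible, otherwise reconnect exactly once.
async fn open_bi_cached(slot: &Arc<Mutex<Option<Conn>>>) -> anyhow::Result<()> {
    let mut guard = slot.lock().await;
    if let Some(conn) = guard.as_ref() {
        if conn.open_bi().await.is_ok() {
            return Ok(());
        }
        // The cached connection is stale: drop it and fall through to reconnect.
        *guard = None;
    }
    let conn = connect().await?;
    conn.open_bi().await?;
    *guard = Some(conn);
    Ok(())
}
```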
-use std::{ - fmt::Debug, - marker::PhantomData, - pin::Pin, - task::{Context, Poll}, -}; - -use futures_lite::Stream; -use futures_sink::Sink; -use pin_project::pin_project; - -use crate::{ - transport::{boxed::BoxableConnector, mapped::MappedConnector, StreamTypes}, - Connector, Service, -}; - -/// A boxed connector for the given [`Service`] -pub type BoxedConnector = - crate::transport::boxed::BoxedConnector<::Res, ::Req>; - -#[cfg(feature = "flume-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "flume-transport")))] -/// A flume connector for the given [`Service`] -pub type FlumeConnector = - crate::transport::flume::FlumeConnector<::Res, ::Req>; - -#[cfg(feature = "quinn-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "quinn-transport")))] -/// A quinn connector for the given [`Service`] -pub type QuinnConnector = - crate::transport::quinn::QuinnConnector<::Res, ::Req>; - -#[cfg(feature = "hyper-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "hyper-transport")))] -/// A hyper connector for the given [`Service`] -pub type HyperConnector = - crate::transport::hyper::HyperConnector<::Res, ::Req>; - -#[cfg(feature = "iroh-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "iroh-transport")))] -/// An iroh connector for the given [`Service`] -pub type IrohConnector = - crate::transport::iroh::IrohConnector<::Res, ::Req>; - -/// Sync version of `future::stream::BoxStream`. -pub type BoxStreamSync<'a, T> = Pin + Send + Sync + 'a>>; - -/// A client for a specific service -/// -/// This is a wrapper around a [`Connector`] that serves as the entry point -/// for the client DSL. -/// -/// Type parameters: -/// -/// `S` is the service type that determines what interactions this client supports. -/// `C` is the connector that determines the transport. -#[derive(Debug)] -pub struct RpcClient> { - pub(crate) source: C, - pub(crate) _p: PhantomData, -} - -impl Clone for RpcClient { - fn clone(&self) -> Self { - Self { - source: self.source.clone(), - _p: PhantomData, - } - } -} - -/// Sink that can be used to send updates to the server for the two interaction patterns -/// that support it, [crate::message::ClientStreaming] and [crate::message::BidiStreaming]. -#[pin_project] -#[derive(Debug)] -pub struct UpdateSink(#[pin] pub C::SendSink, PhantomData) -where - C: StreamTypes; - -impl UpdateSink -where - C: StreamTypes, - T: Into, -{ - /// Create a new update sink - pub fn new(sink: C::SendSink) -> Self { - Self(sink, PhantomData) - } -} - -impl Sink for UpdateSink -where - C: StreamTypes, - T: Into, -{ - type Error = C::SendError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().0.poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - let req = item.into(); - self.project().0.start_send(req) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().0.poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().0.poll_close(cx) - } -} - -impl RpcClient -where - S: Service, - C: Connector, -{ - /// Create a new rpc client for a specific [Service] given a compatible - /// [Connector]. - /// - /// This is where a generic typed connection is converted into a client for a specific service. - /// - /// You can get a client for a nested service by calling [map](RpcClient::map). 
- pub fn new(source: C) -> Self { - Self { - source, - _p: PhantomData, - } - } -} - -impl RpcClient -where - S: Service, - C: Connector, -{ - /// Get the underlying connection - pub fn into_inner(self) -> C { - self.source - } - - /// Map this channel's service into an inner service. - /// - /// This method is available if the required bounds are upheld: - /// SNext::Req: Into + TryFrom, - /// SNext::Res: Into + TryFrom, - /// - /// Where SNext is the new service to map to and S is the current inner service. - /// - /// This method can be chained infintely. - pub fn map(self) -> RpcClient> - where - SNext: Service, - S::Req: From, - SNext::Res: TryFrom, - { - RpcClient::new(self.source.map::()) - } - - /// box - pub fn boxed(self) -> RpcClient> - where - C: BoxableConnector, - { - RpcClient::new(self.source.boxed()) - } -} - -impl AsRef for RpcClient -where - S: Service, - C: Connector, -{ - fn as_ref(&self) -> &C { - &self.source - } -} - -/// Wrap a stream with an additional item that is kept alive until the stream is dropped -#[pin_project] -pub(crate) struct DeferDrop(#[pin] pub S, pub X); - -impl Stream for DeferDrop { - type Item = S::Item; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().0.poll_next(cx) - } -} diff --git a/src/lib.rs b/src/lib.rs index 074a5731..08729933 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,114 +1,89 @@ -//! A streaming rpc system for transports that support multiple bidirectional -//! streams, such as QUIC and HTTP2. -//! -//! A lightweight memory transport is provided for cases where you want have -//! multiple cleanly separated substreams in the same process. -//! -//! For supported transports, see the [transport] module. -//! -//! # Motivation -//! -//! See the [README](https://github.com/n0-computer/quic-rpc/blob/main/README.md) -//! -//! # Example -//! ``` -//! # async fn example() -> anyhow::Result<()> { -//! use derive_more::{From, TryInto}; -//! use quic_rpc::{message::RpcMsg, RpcClient, RpcServer, Service}; -//! use serde::{Deserialize, Serialize}; -//! -//! // Define your messages -//! #[derive(Debug, Serialize, Deserialize)] -//! struct Ping; -//! -//! #[derive(Debug, Serialize, Deserialize)] -//! struct Pong; -//! -//! // Define your RPC service and its request/response types -//! #[derive(Debug, Clone)] -//! struct PingService; -//! -//! #[derive(Debug, Serialize, Deserialize, From, TryInto)] -//! enum PingRequest { -//! Ping(Ping), -//! } -//! -//! #[derive(Debug, Serialize, Deserialize, From, TryInto)] -//! enum PingResponse { -//! Pong(Pong), -//! } -//! -//! impl Service for PingService { -//! type Req = PingRequest; -//! type Res = PingResponse; -//! } -//! -//! // Define interaction patterns for each request type -//! impl RpcMsg for Ping { -//! type Response = Pong; -//! } -//! -//! // create a transport channel, here a memory channel for testing -//! let (server, client) = quic_rpc::transport::flume::channel(1); -//! -//! // client side -//! // create the rpc client given the channel and the service type -//! let mut client = RpcClient::::new(client); -//! -//! // call the service -//! let res = client.rpc(Ping).await?; -//! -//! // server side -//! // create the rpc server given the channel and the service type -//! let mut server = RpcServer::::new(server); -//! -//! let handler = Handler; -//! loop { -//! // accept connections -//! let (msg, chan) = server.accept().await?.read_first().await?; -//! // dispatch the message to the appropriate handler -//! match msg { -//! 
PingRequest::Ping(ping) => chan.rpc(ping, handler, Handler::ping).await?, -//! } -//! } -//! -//! // the handler. For a more complex example, this would contain any state -//! // needed to handle the request. -//! #[derive(Debug, Clone, Copy)] -//! struct Handler; -//! -//! impl Handler { -//! // the handle fn for a Ping request. -//! -//! // The return type is the response type for the service. -//! // Note that this must take self by value, not by reference. -//! async fn ping(self, _req: Ping) -> Pong { -//! Pong -//! } -//! } -//! # Ok(()) -//! # } -//! ``` -//! -//! # Features -#![doc = document_features::document_features!()] -#![deny(missing_docs)] -#![deny(rustdoc::broken_intra_doc_links)] +//! # A minimal RPC library for use with [iroh]. +//! +//! ## Goals +//! +//! The main goal of this library is to provide an rpc framework that is so +//! lightweight that it can also be used across async boundaries within a single +//! process without any overhead, instead of the usual practice of an mpsc channel +//! with a giant message enum where each enum case contains mpsc or oneshot +//! backchannels. +//! +//! The second goal is to lightly abstract over remote and local communication, +//! so that a system can be interacted with across processes or even across networks. +//! +//! ## Non-goals +//! +//! - Cross language interop. This is for talking from rust to rust +//! - Any kind of versioning. You have to do this yourself +//! - Making remote message passing look like local async function calls +//! - Being runtime agnostic. This is for tokio +//! +//! ## Interaction patterns +//! +//! For each request, there can be a response and update channel. Each channel +//! can be either oneshot, carry multiple messages, or be disabled. This enables +//! the typical interaction patterns known from libraries like grpc: +//! +//! - rpc: 1 request, 1 response +//! - server streaming: 1 request, multiple responses +//! - client streaming: multiple requests, 1 response +//! - bidi streaming: multiple requests, multiple responses +//! +//! as well as more complex patterns. It is, however, not possible to have multiple +//! differently typed tx channels for a single message type. +//! +//! ## Transports +//! +//! We don't abstract over the send and receive streams. These must always be +//! quinn streams, specifically streams from the [iroh quinn fork]. +//! +//! This restricts the possible rpc transports to quinn (QUIC with dial by +//! socket address) and iroh (QUIC with dial by node id). +//! +//! An upside of this is that the quinn streams can be tuned for each rpc +//! request, e.g. by setting the stream priority or by directly using more +//! advanced parts of the quinn SendStream and RecvStream APIs, such as +//! out-of-order receiving. +//! +//! ## Serialization +//! +//! Serialization is currently done using [postcard]. Messages are always +//! length prefixed with postcard varints, even in the case of oneshot +//! channels. +//! +//! Serialization only happens for cross-process rpc communication. +//! +//! However, the requirement for message enums to be serializable is present even +//! when the `rpc` feature is disabled. Since the channels live +//! outside the message, this is not a big restriction. +//! +//! ## Features +//! +//! - `rpc`: Enable the rpc features. Enabled by default. +//! By disabling this feature, all rpc-related dependencies are removed. +//! The remaining dependencies are just serde, tokio and tokio-util. +//! - `message_spans`: Enable tracing spans for messages. 
Enabled by default. +//! This is useful even without rpc, to not lose tracing context when message +//! passing. This is frequently done manually. This obviously requires +//! a dependency on tracing. +//! - `quinn_endpoint_setup`: Easy way to create quinn endpoints. This is useful +//! both for testing and for rpc on localhost. Enabled by default. +//! +//! - iroh: https://docs.rs/iroh/latest/iroh/index.html +//! - quinn: https://docs.rs/quinn/latest/quinn/index.html +//! - bytes: https://docs.rs/bytes/latest/bytes/index.html +//! - iroh quinn fork: https://docs.rs/iroh-quinn/latest/iroh-quinn/index.html #![cfg_attr(quicrpc_docsrs, feature(doc_cfg))] -use std::fmt::{Debug, Display}; +use std::{fmt::Debug, future::Future, io, marker::PhantomData, ops::Deref}; +use channel::none::NoReceiver; +use sealed::Sealed; use serde::{de::DeserializeOwned, Serialize}; -pub mod client; -pub mod message; -pub mod server; -pub mod transport; -pub use client::RpcClient; -pub use server::RpcServer; -#[cfg(feature = "macros")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "macros")))] -mod macros; - -pub mod pattern; +#[cfg(feature = "rpc")] +#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))] +pub mod util; +#[cfg(not(feature = "rpc"))] +mod util; /// Requirements for a RPC message /// @@ -124,96 +99,1273 @@ impl RpcMessage for T where { } -/// Requirements for an internal error -/// -/// All errors have to be Send, Sync and 'static so they can be sent across threads. -/// They also have to be Debug and Display so they can be logged. +/// Marker trait for a service /// -/// We don't require them to implement [std::error::Error] so we can use -/// anyhow::Error as an error type. +/// This is usually implemented by a zero-sized struct. +/// It has various bounds to make derives easier. /// -/// Instead we require them to implement `Into`, which is available -/// both for any type that implements [std::error::Error] and anyhow itself. -pub trait RpcError: Debug + Display + Into + Send + Sync + Unpin + 'static {} +/// A service acts as a scope for defining the tx and rx channels for each +/// message type, and provides some type safety when sending messages. +pub trait Service: Send + Sync + Debug + Clone + 'static {} -impl RpcError for T where T: Debug + Display + Into + Send + Sync + Unpin + 'static -{} +mod sealed { + pub trait Sealed {} +} -/// A service -/// -/// A service has request and response message types. These types have to be the -/// union of all possible request and response types for all interactions with -/// the service. +/// Sealed marker trait for a sender +pub trait Sender: Debug + Sealed {} + +/// Sealed marker trait for a receiver +pub trait Receiver: Debug + Sealed {} + +/// Trait to specify channels for a message and service +pub trait Channels { + /// The sender type, can be either spsc, oneshot or none + type Tx: Sender; + /// The receiver type, can be either spsc, oneshot or none + /// + /// For many services, the receiver is not needed, so it can be set to [`NoReceiver`]. 
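To make the `Channels` trait concrete, here is a hedged sketch of a hand-written implementation for a single request type, mirroring what the `#[rpc_requests]` macro in the example above generates. `EchoService` and `Echo` are hypothetical names, not part of this diff:

```rust
use quic_rpc::{
    channel::{none::NoReceiver, oneshot},
    Channels, Service,
};
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone)]
struct EchoService;
impl Service for EchoService {}

#[derive(Debug, Serialize, Deserialize)]
struct Echo(String);

// One response over a oneshot channel, no update stream from the caller.
impl Channels<EchoService> for Echo {
    type Tx = oneshot::Sender<String>;
    type Rx = NoReceiver;
}
```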
+ type Rx: Receiver; +} + +mod wasm_browser { + #![allow(dead_code)] + pub(crate) type BoxedFuture<'a, T> = + std::pin::Pin + 'a>>; +} +mod multithreaded { + #![allow(dead_code)] + pub(crate) type BoxedFuture<'a, T> = + std::pin::Pin + Send + 'a>>; +} +#[cfg(not(all(target_family = "wasm", target_os = "unknown")))] +use multithreaded::*; +#[cfg(all(target_family = "wasm", target_os = "unknown"))] +use wasm_browser::*; + +/// Channels that abstract over local or remote sending +pub mod channel { + use std::io; + + /// Oneshot channel, similar to tokio's oneshot channel + pub mod oneshot { + use std::{fmt::Debug, future::Future, io, pin::Pin, task}; + + use super::{RecvError, SendError}; + use crate::util::FusedOneshotReceiver; + + /// Create a local oneshot sender and receiver pair. + /// + /// This is currently using a tokio channel pair internally. + pub fn channel() -> (Sender, Receiver) { + let (tx, rx) = tokio::sync::oneshot::channel(); + (tx.into(), rx.into()) + } + + /// A generic boxed sender. + /// + /// Remote senders are always boxed, since for remote communication the boxing + /// overhead is negligible. However, boxing can also be used for local communication, + /// e.g. when applying a transform or filter to the message before sending it. + pub type BoxedSender = Box< + dyn FnOnce(T) -> crate::BoxedFuture<'static, io::Result<()>> + Send + Sync + 'static, + >; + + /// A sender that can be wrapped in a `Box>`. + /// + /// In addition to implementing `Future`, this provides a fn to check if the sender is + /// an rpc sender. + /// + /// Remote receivers are always boxed, since for remote communication the boxing + /// overhead is negligible. However, boxing can also be used for local communication, + /// e.g. when applying a transform or filter to the message before receiving it. + pub trait DynSender: Future> + Send + Sync + 'static { + fn is_rpc(&self) -> bool; + } + + /// A generic boxed receiver + /// + /// Remote receivers are always boxed, since for remote communication the boxing + /// overhead is negligible. However, boxing can also be used for local communication, + /// e.g. when applying a transform or filter to the message before receiving it. + pub type BoxedReceiver = crate::BoxedFuture<'static, io::Result>; + + /// A oneshot sender. + /// + /// Compared to a local onehsot sender, sending a message is async since in the case + /// of remote communication, sending over the wire is async. Other than that it + /// behaves like a local oneshot sender and has no overhead in the local case. + pub enum Sender { + Tokio(tokio::sync::oneshot::Sender), + /// we can't yet distinguish between local and remote boxed oneshot senders. + /// If we ever want to have local boxed oneshot senders, we need to add a + /// third variant here. + Boxed(BoxedSender), + } + + impl Debug for Sender { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Tokio(_) => f.debug_tuple("Tokio").finish(), + Self::Boxed(_) => f.debug_tuple("Boxed").finish(), + } + } + } + + impl From> for Sender { + fn from(tx: tokio::sync::oneshot::Sender) -> Self { + Self::Tokio(tx) + } + } + + impl TryFrom> for tokio::sync::oneshot::Sender { + type Error = Sender; + + fn try_from(value: Sender) -> Result { + match value { + Sender::Tokio(tx) => Ok(tx), + Sender::Boxed(_) => Err(value), + } + } + } + + impl Sender { + /// Send a message + /// + /// If this is a boxed sender that represents a remote connection, sending may yield or fail with an io error. 
+ /// Local senders will never yield, but can fail if the receiver has been closed. + pub async fn send(self, value: T) -> std::result::Result<(), SendError> { + match self { + Sender::Tokio(tx) => tx.send(value).map_err(|_| SendError::ReceiverClosed), + Sender::Boxed(f) => f(value).await.map_err(SendError::from), + } + } + } + + impl Sender { + /// Check if this is a remote sender + pub fn is_rpc(&self) -> bool + where + T: 'static, + { + match self { + Sender::Tokio(_) => false, + Sender::Boxed(_) => true, + } + } + } + + impl crate::sealed::Sealed for Sender {} + impl crate::Sender for Sender {} + + /// A oneshot receiver. + /// + /// Compared to a local oneshot receiver, receiving a message can fail not just + /// when the sender has been closed, but also when the remote connection fails. + pub enum Receiver { + Tokio(FusedOneshotReceiver), + Boxed(BoxedReceiver), + } + + impl Future for Receiver { + type Output = std::result::Result; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context) -> task::Poll { + match self.get_mut() { + Self::Tokio(rx) => Pin::new(rx).poll(cx).map_err(|_| RecvError::SenderClosed), + Self::Boxed(rx) => Pin::new(rx).poll(cx).map_err(RecvError::Io), + } + } + } + + /// Convert a tokio oneshot receiver to a receiver for this crate + impl From> for Receiver { + fn from(rx: tokio::sync::oneshot::Receiver) -> Self { + Self::Tokio(FusedOneshotReceiver(rx)) + } + } + + impl TryFrom> for tokio::sync::oneshot::Receiver { + type Error = Receiver; + + fn try_from(value: Receiver) -> Result { + match value { + Receiver::Tokio(tx) => Ok(tx.0), + Receiver::Boxed(_) => Err(value), + } + } + } + + /// Convert a function that produces a future to a receiver for this crate + impl From for Receiver + where + F: FnOnce() -> Fut, + Fut: Future> + Send + 'static, + { + fn from(f: F) -> Self { + Self::Boxed(Box::pin(f())) + } + } + + impl Debug for Receiver { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Tokio(_) => f.debug_tuple("Tokio").finish(), + Self::Boxed(_) => f.debug_tuple("Boxed").finish(), + } + } + } + + impl crate::sealed::Sealed for Receiver {} + impl crate::Receiver for Receiver {} + } + + /// SPSC channel, similar to tokio's mpsc channel + /// + /// For the rpc case, the send side can not be cloned, hence spsc instead of mpsc. + pub mod spsc { + use std::{fmt::Debug, future::Future, io, pin::Pin}; + + use super::{RecvError, SendError}; + use crate::RpcMessage; + + /// Create a local spsc sender and receiver pair, with the given buffer size. + /// + /// This is currently using a tokio channel pair internally. + pub fn channel(buffer: usize) -> (Sender, Receiver) { + let (tx, rx) = tokio::sync::mpsc::channel(buffer); + (tx.into(), rx.into()) + } + + /// Single producer, single consumer sender. + /// + /// For the local case, this wraps a tokio::sync::mpsc::Sender. However, + /// due to the fact that a stream to a remote service can not be cloned, + /// this can also not be cloned. + /// + /// This forces you to use senders in a linear way, passing out references + /// to the sender to other tasks instead of cloning it. 
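A hedged sketch of purely local use of the two channel flavors described here, using only the constructors and methods shown in this diff (`oneshot::channel`, `spsc::channel`, `send`, `recv`):

```rust
use quic_rpc::channel::{oneshot, spsc};

async fn demo() -> anyhow::Result<()> {
    // oneshot: send consumes the sender, the receiver is a Future
    let (tx, rx) = oneshot::channel();
    tx.send(42u32).await?;
    assert_eq!(rx.await?, 42);

    // spsc: the sender is not Clone, so it is used linearly via &mut
    let (mut tx, mut rx) = spsc::channel(8);
    tx.send("hello".to_string()).await?;
    drop(tx);
    // recv returns Ok(None) once the sender is gone
    while let Some(item) = rx.recv().await? {
        println!("{item}");
    }
    Ok(())
}
```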
+ pub enum Sender { + Tokio(tokio::sync::mpsc::Sender), + Boxed(Box>), + } + + impl Sender { + pub fn is_rpc(&self) -> bool + where + T: 'static, + { + match self { + Sender::Tokio(_) => false, + Sender::Boxed(x) => x.is_rpc(), + } + } + } + + impl From> for Sender { + fn from(tx: tokio::sync::mpsc::Sender) -> Self { + Self::Tokio(tx) + } + } + + impl TryFrom> for tokio::sync::mpsc::Sender { + type Error = Sender; + + fn try_from(value: Sender) -> Result { + match value { + Sender::Tokio(tx) => Ok(tx), + Sender::Boxed(_) => Err(value), + } + } + } + + /// A sender that can be wrapped in a `Box>`. + pub trait DynSender: Debug + Send + Sync + 'static { + /// Send a message. + /// + /// For the remote case, if the message can not be completely sent, + /// this must return an error and disable the channel. + fn send( + &mut self, + value: T, + ) -> Pin> + Send + '_>>; + + /// Try to send a message, returning as fast as possible if sending + /// is not currently possible. + /// + /// For the remote case, it must be guaranteed that the message is + /// either completely sent or not at all. + fn try_send( + &mut self, + value: T, + ) -> Pin> + Send + '_>>; + + /// True if this is a remote sender + fn is_rpc(&self) -> bool; + } + + /// A receiver that can be wrapped in a `Box>`. + pub trait DynReceiver: Debug + Send + Sync + 'static { + fn recv( + &mut self, + ) -> Pin, RecvError>> + Send + '_>>; + } + + impl Debug for Sender { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Tokio(x) => f + .debug_struct("Tokio") + .field("avail", &x.capacity()) + .field("cap", &x.max_capacity()) + .finish(), + Self::Boxed(inner) => f.debug_tuple("Boxed").field(&inner).finish(), + } + } + } + + impl Sender { + /// Send a message and yield until either it is sent or an error occurs. + pub async fn send(&mut self, value: T) -> std::result::Result<(), SendError> { + match self { + Sender::Tokio(tx) => { + tx.send(value).await.map_err(|_| SendError::ReceiverClosed) + } + Sender::Boxed(sink) => sink.send(value).await.map_err(SendError::from), + } + } + + /// Try to send a message, returning as fast as possible if sending + /// is not currently possible. This can be used to send ephemeral + /// messages. + /// + /// For the local case, this will immediately return false if the + /// channel is full. + /// + /// For the remote case, it will attempt to send the message and + /// return false if sending the first byte fails, otherwise yield + /// until the message is completely sent or an error occurs. This + /// guarantees that the message is sent either completely or not at + /// all. + /// + /// Returns true if the message was sent. + pub async fn try_send(&mut self, value: T) -> std::result::Result<(), SendError> { + match self { + Sender::Tokio(tx) => match tx.try_send(value) { + Ok(()) => Ok(()), + Err(tokio::sync::mpsc::error::TrySendError::Closed(_)) => { + Err(SendError::ReceiverClosed) + } + Err(tokio::sync::mpsc::error::TrySendError::Full(_)) => Ok(()), + }, + Sender::Boxed(sink) => { + sink.try_send(value).await.map_err(SendError::from)?; + Ok(()) + } + } + } + } + + impl crate::sealed::Sealed for Sender {} + impl crate::Sender for Sender {} + + pub enum Receiver { + Tokio(tokio::sync::mpsc::Receiver), + Boxed(Box>), + } + + impl Receiver { + /// Receive a message + /// + /// Returns Ok(None) if the sender has been dropped or the remote end has + /// cleanly closed the connection. + /// + /// Returns an an io error if there was an error receiving the message. 
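A hedged sketch of the `try_send` semantics described above: on a local channel that is full, the call returns without blocking and the message is simply dropped, which is what makes it suitable for ephemeral updates.

```rust
use quic_rpc::channel::spsc;

async fn ephemeral() -> anyhow::Result<()> {
    let (mut tx, mut rx) = spsc::channel(1);
    tx.send(1u64).await?; // fills the single buffer slot
    tx.try_send(2u64).await?; // channel is full: returns Ok, the value is dropped
    assert_eq!(rx.recv().await?, Some(1));
    Ok(())
}
```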
+ pub async fn recv(&mut self) -> std::result::Result, RecvError> { + match self { + Self::Tokio(rx) => Ok(rx.recv().await), + Self::Boxed(rx) => Ok(rx.recv().await?), + } + } + } + + impl From> for Receiver { + fn from(rx: tokio::sync::mpsc::Receiver) -> Self { + Self::Tokio(rx) + } + } + + impl TryFrom> for tokio::sync::mpsc::Receiver { + type Error = Receiver; + + fn try_from(value: Receiver) -> Result { + match value { + Receiver::Tokio(tx) => Ok(tx), + Receiver::Boxed(_) => Err(value), + } + } + } + + impl Debug for Receiver { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Tokio(inner) => f + .debug_struct("Tokio") + .field("avail", &inner.capacity()) + .field("cap", &inner.max_capacity()) + .finish(), + Self::Boxed(inner) => f.debug_tuple("Boxed").field(&inner).finish(), + } + } + } + + impl crate::sealed::Sealed for Receiver {} + impl crate::Receiver for Receiver {} + } + + /// No channels, used when no communication is needed + pub mod none { + use crate::sealed::Sealed; + + /// A sender that does nothing. This is used when no communication is needed. + #[derive(Debug)] + pub struct NoSender; + impl Sealed for NoSender {} + impl crate::Sender for NoSender {} + + /// A receiver that does nothing. This is used when no communication is needed. + #[derive(Debug)] + pub struct NoReceiver; + + impl Sealed for NoReceiver {} + impl crate::Receiver for NoReceiver {} + } + + /// Error when sending a oneshot or spsc message. For local communication, + /// the only thing that can go wrong is that the receiver has been dropped. + /// + /// For rpc communication, there can be any number of errors, so this is a + /// generic io error. + #[derive(Debug, thiserror::Error)] + pub enum SendError { + /// The receiver has been closed. This is the only error that can occur + /// for local communication. + #[error("receiver closed")] + ReceiverClosed, + /// The underlying io error. This can occur for remote communication, + /// due to a network error or serialization error. + #[error("io error: {0}")] + Io(#[from] io::Error), + } + + impl From for io::Error { + fn from(e: SendError) -> Self { + match e { + SendError::ReceiverClosed => io::Error::new(io::ErrorKind::BrokenPipe, e), + SendError::Io(e) => e, + } + } + } + + /// Error when receiving a oneshot or spsc message. For local communication, + /// the only thing that can go wrong is that the sender has been closed. + /// + /// For rpc communication, there can be any number of errors, so this is a + /// generic io error. + #[derive(Debug, thiserror::Error)] + pub enum RecvError { + /// The sender has been closed. This is the only error that can occur + /// for local communication. + #[error("sender closed")] + SenderClosed, + /// An io error occurred. This can occur for remote communication, + /// due to a network error or deserialization error. + #[error("io error: {0}")] + Io(#[from] io::Error), + } + + impl From for io::Error { + fn from(e: RecvError) -> Self { + match e { + RecvError::Io(e) => e, + RecvError::SenderClosed => io::Error::new(io::ErrorKind::BrokenPipe, e), + } + } + } +} + +/// A wrapper for a message with channels to send and receive it. +/// This expands the protocol message to a full message that includes the +/// active and unserializable channels. 
/// -/// Usually you will define an enum for the request and response -/// type, and use the [derive_more](https://crates.io/crates/derive_more) crate to -/// define the conversions between the enum and the actual request and response types. +/// The channel kind for rx and tx is defined by implementing the `Channels` +/// trait, either manually or using a macro. /// -/// To make a message type usable as a request for a service, implement [message::Msg] -/// for it. This is how you define the interaction patterns for each request type. +/// When the `message_spans` feature is enabled, this also includes a tracing +/// span to carry the tracing context during message passing. +pub struct WithChannels, S: Service> { + /// The inner message. + pub inner: I, + /// The return channel to send the response to. Can be set to [`crate::channel::none::NoSender`] if not needed. + pub tx: >::Tx, + /// The request channel to receive the request from. Can be set to [`NoReceiver`] if not needed. + pub rx: >::Rx, + /// The current span where the full message was created. + #[cfg(feature = "message_spans")] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "message_spans")))] + pub span: tracing::Span, +} + +impl + Debug, S: Service> Debug for WithChannels { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("") + .field(&self.inner) + .field(&self.tx) + .field(&self.rx) + .finish() + } +} + +impl, S: Service> WithChannels { + /// Get the parent span + #[cfg(feature = "message_spans")] + pub fn parent_span_opt(&self) -> Option<&tracing::Span> { + Some(&self.span) + } +} + +/// Tuple conversion from inner message and tx/rx channels to a WithChannels struct /// -/// Depending on the interaction type, you might need to implement traits that further -/// define details of the interaction. +/// For the case where you want both tx and rx channels. +impl, S: Service, Tx, Rx> From<(I, Tx, Rx)> for WithChannels +where + I: Channels, + >::Tx: From, + >::Rx: From, +{ + fn from(inner: (I, Tx, Rx)) -> Self { + let (inner, tx, rx) = inner; + Self { + inner, + tx: tx.into(), + rx: rx.into(), + #[cfg(feature = "message_spans")] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "message_spans")))] + span: tracing::Span::current(), + } + } +} + +/// Tuple conversion from inner message and tx channel to a WithChannels struct /// -/// A message type can be used for multiple services. E.g. you might have a -/// Status request that is understood by multiple services and returns a -/// standard status response. -pub trait Service: Send + Sync + Debug + Clone + 'static { - /// Type of request messages - type Req: RpcMessage; - /// Type of response messages - type Res: RpcMessage; +/// For the very common case where you just need a tx channel to send the response to. +impl From<(I, Tx)> for WithChannels +where + I: Channels, + S: Service, + >::Tx: From, +{ + fn from(inner: (I, Tx)) -> Self { + let (inner, tx) = inner; + Self { + inner, + tx: tx.into(), + rx: NoReceiver, + #[cfg(feature = "message_spans")] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "message_spans")))] + span: tracing::Span::current(), + } + } } -/// A connector to a specific service +/// Deref so you can access the inner fields directly. /// -/// This is just a trait alias for a [`transport::Connector`] with the right types. It is used -/// to make it easier to specify the bounds of a connector that matches a specific -/// service. 
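A hedged sketch of the tuple conversion and `Deref` impls described here, assuming the `Get` and `StorageService` types from the `quic-rpc-iroh` example in this PR are in scope (there the derive sets `Tx = oneshot::Sender<Option<String>>` for `Get`):

```rust
use quic_rpc::{channel::oneshot, WithChannels};

fn build(key: String) -> WithChannels<Get, StorageService> {
    let (tx, _rx) = oneshot::channel::<Option<String>>();
    // (inner, tx) -> WithChannels via the From impl; rx defaults to NoReceiver
    let msg: WithChannels<Get, StorageService> = (Get { key }, tx).into();
    // Deref lets the inner message's fields be read directly
    let _key: &String = &msg.key;
    msg
}
```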
-pub trait Connector: transport::Connector {} +/// If the inner message has fields named `tx`, `rx` or `span`, you need to use the +/// `inner` field to access them. +impl, S: Service> Deref for WithChannels { + type Target = I; -impl, S: Service> Connector for T {} + fn deref(&self) -> &Self::Target { + &self.inner + } +} -/// A listener for a specific service +/// A client to the service `S` using the local message type `M` and the remote +/// message type `R`. +/// +/// `R` is typically a serializable enum with a case for each possible message +/// type. It can be thought of as the definition of the protocol. /// -/// This is just a trait alias for a [`transport::Listener`] with the right types. It is used -/// to make it easier to specify the bounds of a listener that matches a specific -/// service. -pub trait Listener: transport::Listener {} - -impl, S: Service> Listener for T {} - -#[cfg(feature = "flume-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "flume-transport")))] -/// Create a pair of [`RpcServer`] and [`RpcClient`] for the given [`Service`] type using a flume channel -pub fn flume_channel( - size: usize, -) -> ( - RpcServer>, - RpcClient>, -) { - let (listener, connector) = transport::flume::channel(size); - (RpcServer::new(listener), RpcClient::new(connector)) -} - -#[cfg(feature = "test-utils")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "test-utils")))] -/// Create a pair of [`RpcServer`] and [`RpcClient`] for the given [`Service`] type using a quinn channel +/// `M` is typically an enum with a case for each possible message type, where +/// each case is a `WithChannels` struct that extends the inner protocol message +/// with a local tx and rx channel as well as a tracing span to allow for +/// keeping tracing context across async boundaries. /// -/// This is using a network connection using the local network. It is useful for testing remote services -/// in a more realistic way than the memory transport. -#[allow(clippy::type_complexity)] -pub fn quinn_channel() -> anyhow::Result<( - RpcServer>, - RpcClient>, -)> { - let bind_addr: std::net::SocketAddr = ([0, 0, 0, 0], 0).into(); - let (server_endpoint, cert_der) = transport::quinn::make_server_endpoint(bind_addr)?; - let addr = server_endpoint.local_addr()?; - let server = server::QuinnListener::::new(server_endpoint)?; - let server = RpcServer::new(server); - let client_endpoint = transport::quinn::make_client_endpoint(bind_addr, &[&cert_der])?; - let client = client::QuinnConnector::::new(client_endpoint, addr, "localhost".into()); - let client = RpcClient::new(client); - Ok((server, client)) +/// In some cases, `M` and `R` can be enums for a subset of the protocol. E.g. +/// if you have a subsystem that only handles a part of the messages. +/// +/// The service type `S` provides a scope for the protocol messages. It exists +/// so you can use the same message with multiple services. +#[derive(Debug)] +pub struct Client(ClientInner, PhantomData<(R, S)>); + +impl Clone for Client { + fn clone(&self) -> Self { + Self(self.0.clone(), PhantomData) + } +} + +impl From> for Client { + fn from(tx: LocalSender) -> Self { + Self(ClientInner::Local(tx.0), PhantomData) + } +} + +impl From> for Client { + fn from(tx: tokio::sync::mpsc::Sender) -> Self { + LocalSender::from(tx).into() + } +} + +impl Client { + /// Create a new client to a remote service using the given quinn `endpoint` + /// and a socket `addr` of the remote service. 
+ #[cfg(feature = "rpc")] + pub fn quinn(endpoint: quinn::Endpoint, addr: std::net::SocketAddr) -> Self { + Self::boxed(rpc::QuinnRemoteConnection::new(endpoint, addr)) + } + + /// Create a new client from a `rpc::RemoteConnection` trait object. + /// This is used from crates that want to provide other transports than quinn, + /// such as the iroh transport. + #[cfg(feature = "rpc")] + pub fn boxed(remote: impl rpc::RemoteConnection) -> Self { + Self(ClientInner::Remote(Box::new(remote)), PhantomData) + } + + /// Get the local sender. This is useful if you don't care about remote + /// requests. + pub fn local(&self) -> Option> { + match &self.0 { + ClientInner::Local(tx) => Some(tx.clone().into()), + ClientInner::Remote(..) => None, + } + } + + /// Start a request by creating a sender that can be used to send the initial + /// message to the local or remote service. + /// + /// In the local case, this is just a clone which has almost zero overhead. + /// Creating a local sender can not fail. + /// + /// In the remote case, this involves lazily creating a connection to the + /// remote side and then creating a new stream on the underlying + /// [`quinn`] or iroh connection. + /// + /// In both cases, the returned sender is fully self contained. + #[allow(clippy::type_complexity)] + pub fn request( + &self, + ) -> impl Future< + Output = Result, rpc::RemoteSender>, RequestError>, + > + 'static + where + S: Service, + M: Send + Sync + 'static, + R: 'static, + { + #[cfg(feature = "rpc")] + { + let cloned = match &self.0 { + ClientInner::Local(tx) => Request::Local(tx.clone()), + ClientInner::Remote(connection) => Request::Remote(connection.clone_boxed()), + }; + async move { + match cloned { + Request::Local(tx) => Ok(Request::Local(tx.into())), + #[cfg(feature = "rpc")] + Request::Remote(conn) => { + let (send, recv) = conn.open_bi().await?; + Ok(Request::Remote(rpc::RemoteSender::new(send, recv))) + } + } + } + } + #[cfg(not(feature = "rpc"))] + { + let ClientInner::Local(tx) = &self.0 else { + unreachable!() + }; + let tx = tx.clone().into(); + async move { Ok(Request::Local(tx)) } + } + } +} + +#[derive(Debug)] +pub(crate) enum ClientInner { + Local(tokio::sync::mpsc::Sender), + #[cfg(feature = "rpc")] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))] + Remote(Box), + #[cfg(not(feature = "rpc"))] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))] + Remote(PhantomData), +} + +impl Clone for ClientInner { + fn clone(&self) -> Self { + match self { + Self::Local(tx) => Self::Local(tx.clone()), + #[cfg(feature = "rpc")] + Self::Remote(conn) => Self::Remote(conn.clone_boxed()), + #[cfg(not(feature = "rpc"))] + Self::Remote(_) => unreachable!(), + } + } +} + +/// Error when opening a request. When cross-process rpc is disabled, this is +/// an empty enum since local requests can not fail. 
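A hedged sketch of a purely local `Client`, built from a tokio mpsc sender as described above. It reuses the `StorageMessage`/`StorageProtocol`/`StorageService`/`Set` types from the example in this PR and assumes the derive generates the usual `From<WithChannels<Set, _>>` conversion for the message enum:

```rust
use quic_rpc::{channel::oneshot, Client, Request};

async fn local_set() -> anyhow::Result<()> {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<StorageMessage>(8);
    let client: Client<StorageMessage, StorageProtocol, StorageService> = tx.into();

    // Normally an actor loop (like StorageActor) drains this receiver.
    tokio::spawn(async move { while rx.recv().await.is_some() {} });

    match client.request().await? {
        Request::Local(sender) => {
            let (otx, _orx) = oneshot::channel::<()>();
            sender
                .send((Set { key: "k".into(), value: "v".into() }, otx))
                .await?;
        }
        // This client was built from a local sender, so no remote path exists.
        Request::Remote(_) => unreachable!(),
    }
    Ok(())
}
```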
+#[derive(Debug, thiserror::Error)] +pub enum RequestError { + /// Error in quinn during connect + #[cfg(feature = "rpc")] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))] + #[error("error establishing connection: {0}")] + Connect(#[from] quinn::ConnectError), + /// Error in quinn when the connection already exists, when opening a stream pair + #[cfg(feature = "rpc")] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))] + #[error("error opening stream: {0}")] + Connection(#[from] quinn::ConnectionError), + /// Generic error for non-quinn transports + #[cfg(feature = "rpc")] + #[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))] + #[error("error opening stream: {0}")] + Other(#[from] anyhow::Error), +} + +/// Error type that subsumes all possible errors in this crate, for convenience. +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("request error: {0}")] + Request(#[from] RequestError), + #[error("send error: {0}")] + Send(#[from] channel::SendError), + #[error("recv error: {0}")] + Recv(#[from] channel::RecvError), + #[cfg(feature = "rpc")] + #[error("recv error: {0}")] + Write(#[from] rpc::WriteError), +} + +impl From for io::Error { + fn from(e: Error) -> Self { + match e { + Error::Request(e) => e.into(), + Error::Send(e) => e.into(), + Error::Recv(e) => e.into(), + #[cfg(feature = "rpc")] + Error::Write(e) => e.into(), + } + } +} + +impl From for io::Error { + fn from(e: RequestError) -> Self { + match e { + #[cfg(feature = "rpc")] + RequestError::Connect(e) => io::Error::other(e), + #[cfg(feature = "rpc")] + RequestError::Connection(e) => e.into(), + #[cfg(feature = "rpc")] + RequestError::Other(e) => io::Error::other(e), + } + } +} + +/// A local sender for the service `S` using the message type `M`. +/// +/// This is a wrapper around an in-memory channel (currently [`tokio::sync::mpsc::Sender`]), +/// that adds nice syntax for sending messages that can be converted into +/// [`WithChannels`]. +#[derive(Debug)] +#[repr(transparent)] +pub struct LocalSender(tokio::sync::mpsc::Sender, std::marker::PhantomData); + +impl Clone for LocalSender { + fn clone(&self) -> Self { + Self(self.0.clone(), PhantomData) + } +} + +impl From> for LocalSender { + fn from(tx: tokio::sync::mpsc::Sender) -> Self { + Self(tx, PhantomData) + } +} + +#[cfg(not(feature = "rpc"))] +pub mod rpc { + pub struct RemoteSender(std::marker::PhantomData<(R, S)>); +} + +#[cfg(feature = "rpc")] +#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "rpc")))] +pub mod rpc { + //! Module for cross-process RPC using [`quinn`]. + use std::{fmt::Debug, future::Future, io, marker::PhantomData, pin::Pin, sync::Arc}; + + use quinn::ConnectionError; + use serde::{de::DeserializeOwned, Serialize}; + use smallvec::SmallVec; + use tokio::task::JoinSet; + use tracing::{trace, trace_span, warn, Instrument}; + + use crate::{ + channel::{ + none::NoSender, + oneshot, + spsc::{self, DynReceiver, DynSender}, + RecvError, SendError, + }, + util::{now_or_never, AsyncReadVarintExt, WriteVarintExt}, + BoxedFuture, RequestError, RpcMessage, + }; + + /// Error that can occur when writing the initial message when doing a + /// cross-process RPC. + #[derive(Debug, thiserror::Error)] + pub enum WriteError { + /// Error writing to the stream with quinn + #[error("error writing to stream: {0}")] + Quinn(#[from] quinn::WriteError), + /// Generic IO error, e.g. when serializing the message or when using + /// other transports. 
+ #[error("error serializing: {0}")] + Io(#[from] io::Error), + } + + impl From for io::Error { + fn from(e: WriteError) -> Self { + match e { + WriteError::Io(e) => e, + WriteError::Quinn(e) => e.into(), + } + } + } + + /// Trait to abstract over a client connection to a remote service. + /// + /// This isn't really that much abstracted, since the result of open_bi must + /// still be a quinn::SendStream and quinn::RecvStream. This is just so we + /// can have different connection implementations for normal quinn connections, + /// iroh connections, and possibly quinn connections with disabled encryption + /// for performance. + /// + /// This is done as a trait instead of an enum, so we don't need an iroh + /// dependency in the main crate. + pub trait RemoteConnection: Send + Sync + Debug + 'static { + /// Boxed clone so the trait is dynable. + fn clone_boxed(&self) -> Box; + + /// Open a bidirectional stream to the remote service. + fn open_bi( + &self, + ) -> BoxedFuture>; + } + + /// A connection to a remote service. + /// + /// Initially this does just have the endpoint and the address. Once a + /// connection is established, it will be stored. + #[derive(Debug, Clone)] + pub(crate) struct QuinnRemoteConnection(Arc); + + #[derive(Debug)] + struct QuinnRemoteConnectionInner { + pub endpoint: quinn::Endpoint, + pub addr: std::net::SocketAddr, + pub connection: tokio::sync::Mutex>, + } + + impl QuinnRemoteConnection { + pub fn new(endpoint: quinn::Endpoint, addr: std::net::SocketAddr) -> Self { + Self(Arc::new(QuinnRemoteConnectionInner { + endpoint, + addr, + connection: Default::default(), + })) + } + } + + impl RemoteConnection for QuinnRemoteConnection { + fn clone_boxed(&self) -> Box { + Box::new(self.clone()) + } + + fn open_bi( + &self, + ) -> BoxedFuture> + { + let this = self.0.clone(); + Box::pin(async move { + let mut guard = this.connection.lock().await; + let pair = match guard.as_mut() { + Some(conn) => { + // try to reuse the connection + match conn.open_bi().await { + Ok(pair) => pair, + Err(_) => { + // try with a new connection, just once + *guard = None; + connect_and_open_bi(&this.endpoint, &this.addr, guard).await? + } + } + } + None => connect_and_open_bi(&this.endpoint, &this.addr, guard).await?, + }; + Ok(pair) + }) + } + } + + async fn connect_and_open_bi( + endpoint: &quinn::Endpoint, + addr: &std::net::SocketAddr, + mut guard: tokio::sync::MutexGuard<'_, Option>, + ) -> Result<(quinn::SendStream, quinn::RecvStream), RequestError> { + let conn = endpoint.connect(*addr, "localhost")?.await?; + let (send, recv) = conn.open_bi().await?; + *guard = Some(conn); + Ok((send, recv)) + } + + /// A connection to a remote service that can be used to send the initial message. 
+ #[derive(Debug)] + pub struct RemoteSender( + quinn::SendStream, + quinn::RecvStream, + std::marker::PhantomData<(R, S)>, + ); + + impl RemoteSender { + pub fn new(send: quinn::SendStream, recv: quinn::RecvStream) -> Self { + Self(send, recv, PhantomData) + } + + pub async fn write( + self, + msg: impl Into, + ) -> std::result::Result<(quinn::SendStream, quinn::RecvStream), WriteError> + where + R: Serialize, + { + let RemoteSender(mut send, recv, _) = self; + let msg = msg.into(); + let mut buf = SmallVec::<[u8; 128]>::new(); + buf.write_length_prefixed(msg)?; + send.write_all(&buf).await?; + Ok((send, recv)) + } + } + + impl From for oneshot::Receiver { + fn from(mut read: quinn::RecvStream) -> Self { + let fut = async move { + let size = read.read_varint_u64().await?.ok_or(io::Error::new( + io::ErrorKind::UnexpectedEof, + "failed to read size", + ))?; + let rest = read + .read_to_end(size as usize) + .await + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + let msg: T = postcard::from_bytes(&rest) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + io::Result::Ok(msg) + }; + oneshot::Receiver::from(|| fut) + } + } + + impl From for spsc::Receiver { + fn from(read: quinn::RecvStream) -> Self { + spsc::Receiver::Boxed(Box::new(QuinnReceiver { + recv: read, + _marker: PhantomData, + })) + } + } + + impl From for NoSender { + fn from(write: quinn::SendStream) -> Self { + let _ = write; + NoSender + } + } + + impl From for oneshot::Sender { + fn from(mut writer: quinn::SendStream) -> Self { + oneshot::Sender::Boxed(Box::new(move |value| { + Box::pin(async move { + // write via a small buffer to avoid allocation for small values + let mut buf = SmallVec::<[u8; 128]>::new(); + buf.write_length_prefixed(value)?; + writer.write_all(&buf).await?; + io::Result::Ok(()) + }) + })) + } + } + + impl From for spsc::Sender { + fn from(write: quinn::SendStream) -> Self { + spsc::Sender::Boxed(Box::new(QuinnSender { + send: write, + buffer: SmallVec::new(), + _marker: PhantomData, + })) + } + } + + struct QuinnReceiver { + recv: quinn::RecvStream, + _marker: std::marker::PhantomData, + } + + impl Debug for QuinnReceiver { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("QuinnReceiver").finish() + } + } + + impl DynReceiver for QuinnReceiver { + fn recv( + &mut self, + ) -> Pin, RecvError>> + Send + '_>> + { + Box::pin(async { + let read = &mut self.recv; + let Some(size) = read.read_varint_u64().await? 
else { + return Ok(None); + }; + let mut buf = vec![0; size as usize]; + read.read_exact(&mut buf) + .await + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))?; + let msg: T = postcard::from_bytes(&buf) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + Ok(Some(msg)) + }) + } + } + + impl Drop for QuinnReceiver { + fn drop(&mut self) {} + } + + struct QuinnSender { + send: quinn::SendStream, + buffer: SmallVec<[u8; 128]>, + _marker: std::marker::PhantomData, + } + + impl Debug for QuinnSender { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("QuinnSender").finish() + } + } + + impl DynSender for QuinnSender { + fn send(&mut self, value: T) -> Pin> + Send + '_>> { + Box::pin(async { + let value = value; + self.buffer.clear(); + self.buffer.write_length_prefixed(value)?; + self.send.write_all(&self.buffer).await?; + self.buffer.clear(); + Ok(()) + }) + } + + fn try_send( + &mut self, + value: T, + ) -> Pin> + Send + '_>> { + Box::pin(async { + // todo: move the non-async part out of the box. Will require a new return type. + let value = value; + self.buffer.clear(); + self.buffer.write_length_prefixed(value)?; + let Some(n) = now_or_never(self.send.write(&self.buffer)) else { + return Ok(false); + }; + let n = n?; + self.send.write_all(&self.buffer[n..]).await?; + self.buffer.clear(); + Ok(true) + }) + } + + fn is_rpc(&self) -> bool { + true + } + } + + impl Drop for QuinnSender { + fn drop(&mut self) { + self.send.finish().ok(); + } + } + + /// Type alias for a handler fn for remote requests + pub type Handler = Arc< + dyn Fn( + R, + quinn::RecvStream, + quinn::SendStream, + ) -> crate::BoxedFuture<'static, std::result::Result<(), SendError>> + + Send + + Sync + + 'static, + >; + + /// Utility function to listen for incoming connections and handle them with the provided handler + pub async fn listen( + endpoint: quinn::Endpoint, + handler: Handler, + ) { + let mut request_id = 0u64; + let mut tasks = JoinSet::new(); + while let Some(incoming) = endpoint.accept().await { + let handler = handler.clone(); + let fut = async move { + let connection = match incoming.await { + Ok(connection) => connection, + Err(cause) => { + warn!("failed to accept connection {cause:?}"); + return io::Result::Ok(()); + } + }; + loop { + let (send, mut recv) = match connection.accept_bi().await { + Ok((s, r)) => (s, r), + Err(ConnectionError::ApplicationClosed(cause)) + if cause.error_code.into_inner() == 0 => + { + trace!("remote side closed connection {cause:?}"); + return Ok(()); + } + Err(cause) => { + warn!("failed to accept bi stream {cause:?}"); + return Err(cause.into()); + } + }; + let size = recv.read_varint_u64().await?.ok_or_else(|| { + io::Error::new(io::ErrorKind::UnexpectedEof, "failed to read size") + })?; + let mut buf = vec![0; size as usize]; + recv.read_exact(&mut buf) + .await + .map_err(|e| io::Error::new(io::ErrorKind::UnexpectedEof, e))?; + let msg: R = postcard::from_bytes(&buf) + .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?; + let rx = recv; + let tx = send; + handler(msg, rx, tx).await?; + } + }; + let span = trace_span!("rpc", id = request_id); + tasks.spawn(fut.instrument(span)); + request_id += 1; + } + } +} + +/// A request to a service. This can be either local or remote. 
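The crate docs and the `listen` loops above describe the framing as a postcard varint length prefix followed by the postcard-encoded message. A hedged, standalone sketch of that encoding (an illustration, not the crate's internal `write_length_prefixed` helper):

```rust
use serde::Serialize;

fn encode_frame<T: Serialize>(msg: &T) -> postcard::Result<Vec<u8>> {
    let body = postcard::to_stdvec(msg)?;
    // postcard varints use 7 bits per byte with the high bit as a continuation flag
    let mut frame = Vec::new();
    let mut len = body.len() as u64;
    loop {
        let byte = (len & 0x7f) as u8;
        len >>= 7;
        if len == 0 {
            frame.push(byte);
            break;
        }
        frame.push(byte | 0x80);
    }
    frame.extend_from_slice(&body);
    Ok(frame)
}
```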
+#[derive(Debug)] +pub enum Request { + /// Local in memory request + Local(L), + /// Remote cross process request + Remote(R), +} + +impl LocalSender { + /// Send a message to the service + pub fn send(&self, value: impl Into>) -> SendFut + where + T: Channels, + M: From>, + { + let value: M = value.into().into(); + SendFut::new(self.0.clone(), value) + } + + /// Send a message to the service without the type conversion magic + pub fn send_raw(&self, value: M) -> SendFut { + SendFut::new(self.0.clone(), value) + } +} + +mod send_fut { + use std::{ + future::Future, + pin::Pin, + task::{Context, Poll}, + }; + + use tokio::sync::mpsc::Sender; + use tokio_util::sync::PollSender; + + use crate::channel::SendError; + + pub struct SendFut { + poll_sender: PollSender, + value: Option, + } + + impl SendFut { + pub fn new(sender: Sender, value: T) -> Self { + Self { + poll_sender: PollSender::new(sender), + value: Some(value), + } + } + } + + impl Future for SendFut { + type Output = std::result::Result<(), SendError>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.get_mut(); + + // Safely extract the value + let value = match this.value.take() { + Some(v) => v, + None => return Poll::Ready(Ok(())), // Already completed + }; + + // Try to reserve capacity + match this.poll_sender.poll_reserve(cx) { + Poll::Ready(Ok(())) => { + // Send the item + this.poll_sender.send_item(value).ok(); + Poll::Ready(Ok(())) + } + Poll::Ready(Err(_)) => { + // Channel is closed + Poll::Ready(Err(SendError::ReceiverClosed)) + } + Poll::Pending => { + // Restore the value and wait + this.value = Some(value); + Poll::Pending + } + } + } + } } +use send_fut::SendFut; diff --git a/src/macros.rs b/src/macros.rs deleted file mode 100644 index 2f2ba6c6..00000000 --- a/src/macros.rs +++ /dev/null @@ -1,508 +0,0 @@ -//! Macros to reduce boilerplate for RPC implementations. - -/// Derive a set of RPC types and message implementation from a declaration. -/// -/// The macros are completely optional. They generate the request and response -/// message enums and the service zerosized struct. -/// Optionally, a function can be created to dispatch RPC calls to methods -/// on a struct of your choice. -/// It can also create a type-safe RPC client for the service. -/// -/// Usage is as follows: -/// -/// ```no_run -/// # use serde::{Serialize,Deserialize}; -/// # use quic_rpc::*; -/// -/// // Define your message types -/// -/// #[derive(Debug, Serialize, Deserialize)] -/// struct Add(pub i32, pub i32); -/// #[derive(Debug, Serialize, Deserialize)] -/// pub struct Sum(pub i32); -/// #[derive(Debug, Serialize, Deserialize)] -/// pub struct Multiply(pub i32); -/// #[derive(Debug, Serialize, Deserialize)] -/// pub struct MultiplyUpdate(pub i32); -/// #[derive(Debug, Serialize, Deserialize)] -/// pub struct MultiplyOutput(pub i32); -/// -/// // Derive the RPC types. -/// -/// rpc_service! { -/// // Name of the created request enum. -/// Request = MyRequest; -/// // Name of the created response enum. -/// Response = MyResponse; -/// // Name of the created service struct enum. -/// Service = MyService; -/// // Name of the macro to create a dispatch function. -/// // Optional, if not needed pass _ (underscore) as name. -/// CreateDispatch = create_my_dispatch; -/// // Name of the macro to create an RPC client. 
-/// -/// Rpc add = Add, _ -> Sum; -/// BidiStreaming multiply = Multiply, MultiplyUpdate -> MultiplyOutput -/// } -/// ``` -/// -/// This will generate a request enum `MyRequest`, a response enum `MyRespone` -/// and a service declaration `MyService`. -/// -/// It will also generate two macros to create an RPC client and a dispatch function. -/// -/// To use the client, invoke the macro with a name. The macro will generate a struct that -/// takes a client channel and exposes typesafe methods for each RPC method. -/// -/// ```ignore -/// create_store_client!(MyClient); -/// let client = quic_rpc::quinn::Channel::new(client); -/// let client = quic_rpc::client::RpcClient::::new(client); -/// let mut client = MyClient(client); -/// let sum = client.add(Add(3, 4)).await?; -/// // Sum(7) -/// let (send, mut recv) = client.multiply(Multiply(2)); -/// send(Update(3)); -/// let res = recv.next().await?; -/// // Some(MultiplyOutput(6)) -/// ``` -/// -/// To use the dispatch function, invoke the macro with a struct that implements your RPC -/// methods and the name of the generated function. You can then use this dispatch function -/// to dispatch the RPC calls to the methods on your target struct. -/// -/// ```ignore -/// #[derive(Clone)] -/// pub struct Calculator; -/// impl Calculator { -/// async fn add(self, req: Add) -> Sum { -/// Sum(req.0 + req.1) -/// } -/// async fn multiply( -/// self, -/// req: Multiply, -/// updates: impl Stream -/// ) -> impl Stream { -/// stream! { -/// tokio::pin!(updates); -/// while let Some(MultiplyUpdate(n)) = updates.next().await { -/// yield MultiplyResponse(req.0 * n); -/// } -/// } -/// } -/// } -/// -/// create_my_dispatch!(Calculator, dispatch_calculator_request); -/// -/// #[tokio::main] -/// async fn main() -> anyhow::Result<()> { -/// let server_addr: std::net::SocketAddr = "127.0.0.1:12345".parse()?; -/// let (server, _server_certs) = make_server_endpoint(server_addr)?; -/// let accept = server.accept().await.context("accept failed")?.await?; -/// let connection = quic_rpc::quinn::Channel::new(accept); -/// let calculator = Calculator; -/// let server_handle = spawn_server( -/// StoreService, -/// quic_rpc::quinn::QuinnChannelTypes, -/// connection, -/// calculator, -/// dispatch_calculator_request, -/// ); -/// server_handle.await??; -/// Ok(()) -/// } -/// ``` -/// -/// The generation of the macros in `CreateDispatch` and `CreateClient` -/// is optional. If you don't need them, pass `_` instead: -/// -/// ```ignore -/// # use quic_rpc::*; -/// rpc_service! { -/// Request = MyRequest; -/// Response = MyResponse; -/// Service = MyService; -/// CreateDispatch = _; -/// CreateClient = _; -/// -/// Rpc add = Add, _ -> Sum; -/// ClientStreaming stream = Input, Update -> Output; -/// } -/// ``` -/// ` -#[macro_export] -macro_rules! rpc_service { - ( - Request = $request:ident; - Response = $response:ident; - Service = $service:ident; - CreateDispatch = $create_dispatch:tt; - - $($m_pattern:ident $m_name:ident = $m_input:ident, $m_update:tt -> $m_output:ident);+$(;)? - ) => { - - $crate::__request_enum! 
{ - $service, - $request { - $($m_input,)* - $($m_update,)* - } - } - - #[doc=concat!("Response messages for ", stringify!($service))] - #[allow(clippy::enum_variant_names)] - #[derive(::std::fmt::Debug, ::derive_more::From, ::derive_more::TryInto, ::serde::Serialize, ::serde::Deserialize)] - pub enum $response { - $($m_output($m_output),)* - } - - $( - $crate::__rpc_message!($service, $m_pattern, $m_input, $m_update, $m_output); - )* - - #[doc=concat!("RPC service ", stringify!($service))] - #[derive(::std::clone::Clone, ::std::fmt::Debug)] - pub struct $service; - - impl $crate::Service for $service { - type Req = $request; - type Res = $response; - } - - $crate::__derive_create_dispatch!( - $service, - $request, - $create_dispatch, - [ $($m_pattern $m_name = $m_input, $m_update -> $m_output);+ ] - ); - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __derive_create_dispatch { - ( - $service:ident, - $request:ident, - _, - [ $($tt:tt)* ] - ) => {}; - ( - $service:ident, - $request:ident, - $create_dispatch:ident, - [ $($m_pattern:ident $m_name:ident = $m_input:ident, $m_update:tt -> $m_output:ident);+ ] - ) => { - #[doc = concat!("Create an RPC request dispatch function for ", stringify!($service), "\n\nSee the docs for [quic_rpc::rpc_service] for usage docs.")] - #[macro_export] - macro_rules! $create_dispatch { - ($target:ident, $handler:ident) => { - pub async fn $handler>( - mut chan: $crate::server::RpcChannel<$service, C>, - msg: <$service as $crate::Service>::Req, - target: $target, - ) -> Result<(), $crate::server::RpcServerError> { - let res = match msg { - $( - $request::$m_input(msg) => { $crate::__rpc_invoke!($m_pattern, $m_name, $target, msg, chan, target) }, - )* - _ => Err($crate::server::RpcServerError::::UnexpectedStartMessage), - }; - res?; - Ok(()) - } - } - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __request_enum { - // User entry points. - ($service:ident, $enum_name:ident { $variant_name:ident $($tt:tt)* }) => { - $crate::__request_enum!(@ {[$service $enum_name] [$variant_name]} $($tt)*); - }; - - // Internal rules to categorize each value - // This also filters out _ placeholders from non-streaming methods. - (@ {[$service:ident $enum_name:ident] [$($agg:ident)*]} $(,)? $(_$(,)?)* $variant_name:ident $($tt:tt)*) => { - $crate::__request_enum!(@ {[$service $enum_name] [$($agg)* $variant_name]} $($tt)*); - }; - - // Internal rules to categorize each value - (@ {[$service:ident $enum_name:ident] [$($agg:ident)*]} $(,)? $variant_name:ident $($tt:tt)*) => { - $crate::__request_enum!(@ {[$service $enum_name] [$($agg)* $variant_name]} $($tt)*); - }; - - // Final internal rule that generates the enum from the categorized input - (@ {[$service:ident $enum_name:ident] [$($n:ident)*]} $(,)? $(_$(,)?)*) => { - #[doc=concat!("Request messages for ", stringify!($service))] - #[derive(::std::fmt::Debug, ::derive_more::From, ::derive_more::TryInto, ::serde::Serialize, ::serde::Deserialize)] - pub enum $enum_name { - $($n($n),)* - } - }; -} - -/// Declare a message to be a rpc message for a service. -/// -/// Example: -/// ```ignore -/// declare_rpc!(TestService, TestRequest, TestResponse); -/// ``` -/// -/// This is equivalent to: -/// ```ignore -/// impl RpcMsg for TestRequest { -/// type Response = TestResponse; -/// } -/// ``` -#[macro_export] -macro_rules! 
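// Illustrative sketch, not part of the patch: roughly what the `rpc_service!` example
// from the docs above expands to. The real expansion also derives
// `derive_more::From`/`TryInto` on both enums and emits the per-message pattern impls
// via `__rpc_message!`; those parts are omitted here but assumed by the later sketches.
use serde::{Deserialize, Serialize};

// Message types from the example, repeated so the sketch is self-contained.
#[derive(Debug, Serialize, Deserialize)] pub struct Add(pub i32, pub i32);
#[derive(Debug, Serialize, Deserialize)] pub struct Sum(pub i32);
#[derive(Debug, Serialize, Deserialize)] pub struct Multiply(pub i32);
#[derive(Debug, Serialize, Deserialize)] pub struct MultiplyUpdate(pub i32);
#[derive(Debug, Serialize, Deserialize)] pub struct MultiplyOutput(pub i32);

// Request enum: one variant per request *and* per update type.
#[derive(Debug, Serialize, Deserialize)]
pub enum MyRequest {
    Add(Add),
    Multiply(Multiply),
    MultiplyUpdate(MultiplyUpdate),
}

// Response enum: one variant per response type.
#[derive(Debug, Serialize, Deserialize)]
pub enum MyResponse {
    Sum(Sum),
    MultiplyOutput(MultiplyOutput),
}

// Zero-sized marker struct tying request and response types together.
#[derive(Debug, Clone)]
pub struct MyService;

impl quic_rpc::Service for MyService {
    type Req = MyRequest;
    type Res = MyResponse;
}
// (end of sketch)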
declare_rpc { - ($service:ty, $m_input:ty, $m_output:ty) => { - impl $crate::message::RpcMsg<$service> for $m_input { - type Response = $m_output; - } - }; -} - -/// Declare a message to be a server streaming message for a service. -/// -/// Example: -/// ```ignore -/// declare_server_streaming!(TestService, TestRequest, TestResponse); -/// ``` -/// -/// This is equivalent to: -/// ```ignore -/// impl Msg for TestRequest { -/// type Pattern = ServerStreamingPattern; -/// } -/// -/// impl ServerStreamingMsg for TestRequest { -/// type Response = TestResponse; -/// } -#[macro_export] -macro_rules! declare_server_streaming { - ($service:ident, $m_input:ident, $m_output:ident) => { - impl $crate::message::Msg<$service> for $m_input { - type Pattern = $crate::message::ServerStreaming; - } - impl $crate::message::ServerStreamingMsg<$service> for $m_input { - type Response = $m_output; - } - }; -} - -/// Declare a message to be a server streaming message for a service. -/// -/// Example: -/// ```ignore -/// declare_client_streaming!(TestService, TestRequest, TestUpdate, TestResponse); -/// ``` -/// -/// This is equivalent to: -/// ```ignore -/// impl Msg for TestRequest { -/// type Pattern = ClientStreamingPattern; -/// } -/// -/// impl ClientStreamingMsg for TestRequest { -/// type Update = TestUpdate; -/// type Response = TestResponse; -/// } -/// ``` -#[macro_export] -macro_rules! declare_client_streaming { - ($service:ident, $m_input:ident, $m_update:ident, $m_output:ident) => { - impl $crate::message::Msg<$service> for $m_input { - type Pattern = $crate::message::ClientStreaming; - } - impl $crate::message::ClientStreamingMsg<$service> for $m_input { - type Update = $m_update; - type Response = $m_output; - } - }; -} - -/// Declare a message to be a server streaming message for a service. -/// -/// Example: -/// ```ignore -/// declare_bidi_streaming!(TestService, TestRequest, TestUpdate, TestResponse); -/// ``` -/// -/// This is equivalent to: -/// ```ignore -/// impl Msg for TestRequest { -/// type Pattern = BidiStreamingPattern; -/// } -/// -/// impl BidiStreamingMsg for TestRequest { -/// type Update = TestUpdate; -/// type Response = TestResponse; -/// } -/// ``` -#[macro_export] -macro_rules! declare_bidi_streaming { - ($service:ident, $m_input:ident, $m_update:ident, $m_output:ident) => { - impl $crate::message::Msg<$service> for $m_input { - type Pattern = $crate::message::BidiStreaming; - } - impl $crate::message::BidiStreamingMsg<$service> for $m_input { - type Update = $m_update; - type Response = $m_output; - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! 
__rpc_message { - ($service:ident, Rpc, $m_input:ident, _, $m_output:ident) => { - impl $crate::message::RpcMsg<$service> for $m_input { - type Response = $m_output; - } - }; - ($service:ident, ServerStreaming, $m_input:ident, _, $m_output:ident) => { - impl $crate::message::Msg<$service> for $m_input { - type Pattern = $crate::message::ServerStreaming; - } - impl $crate::message::ServerStreamingMsg<$service> for $m_input { - type Response = $m_output; - } - }; - ($service:ident, ClientStreaming, $m_input:ident, $m_update:ident, $m_output:ident) => { - impl $crate::message::Msg<$service> for $m_input { - type Pattern = $crate::message::ClientStreaming; - } - impl $crate::message::ClientStreamingMsg<$service> for $m_input { - type Response = $m_output; - type Update = $m_update; - } - }; - ($service:ident, BidiStreaming, $m_input:ident, $m_update:ident, $m_output:ident) => { - impl $crate::message::Msg<$service> for $m_input { - type Pattern = $crate::message::BidiStreaming; - } - impl $crate::message::BidiStreamingMsg<$service> for $m_input { - type Response = $m_output; - type Update = $m_update; - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __rpc_invoke { - (Rpc, $m_name:ident, $target_ty:ident, $msg:ident, $chan:ident, $target:ident) => { - $chan.rpc($msg, $target, $target_ty::$m_name).await - }; - (ClientStreaming, $m_name:ident, $target_ty:ident, $msg:ident, $chan:ident, $target:ident) => { - $chan - .client_streaming($msg, $target, $target_ty::$m_name) - .await - }; - (ServerStreaming, $m_name:ident, $target_ty:ident, $msg:ident, $chan:ident, $target:ident) => { - $chan - .server_streaming($msg, $target, $target_ty::$m_name) - .await - }; - (BidiStreaming, $m_name:ident, $target_ty:ident, $msg:ident, $chan:ident, $target:ident) => { - $chan - .bidi_streaming($msg, $target, $target_ty::$m_name) - .await - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! __derive_create_client{ - ( - $service:ident, - _, - [ $($tt:tt)* ] - ) => {}; - ( - $service:ident, - $create_client:tt, - [ $($m_pattern:ident $m_name:ident = $m_input:ident, $m_update:tt -> $m_output:ident);+ ] - ) => { - #[doc = concat!("Create an RPC client for ", stringify!($service), "\n\nSee the docs for [quic_rpc::rpc_service] for usage docs.")] - #[macro_export] - macro_rules! $create_client { - ($struct:ident) => { - #[derive(::std::clone::Clone, ::std::fmt::Debug)] - pub struct $struct>(pub $crate::client::RpcClient<$service, C>); - - impl> $struct { - $( - $crate::__rpc_method!($m_pattern, $service, $m_name, $m_input, $m_output, $m_update); - )* - } - }; - } - }; -} - -#[doc(hidden)] -#[macro_export] -macro_rules! 
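// Illustrative sketch, not part of the patch: the dispatch function that
// `create_my_dispatch!(Calculator, dispatch_calculator_request)` from the docs above
// would generate, with `__rpc_invoke!` expanded. `Calculator` is the example handler
// struct from those docs; writing the channel bound as `ChannelTypes<MyService>` is an
// assumption, since the macro leaves the channel type generic.
pub async fn dispatch_calculator_request<C: quic_rpc::server::ChannelTypes<MyService>>(
    chan: quic_rpc::server::RpcChannel<MyService, C>,
    msg: MyRequest,
    target: Calculator,
) -> Result<(), quic_rpc::server::RpcServerError<C>> {
    match msg {
        // Rpc pattern: one request, one response.
        MyRequest::Add(msg) => chan.rpc(msg, target, Calculator::add).await,
        // BidiStreaming pattern: request plus update stream in, response stream out.
        MyRequest::Multiply(msg) => {
            chan.bidi_streaming(msg, target, Calculator::multiply).await
        }
        // Update messages are never valid as the *first* message of a request.
        _ => Err(quic_rpc::server::RpcServerError::UnexpectedStartMessage),
    }
}
// (end of sketch)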
__rpc_method { - (Rpc, $service:ident, $m_name:ident, $m_input:ident, $m_output:ident, _) => { - pub async fn $m_name( - &mut self, - input: $m_input, - ) -> ::std::result::Result<$m_output, $crate::client::RpcClientError> { - self.0.rpc(input).await - } - }; - (ClientStreaming, $service:ident, $m_name:ident, $m_input:ident, $m_output:ident, $m_update:ident) => { - pub async fn $m_name( - &mut self, - input: $m_input, - ) -> ::std::result::Result< - ( - $crate::client::UpdateSink<$service, C, $m_input>, - ::futures::future::BoxFuture< - 'static, - ::std::result::Result<$m_output, $crate::client::ClientStreamingItemError>, - >, - ), - $crate::client::ClientStreamingError, - > { - self.0.client_streaming(input).await - } - }; - (ServerStreaming, $service:ident, $m_name:ident, $m_input:ident, $m_output:ident, _) => { - pub async fn $m_name( - &mut self, - input: $m_input, - ) -> ::std::result::Result< - ::futures::stream::BoxStream< - 'static, - ::std::result::Result<$m_output, $crate::client::StreamingResponseItemError>, - >, - $crate::client::StreamingResponseError, - > { - self.0.server_streaming(input).await - } - }; - (BidiStreaming, $service:ident, $m_name:ident, $m_input:ident, $m_output:ident, $m_update:ident) => { - pub async fn $m_name( - &mut self, - input: $m_input, - ) -> ::std::result::Result< - ( - $crate::client::UpdateSink<$service, C, $m_input>, - ::futures::stream::BoxStream< - 'static, - ::std::result::Result<$m_output, $crate::client::BidiItemError>, - >, - ), - $crate::client::BidiError, - > { - self.0.bidi(input).await - } - }; -} diff --git a/src/message.rs b/src/message.rs deleted file mode 100644 index 688303f2..00000000 --- a/src/message.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Service definition -//! -//! Traits to define the behaviour of messages for services -use std::fmt::Debug; - -pub use crate::pattern::{ - bidi_streaming::{BidiStreaming, BidiStreamingMsg}, - client_streaming::{ClientStreaming, ClientStreamingMsg}, - rpc::{Rpc, RpcMsg}, - server_streaming::{ServerStreaming, ServerStreamingMsg}, -}; -use crate::Service; - -/// Declares the interaction pattern for a message and a service. -/// -/// For each server and each message, only one interaction pattern can be defined. -pub trait Msg: Into + TryFrom + Send + 'static { - /// The interaction pattern for this message with this service. - type Pattern: InteractionPattern; -} - -/// Trait defining interaction pattern. -/// -/// Currently there are 4 patterns: -/// - [Rpc]: 1 request, 1 response -/// - [ClientStreaming]: 1 request, stream of updates, 1 response -/// - [ServerStreaming]: 1 request, stream of responses -/// - [BidiStreaming]: 1 request, stream of updates, stream of responses -/// -/// You could define your own interaction patterns such as OneWay. -pub trait InteractionPattern: Debug + Clone + Send + Sync + 'static {} diff --git a/src/pattern/bidi_streaming.rs b/src/pattern/bidi_streaming.rs deleted file mode 100644 index 6ea94d5a..00000000 --- a/src/pattern/bidi_streaming.rs +++ /dev/null @@ -1,146 +0,0 @@ -//! Bidirectional stream interaction pattern. 
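// Illustrative sketch, not part of the patch: the typed client wrapper that the
// generated client macro (invoked as `create_store_client!(MyClient)` in the docs
// above) produces, reduced to the Rpc method; the streaming methods follow the other
// `__rpc_method!` arms. `RpcClientError<C>` is assumed to be the alias the macro
// refers to under `$crate::client`.
#[derive(Debug, Clone)]
pub struct MyClient<C: quic_rpc::Connector<MyService>>(
    pub quic_rpc::RpcClient<MyService, C>,
);

impl<C: quic_rpc::Connector<MyService>> MyClient<C> {
    // Rpc pattern: a plain async call, one request in, one response out.
    pub async fn add(
        &mut self,
        input: Add,
    ) -> Result<Sum, quic_rpc::client::RpcClientError<C>> {
        self.0.rpc(input).await
    }
}
// (end of sketch)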
- -use std::{ - error, - fmt::{self, Debug}, - result, -}; - -use futures_lite::{Stream, StreamExt}; -use futures_util::{FutureExt, SinkExt}; - -use crate::{ - client::{BoxStreamSync, UpdateSink}, - message::{InteractionPattern, Msg}, - server::{race2, RpcChannel, RpcServerError, UpdateStream}, - transport::{ConnectionErrors, Connector, StreamTypes}, - RpcClient, Service, -}; - -/// Bidirectional streaming interaction pattern -/// -/// After the initial request, the client can send updates and the server can -/// send responses. -#[derive(Debug, Clone, Copy)] -pub struct BidiStreaming; -impl InteractionPattern for BidiStreaming {} - -/// Defines update type and response type for a bidi streaming message. -pub trait BidiStreamingMsg: Msg { - /// The type for request updates - /// - /// For a request that does not support updates, this can be safely set to any type, including - /// the message type itself. Any update for such a request will result in an error. - type Update: Into + TryFrom + Send + 'static; - - /// The type for the response - /// - /// For requests that can produce errors, this can be set to [Result](std::result::Result). - type Response: Into + TryFrom + Send + 'static; -} - -/// Server error when accepting a bidi request -#[derive(Debug)] -pub enum Error { - /// Unable to open a substream at all - Open(C::OpenError), - /// Unable to send the request to the server - Send(C::SendError), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for Error {} - -/// Server error when receiving an item for a bidi request -#[derive(Debug)] -pub enum ItemError { - /// Unable to receive the response from the server - RecvError(C::RecvError), - /// Unexpected response from the server - DowncastError, -} - -impl fmt::Display for ItemError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for ItemError {} - -impl RpcClient -where - S: Service, - C: Connector, -{ - /// Bidi call to the server, request opens a stream, response is a stream - pub async fn bidi( - &self, - msg: M, - ) -> result::Result< - ( - UpdateSink, - BoxStreamSync<'static, result::Result>>, - ), - Error, - > - where - M: BidiStreamingMsg, - { - let msg = msg.into(); - let (mut send, recv) = self.source.open().await.map_err(Error::Open)?; - send.send(msg).await.map_err(Error::::Send)?; - let send = UpdateSink::new(send); - let recv = Box::pin(recv.map(move |x| match x { - Ok(msg) => M::Response::try_from(msg).map_err(|_| ItemError::DowncastError), - Err(e) => Err(ItemError::RecvError(e)), - })); - Ok((send, recv)) - } -} - -impl RpcChannel -where - C: StreamTypes, - S: Service, -{ - /// handle the message M using the given function on the target object - /// - /// If you want to support concurrent requests, you need to spawn this on a tokio task yourself. - pub async fn bidi_streaming( - self, - req: M, - target: T, - f: F, - ) -> result::Result<(), RpcServerError> - where - M: BidiStreamingMsg, - F: FnOnce(T, M, UpdateStream) -> Str + Send + 'static, - Str: Stream + Send + 'static, - T: Send + 'static, - { - let Self { mut send, recv, .. 
} = self; - // downcast the updates - let (updates, read_error) = UpdateStream::new(recv); - // get the response - let responses = f(target, req, updates); - race2(read_error.map(Err), async move { - tokio::pin!(responses); - while let Some(response) = responses.next().await { - // turn into a S::Res so we can send it - let response = response.into(); - // send it and return the error if any - send.send(response) - .await - .map_err(RpcServerError::SendError)?; - } - Ok(()) - }) - .await - } -} diff --git a/src/pattern/client_streaming.rs b/src/pattern/client_streaming.rs deleted file mode 100644 index 055aaabe..00000000 --- a/src/pattern/client_streaming.rs +++ /dev/null @@ -1,146 +0,0 @@ -//! Client streaming interaction pattern. - -use std::{ - error, - fmt::{self, Debug}, - result, -}; - -use futures_lite::{future::Boxed, Future, StreamExt}; -use futures_util::{FutureExt, SinkExt, TryFutureExt}; - -use crate::{ - client::UpdateSink, - message::{InteractionPattern, Msg}, - server::{race2, RpcChannel, RpcServerError, UpdateStream}, - transport::{ConnectionErrors, StreamTypes}, - Connector, RpcClient, Service, -}; - -/// Client streaming interaction pattern -/// -/// After the initial request, the client can send updates, but there is only -/// one response. -#[derive(Debug, Clone, Copy)] -pub struct ClientStreaming; -impl InteractionPattern for ClientStreaming {} - -/// Defines update type and response type for a client streaming message. -pub trait ClientStreamingMsg: Msg { - /// The type for request updates - /// - /// For a request that does not support updates, this can be safely set to any type, including - /// the message type itself. Any update for such a request will result in an error. - type Update: Into + TryFrom + Send + 'static; - - /// The type for the response - /// - /// For requests that can produce errors, this can be set to [Result](std::result::Result). 
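// Illustrative sketch, not part of the patch: a bidi_streaming handler with the shape
// required above (FnOnce(target, request, updates) -> Stream of responses), using the
// Multiply/MultiplyUpdate/MultiplyOutput example types and the Calculator target from
// the macro docs.
use futures_lite::StreamExt;

async fn handle_multiply<C: quic_rpc::server::ChannelTypes<MyService>>(
    chan: quic_rpc::server::RpcChannel<MyService, C>,
    req: Multiply,
    calc: Calculator,
) -> Result<(), quic_rpc::server::RpcServerError<C>> {
    chan.bidi_streaming(req, calc, |_calc, Multiply(factor), updates| {
        // One MultiplyOutput per incoming MultiplyUpdate; when the update
        // stream ends, the response stream ends and the call completes.
        updates.map(move |MultiplyUpdate(n)| MultiplyOutput(factor * n))
    })
    .await
}
// (end of sketch)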
- type Response: Into + TryFrom + Send + 'static; -} - -/// Server error when accepting a client streaming request -#[derive(Debug)] -pub enum Error { - /// Unable to open a substream at all - Open(C::OpenError), - /// Unable to send the request to the server - Send(C::SendError), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for Error {} - -/// Server error when receiving an item for a client streaming request -#[derive(Debug)] -pub enum ItemError { - /// Connection was closed before receiving the first message - EarlyClose, - /// Unable to receive the response from the server - RecvError(C::RecvError), - /// Unexpected response from the server - DowncastError, -} - -impl fmt::Display for ItemError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for ItemError {} - -impl RpcClient -where - S: Service, - C: Connector, -{ - /// Call to the server that allows the client to stream, single response - pub async fn client_streaming( - &self, - msg: M, - ) -> result::Result< - ( - UpdateSink, - Boxed>>, - ), - Error, - > - where - M: ClientStreamingMsg, - { - let msg = msg.into(); - let (mut send, mut recv) = self.source.open().await.map_err(Error::Open)?; - send.send(msg).map_err(Error::Send).await?; - let send = UpdateSink::::new(send); - let recv = async move { - let item = recv.next().await.ok_or(ItemError::EarlyClose)?; - - match item { - Ok(msg) => M::Response::try_from(msg).map_err(|_| ItemError::DowncastError), - Err(e) => Err(ItemError::RecvError(e)), - } - } - .boxed(); - Ok((send, recv)) - } -} - -impl RpcChannel -where - S: Service, - C: StreamTypes, -{ - /// handle the message M using the given function on the target object - /// - /// If you want to support concurrent requests, you need to spawn this on a tokio task yourself. - pub async fn client_streaming( - self, - req: M, - target: T, - f: F, - ) -> result::Result<(), RpcServerError> - where - M: ClientStreamingMsg, - F: FnOnce(T, M, UpdateStream) -> Fut + Send + 'static, - Fut: Future + Send + 'static, - T: Send + 'static, - { - let Self { mut send, recv, .. } = self; - let (updates, read_error) = UpdateStream::new(recv); - race2(read_error.map(Err), async move { - // get the response - let res = f(target, req, updates).await; - // turn into a S::Res so we can send it - let res = res.into(); - // send it and return the error if any - send.send(res).await.map_err(RpcServerError::SendError) - }) - .await - } -} diff --git a/src/pattern/mod.rs b/src/pattern/mod.rs deleted file mode 100644 index da2b879a..00000000 --- a/src/pattern/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! Predefined interaction patterns. -//! -//! An interaction pattern can be as simple as an rpc call or something more -//! complex such as bidirectional streaming. -//! -//! Each pattern defines different associated message types for the interaction. -pub mod bidi_streaming; -pub mod client_streaming; -pub mod rpc; -pub mod server_streaming; -pub mod try_server_streaming; diff --git a/src/pattern/rpc.rs b/src/pattern/rpc.rs deleted file mode 100644 index 044069cb..00000000 --- a/src/pattern/rpc.rs +++ /dev/null @@ -1,154 +0,0 @@ -//! RPC interaction pattern. 
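// Illustrative sketch, not part of the patch: a client_streaming handler with the
// shape required above (FnOnce(target, request, updates) -> Future of a single
// response). `Input`, `Update` and `Output` are the otherwise undefined message names
// from the second rpc_service! example; assume each wraps an i32 and that
// `Input: ClientStreamingMsg<MyService, Update = Update, Response = Output>`.
use futures_lite::StreamExt;

async fn handle_input<C: quic_rpc::server::ChannelTypes<MyService>>(
    chan: quic_rpc::server::RpcChannel<MyService, C>,
    req: Input,
    calc: Calculator,
) -> Result<(), quic_rpc::server::RpcServerError<C>> {
    chan.client_streaming(req, calc, |_calc, _req, updates| async move {
        // Drain every update, then answer exactly once.
        tokio::pin!(updates);
        let mut total = 0;
        while let Some(Update(n)) = updates.next().await {
            total += n;
        }
        Output(total)
    })
    .await
}
// (end of sketch)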
- -use std::{ - error, - fmt::{self, Debug}, - result, -}; - -use futures_lite::{Future, StreamExt}; -use futures_util::{FutureExt, SinkExt}; - -use crate::{ - message::{InteractionPattern, Msg}, - server::{race2, RpcChannel, RpcServerError}, - transport::{ConnectionErrors, StreamTypes}, - Connector, RpcClient, Service, -}; - -/// Rpc interaction pattern -/// -/// There is only one request and one response. -#[derive(Debug, Clone, Copy)] -pub struct Rpc; -impl InteractionPattern for Rpc {} - -/// Defines the response type for a rpc message. -/// -/// Since this is the most common interaction pattern, this also implements [Msg] for you -/// automatically, with the interaction pattern set to [Rpc]. This is to reduce boilerplate -/// when defining rpc messages. -pub trait RpcMsg: Msg { - /// The type for the response - /// - /// For requests that can produce errors, this can be set to [Result](std::result::Result). - type Response: Into + TryFrom + Send + 'static; -} - -/// We can only do this for one trait, so we do it for RpcMsg since it is the most common -impl, S: Service> Msg for T { - type Pattern = Rpc; -} -/// Client error. All client DSL methods return a `Result` with this error type. -#[derive(Debug)] -pub enum Error { - /// Unable to open a substream at all - Open(C::OpenError), - /// Unable to send the request to the server - Send(C::SendError), - /// Server closed the stream before sending a response - EarlyClose, - /// Unable to receive the response from the server - RecvError(C::RecvError), - /// Unexpected response from the server - DowncastError, -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for Error {} - -impl RpcClient -where - S: Service, - C: Connector, -{ - /// RPC call to the server, single request, single response - pub async fn rpc(&self, msg: M) -> result::Result> - where - M: RpcMsg, - { - let msg = msg.into(); - let (mut send, mut recv) = self.source.open().await.map_err(Error::Open)?; - send.send(msg).await.map_err(Error::::Send)?; - let res = recv - .next() - .await - .ok_or(Error::::EarlyClose)? - .map_err(Error::::RecvError)?; - // keep send alive until we have the answer - drop(send); - M::Response::try_from(res).map_err(|_| Error::DowncastError) - } -} - -impl RpcChannel -where - S: Service, - C: StreamTypes, -{ - /// handle the message of type `M` using the given function on the target object - /// - /// If you want to support concurrent requests, you need to spawn this on a tokio task yourself. - pub async fn rpc( - self, - req: M, - target: T, - f: F, - ) -> result::Result<(), RpcServerError> - where - M: RpcMsg, - F: FnOnce(T, M) -> Fut, - Fut: Future, - T: Send + 'static, - { - let Self { - mut send, mut recv, .. - } = self; - // cancel if we get an update, no matter what it is - let cancel = recv - .next() - .map(|_| RpcServerError::UnexpectedUpdateMessage::); - // race the computation and the cancellation - race2(cancel.map(Err), async move { - // get the response - let res = f(target, req).await; - // turn into a S::Res so we can send it - let res = res.into(); - // send it and return the error if any - send.send(res).await.map_err(RpcServerError::SendError) - }) - .await - } - - /// A rpc call that also maps the error from the user type to the wire type - /// - /// This is useful if you want to write your function with a convenient error type like anyhow::Error, - /// yet still use a serializable error type on the wire. 
- pub async fn rpc_map_err( - self, - req: M, - target: T, - f: F, - ) -> result::Result<(), RpcServerError> - where - M: RpcMsg>, - F: FnOnce(T, M) -> Fut, - Fut: Future>, - E2: From, - T: Send + 'static, - { - let fut = |target: T, msg: M| async move { - // call the inner fn - let res: Result = f(target, msg).await; - // convert the error type - let res: Result = res.map_err(E2::from); - res - }; - self.rpc(req, target, fut).await - } -} diff --git a/src/pattern/server_streaming.rs b/src/pattern/server_streaming.rs deleted file mode 100644 index 26d2846e..00000000 --- a/src/pattern/server_streaming.rs +++ /dev/null @@ -1,139 +0,0 @@ -//! Server streaming interaction pattern. - -use std::{ - error, - fmt::{self, Debug}, - result, -}; - -use futures_lite::{Stream, StreamExt}; -use futures_util::{FutureExt, SinkExt, TryFutureExt}; - -use crate::{ - client::{BoxStreamSync, DeferDrop}, - message::{InteractionPattern, Msg}, - server::{race2, RpcChannel, RpcServerError}, - transport::{ConnectionErrors, Connector, StreamTypes}, - RpcClient, Service, -}; - -/// Server streaming interaction pattern -/// -/// After the initial request, the server can send a stream of responses. -#[derive(Debug, Clone, Copy)] -pub struct ServerStreaming; -impl InteractionPattern for ServerStreaming {} - -/// Defines response type for a server streaming message. -pub trait ServerStreamingMsg: Msg { - /// The type for the response - /// - /// For requests that can produce errors, this can be set to [Result](std::result::Result). - type Response: Into + TryFrom + Send + 'static; -} - -/// Server error when accepting a server streaming request -#[derive(Debug)] -pub enum Error { - /// Unable to open a substream at all - Open(C::OpenError), - /// Unable to send the request to the server - Send(C::SendError), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for Error {} - -/// Client error when handling responses from a server streaming request -#[derive(Debug)] -pub enum ItemError { - /// Unable to receive the response from the server - RecvError(S::RecvError), - /// Unexpected response from the server - DowncastError, -} - -impl fmt::Display for ItemError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for ItemError {} - -impl RpcClient -where - C: crate::Connector, - S: Service, -{ - /// Bidi call to the server, request opens a stream, response is a stream - pub async fn server_streaming( - &self, - msg: M, - ) -> result::Result>>, Error> - where - M: ServerStreamingMsg, - { - let msg = msg.into(); - let (mut send, recv) = self.source.open().await.map_err(Error::Open)?; - send.send(msg).map_err(Error::::Send).await?; - let recv = recv.map(move |x| match x { - Ok(msg) => M::Response::try_from(msg).map_err(|_| ItemError::DowncastError), - Err(e) => Err(ItemError::RecvError(e)), - }); - // keep send alive so the request on the server side does not get cancelled - let recv = Box::pin(DeferDrop(recv, send)); - Ok(recv) - } -} - -impl RpcChannel -where - S: Service, - C: StreamTypes, -{ - /// handle the message M using the given function on the target object - /// - /// If you want to support concurrent requests, you need to spawn this on a tokio task yourself. 
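// Illustrative sketch, not part of the patch: how rpc_map_err above is meant to be
// used. The handler body works with anyhow::Error (E1) while the declared response
// carries a serializable error type (E2) with `E2: From<E1>`. `Lookup`, `Entry` and
// `Db` are hypothetical, with
// `Lookup: RpcMsg<MyService, Response = Result<Entry, WireError>>`.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub struct WireError(pub String);

impl From<anyhow::Error> for WireError {
    fn from(e: anyhow::Error) -> Self {
        Self(e.to_string())
    }
}

async fn handle_lookup<C: quic_rpc::server::ChannelTypes<MyService>>(
    chan: quic_rpc::server::RpcChannel<MyService, C>,
    req: Lookup,
    db: Db,
) -> Result<(), quic_rpc::server::RpcServerError<C>> {
    chan.rpc_map_err(req, db, |db: Db, req: Lookup| async move {
        // `?` yields anyhow::Error here; rpc_map_err converts it to WireError
        // before the response goes on the wire.
        let entry: Entry = db.lookup(req).await?;
        anyhow::Ok(entry)
    })
    .await
}
// (end of sketch)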
- pub async fn server_streaming( - self, - req: M, - target: T, - f: F, - ) -> result::Result<(), RpcServerError> - where - M: ServerStreamingMsg, - F: FnOnce(T, M) -> Str + Send + 'static, - Str: Stream + Send + 'static, - T: Send + 'static, - { - let Self { - mut send, mut recv, .. - } = self; - // cancel if we get an update, no matter what it is - let cancel = recv - .next() - .map(|_| RpcServerError::UnexpectedUpdateMessage::); - // race the computation and the cancellation - race2(cancel.map(Err), async move { - // get the response - let responses = f(target, req); - tokio::pin!(responses); - while let Some(response) = responses.next().await { - // turn into a S::Res so we can send it - let response = response.into(); - // send it and return the error if any - send.send(response) - .await - .map_err(RpcServerError::SendError)?; - } - Ok(()) - }) - .await - } -} diff --git a/src/pattern/try_server_streaming.rs b/src/pattern/try_server_streaming.rs deleted file mode 100644 index e3ebb061..00000000 --- a/src/pattern/try_server_streaming.rs +++ /dev/null @@ -1,210 +0,0 @@ -//! Fallible server streaming interaction pattern. - -use std::{ - error, - fmt::{self, Debug}, - result, -}; - -use futures_lite::{Future, Stream, StreamExt}; -use futures_util::{FutureExt, SinkExt, TryFutureExt}; -use serde::{Deserialize, Serialize}; - -use crate::{ - client::{BoxStreamSync, DeferDrop}, - message::{InteractionPattern, Msg}, - server::{race2, RpcChannel, RpcServerError}, - transport::{self, ConnectionErrors, StreamTypes}, - Connector, RpcClient, Service, -}; - -/// A guard message to indicate that the stream has been created. -/// -/// This is so we can dinstinguish between an error creating the stream and -/// an error in the first item produced by the stream. -#[derive(Debug, Clone, Copy, Serialize, Deserialize)] -pub struct StreamCreated; - -/// Fallible server streaming interaction pattern. -#[derive(Debug, Clone, Copy)] -pub struct TryServerStreaming; - -impl InteractionPattern for TryServerStreaming {} - -/// Same as ServerStreamingMsg, but with lazy stream creation and the error type explicitly defined. -pub trait TryServerStreamingMsg: Msg -where - result::Result: Into + TryFrom, - result::Result: Into + TryFrom, -{ - /// Error when creating the stream - type CreateError: Debug + Send + 'static; - - /// Error for stream items - type ItemError: Debug + Send + 'static; - - /// Successful response item - type Item: Send + 'static; -} - -/// Server error when accepting a server streaming request -/// -/// This combines network errors with application errors. Usually you don't -/// care about the exact nature of the error, but if you want to handle -/// application errors differently, you can match on this enum. -#[derive(Debug)] -pub enum Error { - /// Unable to open a substream at all - Open(C::OpenError), - /// Unable to send the request to the server - Send(C::SendError), - /// Error received when creating the stream - Recv(C::RecvError), - /// Connection was closed before receiving the first message - EarlyClose, - /// Unexpected response from the server - Downcast, - /// Application error - Application(E), -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for Error {} - -/// Client error when handling responses from a server streaming request. -/// -/// This combines network errors with application errors. 
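// Illustrative sketch, not part of the patch: a server_streaming handler with the
// shape required above (FnOnce(target, request) -> Stream of responses).
// `Countdown(pub u32)` and `Tick(pub u32)` are hypothetical messages with
// `Countdown: ServerStreamingMsg<MyService, Response = Tick>`.
use futures_lite::stream;

async fn handle_countdown<C: quic_rpc::server::ChannelTypes<MyService>>(
    chan: quic_rpc::server::RpcChannel<MyService, C>,
    req: Countdown,
    target: (),
) -> Result<(), quic_rpc::server::RpcServerError<C>> {
    chan.server_streaming(req, target, |(), Countdown(n)| {
        // A finite stream: one Tick per step, then a clean end of stream.
        stream::iter((0..n).rev().map(Tick))
    })
    .await
}
// (end of sketch)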
-#[derive(Debug)] -pub enum ItemError { - /// Unable to receive the response from the server - Recv(S::RecvError), - /// Unexpected response from the server - Downcast, - /// Application error - Application(E), -} - -impl fmt::Display for ItemError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for ItemError {} - -impl RpcChannel -where - C: StreamTypes, - S: Service, -{ - /// handle the message M using the given function on the target object - /// - /// If you want to support concurrent requests, you need to spawn this on a tokio task yourself. - /// - /// Compared to [RpcChannel::server_streaming], with this method the stream creation is via - /// a function that returns a future that resolves to a stream. - pub async fn try_server_streaming( - self, - req: M, - target: T, - f: F, - ) -> result::Result<(), RpcServerError> - where - M: TryServerStreamingMsg, - std::result::Result: Into + TryFrom, - std::result::Result: Into + TryFrom, - F: FnOnce(T, M) -> Fut + Send + 'static, - Fut: Future> + Send + 'static, - Str: Stream> + Send + 'static, - T: Send + 'static, - { - let Self { - mut send, mut recv, .. - } = self; - // cancel if we get an update, no matter what it is - let cancel = recv - .next() - .map(|_| RpcServerError::UnexpectedUpdateMessage::); - // race the computation and the cancellation - race2(cancel.map(Err), async move { - // get the response - let responses = match f(target, req).await { - Ok(responses) => { - // turn into a S::Res so we can send it - let response = Ok(StreamCreated).into(); - // send it and return the error if any - send.send(response) - .await - .map_err(RpcServerError::SendError)?; - responses - } - Err(cause) => { - // turn into a S::Res so we can send it - let response = Err(cause).into(); - // send it and return the error if any - send.send(response) - .await - .map_err(RpcServerError::SendError)?; - return Ok(()); - } - }; - tokio::pin!(responses); - while let Some(response) = responses.next().await { - // turn into a S::Res so we can send it - let response = response.into(); - // send it and return the error if any - send.send(response) - .await - .map_err(RpcServerError::SendError)?; - } - Ok(()) - }) - .await - } -} - -impl RpcClient -where - C: Connector, - S: Service, -{ - /// Bidi call to the server, request opens a stream, response is a stream - pub async fn try_server_streaming( - &self, - msg: M, - ) -> result::Result< - BoxStreamSync<'static, Result>>, - Error, - > - where - M: TryServerStreamingMsg, - Result: Into + TryFrom, - Result: Into + TryFrom, - { - let msg = msg.into(); - let (mut send, mut recv) = self.source.open().await.map_err(Error::Open)?; - send.send(msg).map_err(Error::Send).await?; - let Some(initial) = recv.next().await else { - return Err(Error::EarlyClose); - }; - let initial = initial.map_err(Error::Recv)?; // initial response - let initial = >::try_from(initial) - .map_err(|_| Error::Downcast)?; - let _ = initial.map_err(Error::Application)?; - let recv = recv.map(move |x| { - let x = x.map_err(ItemError::Recv)?; - let x = >::try_from(x) - .map_err(|_| ItemError::Downcast)?; - let x = x.map_err(ItemError::Application)?; - Ok(x) - }); - // keep send alive so the request on the server side does not get cancelled - let recv = Box::pin(DeferDrop(recv, send)); - Ok(recv) - } -} diff --git a/src/server.rs b/src/server.rs deleted file mode 100644 index 387d41da..00000000 --- a/src/server.rs +++ /dev/null @@ -1,496 +0,0 @@ -//! Server side api -//! -//! 
The main entry point is [RpcServer] -use std::{ - error, - fmt::{self, Debug}, - marker::PhantomData, - pin::Pin, - result, - sync::Arc, - task::{self, Poll}, -}; - -use futures_lite::{Future, Stream, StreamExt}; -use futures_util::{SinkExt, TryStreamExt}; -use pin_project::pin_project; -use tokio::{sync::oneshot, task::JoinSet}; -use tokio_util::task::AbortOnDropHandle; -use tracing::{error, warn}; - -use crate::{ - transport::{ - self, - boxed::BoxableListener, - mapped::{ErrorOrMapError, MappedRecvStream, MappedSendSink, MappedStreamTypes}, - ConnectionErrors, StreamTypes, - }, - Listener, RpcMessage, Service, -}; - -/// Stream types on the server side -/// -/// On the server side, we receive requests and send responses. -/// On the client side, we send requests and receive responses. -pub trait ChannelTypes: transport::StreamTypes {} - -impl, S: Service> ChannelTypes for T {} - -/// Type alias for when you want to require a boxed channel -pub type BoxedChannelTypes = crate::transport::boxed::BoxedStreamTypes< - ::Req, - ::Res, ->; - -/// A boxed listener for the given [`Service`] -pub type BoxedListener = - crate::transport::boxed::BoxedListener<::Req, ::Res>; - -#[cfg(feature = "flume-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "flume-transport")))] -/// A flume listener for the given [`Service`] -pub type FlumeListener = - crate::transport::flume::FlumeListener<::Req, ::Res>; - -#[cfg(feature = "quinn-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "quinn-transport")))] -/// A quinn listener for the given [`Service`] -pub type QuinnListener = - crate::transport::quinn::QuinnListener<::Req, ::Res>; - -#[cfg(feature = "hyper-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "hyper-transport")))] -/// A hyper listener for the given [`Service`] -pub type HyperListener = - crate::transport::hyper::HyperListener<::Req, ::Res>; - -#[cfg(feature = "iroh-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "iroh-transport")))] -/// An iroh listener for the given [`Service`] -pub type IrohListener = - crate::transport::iroh::IrohListener<::Req, ::Res>; - -/// A server for a specific service. -/// -/// This is a wrapper around a [`Listener`] that serves as the entry point for the server DSL. -/// -/// Type parameters: -/// -/// `S` is the service type. -/// `C` is the channel type. -#[derive(Debug)] -pub struct RpcServer> { - /// The channel on which new requests arrive. - /// - /// Each new request is a receiver and channel pair on which messages for this request - /// are received and responses sent. - source: C, - _p: PhantomData, -} - -impl Clone for RpcServer { - fn clone(&self) -> Self { - Self { - source: self.source.clone(), - _p: PhantomData, - } - } -} - -impl> RpcServer { - /// Create a new rpc server for a specific service for a [Service] given a compatible - /// [Listener]. - /// - /// This is where a generic typed endpoint is converted into a server for a specific service. - pub fn new(source: C) -> Self { - Self { - source, - _p: PhantomData, - } - } - - /// Box the transport for the service. - /// - /// The boxed transport is the default for the `C` type parameter, so by boxing we can avoid - /// having to specify the type parameter. - pub fn boxed(self) -> RpcServer> - where - C: BoxableListener, - { - RpcServer::new(self.source.boxed()) - } -} - -/// A channel for requests and responses for a specific service. -/// -/// This just groups the sink and stream into a single type, and attaches the -/// information about the service type. 
-/// -/// Sink and stream are independent, so you can take the channel apart and use -/// them independently. -/// -/// Type parameters: -/// -/// `S` is the service type. -/// `C` is the service endpoint from which the channel was created. -#[derive(Debug)] -pub struct RpcChannel = BoxedChannelTypes> { - /// Sink to send responses to the client. - pub send: C::SendSink, - /// Stream to receive requests from the client. - pub recv: C::RecvStream, - - pub(crate) _p: PhantomData, -} - -impl RpcChannel -where - S: Service, - C: StreamTypes, -{ - /// Create a new RPC channel. - pub fn new(send: C::SendSink, recv: C::RecvStream) -> Self { - Self { - send, - recv, - _p: PhantomData, - } - } - - /// Convert this channel into a boxed channel. - pub fn boxed(self) -> RpcChannel> - where - C::SendError: Into + Send + Sync + 'static, - C::RecvError: Into + Send + Sync + 'static, - { - let send = - transport::boxed::SendSink::boxed(Box::new(self.send.sink_map_err(|e| e.into()))); - let recv = transport::boxed::RecvStream::boxed(Box::new(self.recv.map_err(|e| e.into()))); - RpcChannel::new(send, recv) - } - - /// Map this channel's service into an inner service. - /// - /// This method is available if the required bounds are upheld: - /// SNext::Req: Into + TryFrom, - /// SNext::Res: Into + TryFrom, - /// - /// Where SNext is the new service to map to and S is the current inner service. - /// - /// This method can be chained infintely. - pub fn map(self) -> RpcChannel> - where - SNext: Service, - SNext::Req: TryFrom, - S::Res: From, - { - RpcChannel::new( - MappedSendSink::new(self.send), - MappedRecvStream::new(self.recv), - ) - } -} - -/// The result of accepting a new connection. -pub struct Accepting> { - send: C::SendSink, - recv: C::RecvStream, - _p: PhantomData, -} - -impl> Accepting { - /// Read the first message from the client. - /// - /// The return value is a tuple of `(request, channel)`. Here `request` is the - /// first request which is already read from the stream. The `channel` is a - /// [RpcChannel] that has `sink` and `stream` fields that can be used to send more - /// requests and/or receive more responses. - /// - /// Often sink and stream will wrap an an underlying byte stream. In this case you can - /// call into_inner() on them to get it back to perform byte level reads and writes. - pub async fn read_first(self) -> result::Result<(S::Req, RpcChannel), RpcServerError> { - let Accepting { send, mut recv, .. } = self; - // get the first message from the client. This will tell us what it wants to do. - let request: S::Req = recv - .next() - .await - // no msg => early close - .ok_or(RpcServerError::EarlyClose)? - // recv error - .map_err(RpcServerError::RecvError)?; - Ok((request, RpcChannel::::new(send, recv))) - } -} - -impl> RpcServer { - /// Accepts a new channel from a client. The result is an [Accepting] object that - /// can be used to read the first request. - pub async fn accept(&self) -> result::Result, RpcServerError> { - let (send, recv) = self.source.accept().await.map_err(RpcServerError::Accept)?; - Ok(Accepting { - send, - recv, - _p: PhantomData, - }) - } - - /// Get the underlying service endpoint - pub fn into_inner(self) -> C { - self.source - } - - /// Run an accept loop for this server. - /// - /// Each request will be handled in a separate task. - /// - /// It is the caller's responsibility to poll the returned future to drive the server. 
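// Illustrative sketch, not part of the patch: the two-stage accept shown above, using
// the example MyService types. `accept()` yields an `Accepting`, and `read_first()`
// yields the first request plus the channel for the rest of the exchange.
async fn serve_one<C: quic_rpc::Listener<MyService>>(
    server: &quic_rpc::server::RpcServer<MyService, C>,
) -> Result<(), quic_rpc::server::RpcServerError<C>> {
    let accepting = server.accept().await?;
    let (request, chan) = accepting.read_first().await?;
    match request {
        MyRequest::Add(msg) => chan.rpc(msg, Calculator, Calculator::add).await,
        other => {
            tracing::warn!("unexpected first message: {other:?}");
            Ok(())
        }
    }
}
// (end of sketch)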
- pub async fn accept_loop(self, handler: Fun) - where - S: Service, - C: Listener, - Fun: Fn(S::Req, RpcChannel) -> Fut + Send + Sync + 'static, - Fut: Future> + Send + 'static, - E: Into + 'static, - { - let handler = Arc::new(handler); - let mut tasks = JoinSet::new(); - loop { - tokio::select! { - Some(res) = tasks.join_next(), if !tasks.is_empty() => { - if let Err(e) = res { - if e.is_panic() { - error!("Panic handling RPC request: {e}"); - } - } - } - req = self.accept() => { - let req = match req { - Ok(req) => req, - Err(e) => { - warn!("Error accepting RPC request: {e}"); - continue; - } - }; - let handler = handler.clone(); - tasks.spawn(async move { - let (req, chan) = match req.read_first().await { - Ok((req, chan)) => (req, chan), - Err(e) => { - warn!("Error reading first message: {e}"); - return; - } - }; - if let Err(cause) = handler(req, chan).await { - warn!("Error handling RPC request: {}", cause.into()); - } - }); - } - } - } - } - - /// Spawn an accept loop and return a handle to the task. - pub fn spawn_accept_loop(self, handler: Fun) -> AbortOnDropHandle<()> - where - S: Service, - C: Listener, - Fun: Fn(S::Req, RpcChannel) -> Fut + Send + Sync + 'static, - Fut: Future> + Send + 'static, - E: Into + 'static, - { - AbortOnDropHandle::new(tokio::spawn(self.accept_loop(handler))) - } -} - -impl> AsRef for RpcServer { - fn as_ref(&self) -> &C { - &self.source - } -} - -/// A stream of updates -/// -/// If there is any error with receiving or with decoding the updates, the stream will stall and the error will -/// cause a termination of the RPC call. -#[pin_project] -#[derive(Debug)] -pub struct UpdateStream( - #[pin] C::RecvStream, - Option>>, - PhantomData, -) -where - C: StreamTypes; - -impl UpdateStream -where - C: StreamTypes, - T: TryFrom, -{ - pub(crate) fn new(recv: C::RecvStream) -> (Self, UnwrapToPending>) { - let (error_send, error_recv) = oneshot::channel(); - let error_recv = UnwrapToPending(futures_lite::future::fuse(error_recv)); - (Self(recv, Some(error_send), PhantomData), error_recv) - } -} - -impl Stream for UpdateStream -where - C: StreamTypes, - T: TryFrom, -{ - type Item = T; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - let mut this = self.project(); - match Pin::new(&mut this.0).poll_next(cx) { - Poll::Ready(Some(msg)) => match msg { - Ok(msg) => { - let msg = T::try_from(msg).map_err(|_cause| ()); - match msg { - Ok(msg) => Poll::Ready(Some(msg)), - Err(_cause) => { - // we were unable to downcast, so we need to send an error - if let Some(tx) = this.1.take() { - let _ = tx.send(RpcServerError::UnexpectedUpdateMessage); - } - Poll::Pending - } - } - } - Err(cause) => { - // we got a recv error, so return pending and send the error - if let Some(tx) = this.1.take() { - let _ = tx.send(RpcServerError::RecvError(cause)); - } - Poll::Pending - } - }, - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} - -/// Server error. All server DSL methods return a `Result` with this error type. -pub enum RpcServerError { - /// Unable to open a new channel - Accept(C::AcceptError), - /// Recv side for a channel was closed before getting the first message - EarlyClose, - /// Got an unexpected first message, e.g. an update message - UnexpectedStartMessage, - /// Error receiving a message - RecvError(C::RecvError), - /// Error sending a response - SendError(C::SendError), - /// Got an unexpected update message, e.g. 
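// Illustrative sketch, not part of the patch: driving a server with spawn_accept_loop
// above. The handler closure receives the first request and the channel; each
// connection is handled in its own task, and dropping the returned handle aborts the
// loop. Transport construction is elided; `listener` is any Listener<MyService>.
fn start_server<C: quic_rpc::Listener<MyService>>(
    listener: C,
) -> tokio_util::task::AbortOnDropHandle<()> {
    let server = quic_rpc::server::RpcServer::<MyService, _>::new(listener);
    server.spawn_accept_loop(|request, chan| async move {
        match request {
            MyRequest::Add(msg) => chan
                .rpc(msg, Calculator, Calculator::add)
                .await
                .map_err(|e| anyhow::anyhow!("{e:?}")),
            _ => Ok(()),
        }
    })
}
// (end of sketch)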
a request message or a non-matching update message - UnexpectedUpdateMessage, -} - -impl - RpcServerError> -{ - /// For a mapped connection, map the error back to the original error type - pub fn map_back(self) -> RpcServerError { - match self { - RpcServerError::EarlyClose => RpcServerError::EarlyClose, - RpcServerError::UnexpectedStartMessage => RpcServerError::UnexpectedStartMessage, - RpcServerError::UnexpectedUpdateMessage => RpcServerError::UnexpectedUpdateMessage, - RpcServerError::SendError(x) => RpcServerError::SendError(x), - RpcServerError::Accept(x) => RpcServerError::Accept(x), - RpcServerError::RecvError(ErrorOrMapError::Inner(x)) => RpcServerError::RecvError(x), - RpcServerError::RecvError(ErrorOrMapError::Conversion) => { - RpcServerError::UnexpectedUpdateMessage - } - } - } -} - -impl RpcServerError { - /// Convert into a different error type provided the send, recv and accept errors can be converted - pub fn errors_into(self) -> RpcServerError - where - T: ConnectionErrors, - C::SendError: Into, - C::RecvError: Into, - C::AcceptError: Into, - { - match self { - RpcServerError::EarlyClose => RpcServerError::EarlyClose, - RpcServerError::UnexpectedStartMessage => RpcServerError::UnexpectedStartMessage, - RpcServerError::UnexpectedUpdateMessage => RpcServerError::UnexpectedUpdateMessage, - RpcServerError::SendError(x) => RpcServerError::SendError(x.into()), - RpcServerError::Accept(x) => RpcServerError::Accept(x.into()), - RpcServerError::RecvError(x) => RpcServerError::RecvError(x.into()), - } - } -} - -impl fmt::Debug for RpcServerError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Accept(arg0) => f.debug_tuple("Open").field(arg0).finish(), - Self::EarlyClose => write!(f, "EarlyClose"), - Self::RecvError(arg0) => f.debug_tuple("RecvError").field(arg0).finish(), - Self::SendError(arg0) => f.debug_tuple("SendError").field(arg0).finish(), - Self::UnexpectedStartMessage => f.debug_tuple("UnexpectedStartMessage").finish(), - Self::UnexpectedUpdateMessage => f.debug_tuple("UnexpectedStartMessage").finish(), - } - } -} - -impl fmt::Display for RpcServerError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - fmt::Debug::fmt(&self, f) - } -} - -impl error::Error for RpcServerError {} - -/// Take an oneshot receiver and just return Pending the underlying future returns `Err(oneshot::Canceled)` -pub(crate) struct UnwrapToPending(futures_lite::future::Fuse>); - -impl Future for UnwrapToPending { - type Output = T; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - // todo: use is_terminated from tokio 1.44 here to avoid the fused wrapper - match Pin::new(&mut self.0).poll(cx) { - Poll::Ready(Ok(x)) => Poll::Ready(x), - Poll::Ready(Err(_)) => Poll::Pending, - Poll::Pending => Poll::Pending, - } - } -} - -pub(crate) async fn race2, B: Future>(f1: A, f2: B) -> T { - tokio::select! { - x = f1 => x, - x = f2 => x, - } -} - -/// Run a server loop, invoking a handler callback for each request. -/// -/// Requests will be handled sequentially. 
-pub async fn run_server_loop( - _service_type: S, - conn: C, - target: T, - mut handler: F, -) -> Result<(), RpcServerError> -where - S: Service, - C: Listener, - T: Clone + Send + 'static, - F: FnMut(RpcChannel, S::Req, T) -> Fut + Send + 'static, - Fut: Future>> + Send + 'static, -{ - let server: RpcServer = RpcServer::::new(conn); - loop { - let (req, chan) = server.accept().await?.read_first().await?; - let target = target.clone(); - handler(chan, req, target).await?; - } -} diff --git a/src/transport/boxed.rs b/src/transport/boxed.rs deleted file mode 100644 index b440fd5a..00000000 --- a/src/transport/boxed.rs +++ /dev/null @@ -1,540 +0,0 @@ -//! Boxed transport with concrete types - -use std::{ - fmt::Debug, - future::Future, - pin::Pin, - task::{Context, Poll}, -}; - -use futures_lite::FutureExt; -use futures_sink::Sink; -use futures_util::{future::BoxFuture, SinkExt, Stream, StreamExt, TryStreamExt}; -use pin_project::pin_project; - -use super::{ConnectionErrors, StreamTypes}; -use crate::RpcMessage; -type BoxedFuture<'a, T> = Pin + Send + Sync + 'a>>; - -enum SendSinkInner { - #[cfg(feature = "flume-transport")] - Direct(::flume::r#async::SendSink<'static, T>), - Boxed(Pin + Send + Sync + 'static>>), -} - -/// A sink that can be used to send messages to the remote end of a channel. -/// -/// For local channels, this is a thin wrapper around a flume send sink. -/// For network channels, this contains a boxed sink, since it is reasonable -/// to assume that in that case the additional overhead of boxing is negligible. -#[pin_project] -pub struct SendSink(SendSinkInner); - -impl SendSink { - /// Create a new send sink from a boxed sink - pub fn boxed(sink: impl Sink + Send + Sync + 'static) -> Self { - Self(SendSinkInner::Boxed(Box::pin(sink))) - } - - /// Create a new send sink from a direct flume send sink - #[cfg(feature = "flume-transport")] - pub(crate) fn direct(sink: ::flume::r#async::SendSink<'static, T>) -> Self { - Self(SendSinkInner::Direct(sink)) - } -} - -impl Sink for SendSink { - type Error = anyhow::Error; - - fn poll_ready( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - match self.project().0 { - #[cfg(feature = "flume-transport")] - SendSinkInner::Direct(sink) => sink.poll_ready_unpin(cx).map_err(anyhow::Error::from), - SendSinkInner::Boxed(sink) => sink.poll_ready_unpin(cx).map_err(anyhow::Error::from), - } - } - - fn start_send(self: std::pin::Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - match self.project().0 { - #[cfg(feature = "flume-transport")] - SendSinkInner::Direct(sink) => sink.start_send_unpin(item).map_err(anyhow::Error::from), - SendSinkInner::Boxed(sink) => sink.start_send_unpin(item), - } - } - - fn poll_flush( - self: std::pin::Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - match self.project().0 { - #[cfg(feature = "flume-transport")] - SendSinkInner::Direct(sink) => sink.poll_flush_unpin(cx).map_err(anyhow::Error::from), - SendSinkInner::Boxed(sink) => sink.poll_flush_unpin(cx).map_err(anyhow::Error::from), - } - } - - fn poll_close( - self: std::pin::Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - match self.project().0 { - #[cfg(feature = "flume-transport")] - SendSinkInner::Direct(sink) => sink.poll_close_unpin(cx).map_err(anyhow::Error::from), - SendSinkInner::Boxed(sink) => sink.poll_close_unpin(cx).map_err(anyhow::Error::from), - } - } -} - -enum RecvStreamInner { - #[cfg(feature = "flume-transport")] - Direct(::flume::r#async::RecvStream<'static, T>), - Boxed(Pin> + Send 
+ Sync + 'static>>), -} - -/// A stream that can be used to receive messages from the remote end of a channel. -/// -/// For local channels, this is a thin wrapper around a flume receive stream. -/// For network channels, this contains a boxed stream, since it is reasonable -#[pin_project] -pub struct RecvStream(RecvStreamInner); - -impl RecvStream { - /// Create a new receive stream from a boxed stream - pub fn boxed( - stream: impl Stream> + Send + Sync + 'static, - ) -> Self { - Self(RecvStreamInner::Boxed(Box::pin(stream))) - } - - /// Create a new receive stream from a direct flume receive stream - #[cfg(feature = "flume-transport")] - pub(crate) fn direct(stream: ::flume::r#async::RecvStream<'static, T>) -> Self { - Self(RecvStreamInner::Direct(stream)) - } -} - -impl Stream for RecvStream { - type Item = Result; - - fn poll_next(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.project().0 { - #[cfg(feature = "flume-transport")] - RecvStreamInner::Direct(stream) => match stream.poll_next_unpin(cx) { - Poll::Ready(Some(item)) => Poll::Ready(Some(Ok(item))), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - }, - RecvStreamInner::Boxed(stream) => stream.poll_next_unpin(cx), - } - } -} - -enum OpenFutureInner<'a, In: RpcMessage, Out: RpcMessage> { - /// A direct future (todo) - #[cfg(feature = "flume-transport")] - Direct(super::flume::OpenFuture), - /// A boxed future - Boxed(BoxFuture<'a, anyhow::Result<(SendSink, RecvStream)>>), -} - -/// A concrete future for opening a channel -#[pin_project] -pub struct OpenFuture<'a, In: RpcMessage, Out: RpcMessage>(OpenFutureInner<'a, In, Out>); - -impl<'a, In: RpcMessage, Out: RpcMessage> OpenFuture<'a, In, Out> { - #[cfg(feature = "flume-transport")] - fn direct(f: super::flume::OpenFuture) -> Self { - Self(OpenFutureInner::Direct(f)) - } - - /// Create a new boxed future - pub fn boxed( - f: impl Future, RecvStream)>> + Send + 'a, - ) -> Self { - Self(OpenFutureInner::Boxed(Box::pin(f))) - } -} - -impl Future for OpenFuture<'_, In, Out> { - type Output = anyhow::Result<(SendSink, RecvStream)>; - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - match self.project().0 { - #[cfg(feature = "flume-transport")] - OpenFutureInner::Direct(f) => f - .poll(cx) - .map_ok(|(send, recv)| (SendSink::direct(send.0), RecvStream::direct(recv.0))) - .map_err(|e| e.into()), - OpenFutureInner::Boxed(f) => f.poll(cx), - } - } -} - -enum AcceptFutureInner<'a, In: RpcMessage, Out: RpcMessage> { - /// A direct future - #[cfg(feature = "flume-transport")] - Direct(super::flume::AcceptFuture), - /// A boxed future - Boxed(BoxedFuture<'a, anyhow::Result<(SendSink, RecvStream)>>), -} - -/// Concrete accept future -#[pin_project] -pub struct AcceptFuture<'a, In: RpcMessage, Out: RpcMessage>(AcceptFutureInner<'a, In, Out>); - -impl<'a, In: RpcMessage, Out: RpcMessage> AcceptFuture<'a, In, Out> { - #[cfg(feature = "flume-transport")] - fn direct(f: super::flume::AcceptFuture) -> Self { - Self(AcceptFutureInner::Direct(f)) - } - - /// Create a new boxed future - pub fn boxed( - f: impl Future, RecvStream)>> + Send + Sync + 'a, - ) -> Self { - Self(AcceptFutureInner::Boxed(Box::pin(f))) - } -} - -impl Future for AcceptFuture<'_, In, Out> { - type Output = anyhow::Result<(SendSink, RecvStream)>; - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - match self.project().0 { - #[cfg(feature = "flume-transport")] - AcceptFutureInner::Direct(f) => f - .poll(cx) - 
.map_ok(|(send, recv)| (SendSink::direct(send.0), RecvStream::direct(recv.0))) - .map_err(|e| e.into()), - AcceptFutureInner::Boxed(f) => f.poll(cx), - } - } -} - -/// A boxable connector -pub trait BoxableConnector: Debug + Send + Sync + 'static { - /// Clone the connection and box it - fn clone_box(&self) -> Box>; - - /// Open a channel to the remote che - fn open_boxed(&self) -> OpenFuture; -} - -/// A boxed connector -#[derive(Debug)] -pub struct BoxedConnector(Box>); - -impl BoxedConnector { - /// Wrap a boxable connector into a box, transforming all the types to concrete types - pub fn new(x: impl BoxableConnector) -> Self { - Self(Box::new(x)) - } -} - -impl Clone for BoxedConnector { - fn clone(&self) -> Self { - Self(self.0.clone_box()) - } -} - -impl StreamTypes for BoxedConnector { - type In = In; - type Out = Out; - type RecvStream = RecvStream; - type SendSink = SendSink; -} - -impl ConnectionErrors for BoxedConnector { - type SendError = anyhow::Error; - type RecvError = anyhow::Error; - type OpenError = anyhow::Error; - type AcceptError = anyhow::Error; -} - -impl super::Connector for BoxedConnector { - async fn open(&self) -> Result<(Self::SendSink, Self::RecvStream), Self::OpenError> { - self.0.open_boxed().await - } -} - -/// Stream types for boxed streams -#[derive(Debug)] -pub struct BoxedStreamTypes { - _p: std::marker::PhantomData<(In, Out)>, -} - -impl Clone for BoxedStreamTypes { - fn clone(&self) -> Self { - Self { - _p: std::marker::PhantomData, - } - } -} - -impl ConnectionErrors for BoxedStreamTypes { - type SendError = anyhow::Error; - type RecvError = anyhow::Error; - type OpenError = anyhow::Error; - type AcceptError = anyhow::Error; -} - -impl StreamTypes for BoxedStreamTypes { - type In = In; - type Out = Out; - type RecvStream = RecvStream; - type SendSink = SendSink; -} - -/// A boxable listener -pub trait BoxableListener: Debug + Send + Sync + 'static { - /// Clone the listener and box it - fn clone_box(&self) -> Box>; - - /// Accept a channel from a remote client - fn accept_bi_boxed(&self) -> AcceptFuture; - - /// Get the local address - fn local_addr(&self) -> &[super::LocalAddr]; -} - -/// A boxed listener -#[derive(Debug)] -pub struct BoxedListener(Box>); - -impl BoxedListener { - /// Wrap a boxable listener into a box, transforming all the types to concrete types - pub fn new(x: impl BoxableListener) -> Self { - Self(Box::new(x)) - } -} - -impl Clone for BoxedListener { - fn clone(&self) -> Self { - Self(self.0.clone_box()) - } -} - -impl StreamTypes for BoxedListener { - type In = In; - type Out = Out; - type RecvStream = RecvStream; - type SendSink = SendSink; -} - -impl ConnectionErrors for BoxedListener { - type SendError = anyhow::Error; - type RecvError = anyhow::Error; - type OpenError = anyhow::Error; - type AcceptError = anyhow::Error; -} - -impl super::Listener for BoxedListener { - fn accept( - &self, - ) -> impl Future> + Send - { - self.0.accept_bi_boxed() - } - - fn local_addr(&self) -> &[super::LocalAddr] { - self.0.local_addr() - } -} -impl BoxableConnector for BoxedConnector { - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn open_boxed(&self) -> OpenFuture { - OpenFuture::boxed(crate::transport::Connector::open(self)) - } -} - -#[cfg(feature = "quinn-transport")] -impl BoxableConnector - for super::quinn::QuinnConnector -{ - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn open_boxed(&self) -> OpenFuture { - let f = Box::pin(async move { - let (send, recv) = super::Connector::open(self).await?; - 
// map the error types to anyhow - let send = send.sink_map_err(anyhow::Error::from); - let recv = recv.map_err(anyhow::Error::from); - // return the boxed streams - anyhow::Ok((SendSink::boxed(send), RecvStream::boxed(recv))) - }); - OpenFuture::boxed(f) - } -} - -#[cfg(feature = "quinn-transport")] -impl BoxableListener - for super::quinn::QuinnListener -{ - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn accept_bi_boxed(&self) -> AcceptFuture { - let f = async move { - let (send, recv) = super::Listener::accept(self).await?; - let send = send.sink_map_err(anyhow::Error::from); - let recv = recv.map_err(anyhow::Error::from); - anyhow::Ok((SendSink::boxed(send), RecvStream::boxed(recv))) - }; - AcceptFuture::boxed(f) - } - - fn local_addr(&self) -> &[super::LocalAddr] { - super::Listener::local_addr(self) - } -} - -#[cfg(feature = "iroh-transport")] -impl BoxableConnector - for super::iroh::IrohConnector -{ - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn open_boxed(&self) -> OpenFuture { - let f = Box::pin(async move { - let (send, recv) = super::Connector::open(self).await?; - // map the error types to anyhow - let send = send.sink_map_err(anyhow::Error::from); - let recv = recv.map_err(anyhow::Error::from); - // return the boxed streams - anyhow::Ok((SendSink::boxed(send), RecvStream::boxed(recv))) - }); - OpenFuture::boxed(f) - } -} - -#[cfg(feature = "iroh-transport")] -impl BoxableListener - for super::iroh::IrohListener -{ - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn accept_bi_boxed(&self) -> AcceptFuture { - let f = async move { - let (send, recv) = super::Listener::accept(self).await?; - let send = send.sink_map_err(anyhow::Error::from); - let recv = recv.map_err(anyhow::Error::from); - anyhow::Ok((SendSink::boxed(send), RecvStream::boxed(recv))) - }; - AcceptFuture::boxed(f) - } - - fn local_addr(&self) -> &[super::LocalAddr] { - super::Listener::local_addr(self) - } -} - -#[cfg(feature = "flume-transport")] -impl BoxableConnector - for super::flume::FlumeConnector -{ - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn open_boxed(&self) -> OpenFuture { - OpenFuture::direct(super::Connector::open(self)) - } -} - -#[cfg(feature = "flume-transport")] -impl BoxableListener - for super::flume::FlumeListener -{ - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn accept_bi_boxed(&self) -> AcceptFuture { - AcceptFuture::direct(super::Listener::accept(self)) - } - - fn local_addr(&self) -> &[super::LocalAddr] { - super::Listener::local_addr(self) - } -} - -impl BoxableConnector for super::mapped::MappedConnector -where - In: RpcMessage, - Out: RpcMessage, - C: super::Connector, - C::Out: From, - In: TryFrom, - C::SendError: Into, - C::RecvError: Into, - C::OpenError: Into, -{ - fn clone_box(&self) -> Box> { - Box::new(self.clone()) - } - - fn open_boxed(&self) -> OpenFuture { - let f = Box::pin(async move { - let (send, recv) = super::Connector::open(self).await.map_err(|e| e.into())?; - // map the error types to anyhow - let send = send.sink_map_err(|e| e.into()); - let recv = recv.map_err(|e| e.into()); - // return the boxed streams - anyhow::Ok((SendSink::boxed(send), RecvStream::boxed(recv))) - }); - OpenFuture::boxed(f) - } -} - -#[cfg(test)] -mod tests { - use crate::Service; - - #[derive(Debug, Clone)] - struct FooService; - - impl Service for FooService { - type Req = u64; - type Res = u64; - } - - #[cfg(feature = "flume-transport")] - #[tokio::test] - async fn box_smoke() { - use 
futures_lite::StreamExt; - use futures_util::SinkExt; - - use crate::transport::{Connector, Listener}; - - let (server, client) = crate::transport::flume::channel(1); - let server = super::BoxedListener::new(server); - let client = super::BoxedConnector::new(client); - // spawn echo server - tokio::spawn(async move { - while let Ok((mut send, mut recv)) = server.accept().await { - if let Some(Ok(msg)) = recv.next().await { - send.send(msg).await.ok(); - } - } - anyhow::Ok(()) - }); - if let Ok((mut send, mut recv)) = client.open().await { - send.send(1).await.ok(); - let res = recv.next().await; - println!("{:?}", res); - } - } -} diff --git a/src/transport/combined.rs b/src/transport/combined.rs deleted file mode 100644 index 60e6843a..00000000 --- a/src/transport/combined.rs +++ /dev/null @@ -1,292 +0,0 @@ -//! Transport that combines two other transports -use std::{ - error, fmt, - fmt::Debug, - pin::Pin, - task::{Context, Poll}, -}; - -use futures_lite::Stream; -use futures_sink::Sink; -use pin_project::pin_project; - -use super::{ConnectionErrors, Connector, Listener, LocalAddr, StreamTypes}; - -/// A connection that combines two other connections -#[derive(Debug, Clone)] -pub struct CombinedConnector { - /// First connection - pub a: Option, - /// Second connection - pub b: Option, -} - -impl> CombinedConnector { - /// Create a combined connection from two other connections - /// - /// It will always use the first connection that is not `None`. - pub fn new(a: Option, b: Option) -> Self { - Self { a, b } - } -} - -/// An endpoint that combines two other endpoints -#[derive(Debug, Clone)] -pub struct CombinedListener { - /// First endpoint - pub a: Option, - /// Second endpoint - pub b: Option, - /// Local addresses from all endpoints - local_addr: Vec, -} - -impl> CombinedListener { - /// Create a combined listener from two other listeners - /// - /// When listening for incoming connections with - /// [`Listener::accept`], all configured channels will be listened on, - /// and the first to receive a connection will be used. If no channels are configured, - /// accept will not throw an error but just wait forever. 
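A minimal sketch of how a combined listener might be wired up, assuming the public module paths `quic_rpc::transport::{combined, flume}` and the `flume-transport` feature as in the modules removed here; only the first slot is configured, so `accept` resolves as soon as the in-memory side produces a connection:

    use futures_lite::StreamExt;
    use futures_util::SinkExt;
    use quic_rpc::transport::{
        combined::CombinedListener,
        flume::{self, FlumeListener},
        Connector, Listener,
    };

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let (flume_listener, flume_connector) = flume::channel::<u64, u64>(1);
        // Only slot `a` is configured; slot `b` stays empty.
        let listener = CombinedListener::<FlumeListener<u64, u64>, FlumeListener<u64, u64>>::new(
            Some(flume_listener),
            None,
        );
        let (mut send, _client_recv) = flume_connector.open().await?;
        send.send(42u64).await?;
        // The combined listener hands out whichever configured side produced a socket first.
        let (_server_send, mut server_recv) = listener.accept().await?;
        println!("{:?}", server_recv.next().await);
        Ok(())
    }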
- pub fn new(a: Option, b: Option) -> Self { - let mut local_addr = Vec::with_capacity(2); - if let Some(a) = &a { - local_addr.extend(a.local_addr().iter().cloned()) - }; - if let Some(b) = &b { - local_addr.extend(b.local_addr().iter().cloned()) - }; - Self { a, b, local_addr } - } - - /// Get back the inner endpoints - pub fn into_inner(self) -> (Option, Option) { - (self.a, self.b) - } -} - -/// Send sink for combined channels -#[pin_project(project = SendSinkProj)] -pub enum SendSink { - /// A variant - A(#[pin] A::SendSink), - /// B variant - B(#[pin] B::SendSink), -} - -impl> Sink for SendSink { - type Error = self::SendError; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.project() { - SendSinkProj::A(sink) => sink.poll_ready(cx).map_err(Self::Error::A), - SendSinkProj::B(sink) => sink.poll_ready(cx).map_err(Self::Error::B), - } - } - - fn start_send(self: Pin<&mut Self>, item: A::Out) -> Result<(), Self::Error> { - match self.project() { - SendSinkProj::A(sink) => sink.start_send(item).map_err(Self::Error::A), - SendSinkProj::B(sink) => sink.start_send(item).map_err(Self::Error::B), - } - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.project() { - SendSinkProj::A(sink) => sink.poll_flush(cx).map_err(Self::Error::A), - SendSinkProj::B(sink) => sink.poll_flush(cx).map_err(Self::Error::B), - } - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.project() { - SendSinkProj::A(sink) => sink.poll_close(cx).map_err(Self::Error::A), - SendSinkProj::B(sink) => sink.poll_close(cx).map_err(Self::Error::B), - } - } -} - -/// RecvStream for combined channels -#[pin_project(project = ResStreamProj)] -pub enum RecvStream { - /// A variant - A(#[pin] A::RecvStream), - /// B variant - B(#[pin] B::RecvStream), -} - -impl> Stream for RecvStream { - type Item = Result>; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.project() { - ResStreamProj::A(stream) => stream.poll_next(cx).map_err(RecvError::::A), - ResStreamProj::B(stream) => stream.poll_next(cx).map_err(RecvError::::B), - } - } -} - -/// SendError for combined channels -#[derive(Debug)] -pub enum SendError { - /// A variant - A(A::SendError), - /// B variant - B(B::SendError), -} - -impl fmt::Display for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for SendError {} - -/// RecvError for combined channels -#[derive(Debug)] -pub enum RecvError { - /// A variant - A(A::RecvError), - /// B variant - B(B::RecvError), -} - -impl fmt::Display for RecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for RecvError {} - -/// OpenError for combined channels -#[derive(Debug)] -pub enum OpenError { - /// A variant - A(A::OpenError), - /// B variant - B(B::OpenError), - /// None of the two channels is configured - NoChannel, -} - -impl fmt::Display for OpenError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for OpenError {} - -/// AcceptError for combined channels -#[derive(Debug)] -pub enum AcceptError { - /// A variant - A(A::AcceptError), - /// B variant - B(B::AcceptError), -} - -impl fmt::Display for AcceptError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for AcceptError {} - -impl ConnectionErrors for 
CombinedConnector { - type SendError = self::SendError; - type RecvError = self::RecvError; - type OpenError = self::OpenError; - type AcceptError = self::AcceptError; -} - -impl> StreamTypes for CombinedConnector { - type In = A::In; - type Out = A::Out; - type RecvStream = self::RecvStream; - type SendSink = self::SendSink; -} - -impl> Connector for CombinedConnector { - async fn open(&self) -> Result<(Self::SendSink, Self::RecvStream), Self::OpenError> { - let this = self.clone(); - // try a first, then b - if let Some(a) = this.a { - let (send, recv) = a.open().await.map_err(OpenError::A)?; - Ok((SendSink::A(send), RecvStream::A(recv))) - } else if let Some(b) = this.b { - let (send, recv) = b.open().await.map_err(OpenError::B)?; - Ok((SendSink::B(send), RecvStream::B(recv))) - } else { - Err(OpenError::NoChannel) - } - } -} - -impl ConnectionErrors for CombinedListener { - type SendError = self::SendError; - type RecvError = self::RecvError; - type OpenError = self::OpenError; - type AcceptError = self::AcceptError; -} - -impl> StreamTypes for CombinedListener { - type In = A::In; - type Out = A::Out; - type RecvStream = self::RecvStream; - type SendSink = self::SendSink; -} - -impl> Listener for CombinedListener { - async fn accept(&self) -> Result<(Self::SendSink, Self::RecvStream), Self::AcceptError> { - let a_fut = async { - if let Some(a) = &self.a { - let (send, recv) = a.accept().await.map_err(AcceptError::A)?; - Ok((SendSink::A(send), RecvStream::A(recv))) - } else { - std::future::pending().await - } - }; - let b_fut = async { - if let Some(b) = &self.b { - let (send, recv) = b.accept().await.map_err(AcceptError::B)?; - Ok((SendSink::B(send), RecvStream::B(recv))) - } else { - std::future::pending().await - } - }; - async move { - tokio::select! { - res = a_fut => res, - res = b_fut => res, - } - } - .await - } - - fn local_addr(&self) -> &[LocalAddr] { - &self.local_addr - } -} - -#[cfg(test)] -#[cfg(feature = "flume-transport")] -mod tests { - use crate::transport::{ - combined::{self, OpenError}, - flume, Connector, - }; - - #[tokio::test] - async fn open_empty_channel() { - let channel = combined::CombinedConnector::< - flume::FlumeConnector<(), ()>, - flume::FlumeConnector<(), ()>, - >::new(None, None); - let res = channel.open().await; - assert!(matches!(res, Err(OpenError::NoChannel))); - } -} diff --git a/src/transport/flume.rs b/src/transport/flume.rs deleted file mode 100644 index 8db6d03c..00000000 --- a/src/transport/flume.rs +++ /dev/null @@ -1,345 +0,0 @@ -//! Memory transport implementation using [flume] -//! -//! [flume]: https://docs.rs/flume/ -use core::fmt; -use std::{error, fmt::Display, marker::PhantomData, pin::Pin, result, task::Poll}; - -use futures_lite::{Future, Stream}; -use futures_sink::Sink; - -use super::StreamTypes; -use crate::{ - transport::{ConnectionErrors, Connector, Listener, LocalAddr}, - RpcMessage, -}; - -/// Error when receiving from a channel -/// -/// This type has zero inhabitants, so it is always safe to unwrap a result with this error type. 
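The note above about `RecvError` having zero inhabitants can be made concrete with a small, illustrative sketch (assuming the type is re-exported as `quic_rpc::transport::flume::RecvError`): matching on the error is enough for the compiler to prove the `Err` arm unreachable.

    /// Unwrap a flume receive result without `unwrap`/`expect`: the error type
    /// has no variants, so the empty match type-checks and can never execute.
    fn always_ok<T>(res: Result<T, quic_rpc::transport::flume::RecvError>) -> T {
        match res {
            Ok(value) => value,
            Err(never) => match never {},
        }
    }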
-#[derive(Debug)] -pub enum RecvError {} - -impl fmt::Display for RecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -/// Sink for memory channels -pub struct SendSink(pub(crate) flume::r#async::SendSink<'static, T>); - -impl fmt::Debug for SendSink { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendSink").finish() - } -} - -impl Sink for SendSink { - type Error = self::SendError; - - fn poll_ready( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.0) - .poll_ready(cx) - .map_err(|_| SendError::ReceiverDropped) - } - - fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { - Pin::new(&mut self.0) - .start_send(item) - .map_err(|_| SendError::ReceiverDropped) - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.0) - .poll_flush(cx) - .map_err(|_| SendError::ReceiverDropped) - } - - fn poll_close( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.0) - .poll_close(cx) - .map_err(|_| SendError::ReceiverDropped) - } -} - -/// Stream for memory channels -pub struct RecvStream(pub(crate) flume::r#async::RecvStream<'static, T>); - -impl fmt::Debug for RecvStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RecvStream").finish() - } -} - -impl Stream for RecvStream { - type Item = result::Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - match Pin::new(&mut self.0).poll_next(cx) { - Poll::Ready(Some(v)) => Poll::Ready(Some(Ok(v))), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} - -impl error::Error for RecvError {} - -/// A flume based listener. -/// -/// Created using [channel]. 
-pub struct FlumeListener { - #[allow(clippy::type_complexity)] - stream: flume::Receiver<(SendSink, RecvStream)>, -} - -impl Clone for FlumeListener { - fn clone(&self) -> Self { - Self { - stream: self.stream.clone(), - } - } -} - -impl fmt::Debug for FlumeListener { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FlumeListener") - .field("stream", &self.stream) - .finish() - } -} - -impl ConnectionErrors for FlumeListener { - type SendError = self::SendError; - type RecvError = self::RecvError; - type OpenError = self::OpenError; - type AcceptError = self::AcceptError; -} - -type Socket = (self::SendSink, self::RecvStream); - -/// Future returned by [FlumeConnector::open] -pub struct OpenFuture { - inner: flume::r#async::SendFut<'static, Socket>, - res: Option>, -} - -impl fmt::Debug for OpenFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("OpenFuture").finish() - } -} - -impl OpenFuture { - fn new(inner: flume::r#async::SendFut<'static, Socket>, res: Socket) -> Self { - Self { - inner, - res: Some(res), - } - } -} - -impl Future for OpenFuture { - type Output = result::Result, self::OpenError>; - - fn poll( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - match Pin::new(&mut self.inner).poll(cx) { - Poll::Ready(Ok(())) => self - .res - .take() - .map(|x| Poll::Ready(Ok(x))) - .unwrap_or(Poll::Pending), - Poll::Ready(Err(_)) => Poll::Ready(Err(self::OpenError::RemoteDropped)), - Poll::Pending => Poll::Pending, - } - } -} - -/// Future returned by [FlumeListener::accept] -pub struct AcceptFuture { - wrapped: flume::r#async::RecvFut<'static, (SendSink, RecvStream)>, - _p: PhantomData<(In, Out)>, -} - -impl fmt::Debug for AcceptFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("AcceptFuture").finish() - } -} - -impl Future for AcceptFuture { - type Output = result::Result<(SendSink, RecvStream), AcceptError>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - match Pin::new(&mut self.wrapped).poll(cx) { - Poll::Ready(Ok((send, recv))) => Poll::Ready(Ok((send, recv))), - Poll::Ready(Err(_)) => Poll::Ready(Err(AcceptError::RemoteDropped)), - Poll::Pending => Poll::Pending, - } - } -} - -impl StreamTypes for FlumeListener { - type In = In; - type Out = Out; - type SendSink = SendSink; - type RecvStream = RecvStream; -} - -impl Listener for FlumeListener { - #[allow(refining_impl_trait)] - fn accept(&self) -> AcceptFuture { - AcceptFuture { - wrapped: self.stream.clone().into_recv_async(), - _p: PhantomData, - } - } - - fn local_addr(&self) -> &[LocalAddr] { - &[LocalAddr::Mem] - } -} - -impl ConnectionErrors for FlumeConnector { - type SendError = self::SendError; - type RecvError = self::RecvError; - type OpenError = self::OpenError; - type AcceptError = self::AcceptError; -} - -impl StreamTypes for FlumeConnector { - type In = In; - type Out = Out; - type SendSink = SendSink; - type RecvStream = RecvStream; -} - -impl Connector for FlumeConnector { - #[allow(refining_impl_trait)] - fn open(&self) -> OpenFuture { - let (local_send, remote_recv) = flume::bounded::(128); - let (remote_send, local_recv) = flume::bounded::(128); - let remote_chan = ( - SendSink(remote_send.into_sink()), - RecvStream(remote_recv.into_stream()), - ); - let local_chan = ( - SendSink(local_send.into_sink()), - RecvStream(local_recv.into_stream()), - ); - OpenFuture::new(self.sink.clone().into_send_async(remote_chan), local_chan) - } -} - 
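To make the open/accept pairing above concrete, here is a minimal sketch (assuming the `flume-transport` feature and the public path `quic_rpc::transport::flume`): every `open` on the connector produces a fresh in-memory socket, and each one surfaces as exactly one `accept` on the listener.

    use futures_lite::StreamExt;
    use futures_util::SinkExt;
    use quic_rpc::transport::{flume, Connector, Listener};

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        let (listener, connector) = flume::channel::<u64, u64>(1);

        // Two opens yield two independent sockets...
        let (mut send_a, _client_recv_a) = connector.open().await?;
        let (_reply_a, mut recv_a) = listener.accept().await?;
        let (mut send_b, _client_recv_b) = connector.open().await?;
        let (_reply_b, mut recv_b) = listener.accept().await?;

        // ...and messages stay on the socket they were sent on.
        send_a.send(1).await?;
        send_b.send(2).await?;
        assert_eq!(recv_a.next().await.transpose()?, Some(1));
        assert_eq!(recv_b.next().await.transpose()?, Some(2));
        Ok(())
    }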
-/// A flume based connector. -/// -/// Created using [channel]. -pub struct FlumeConnector { - #[allow(clippy::type_complexity)] - sink: flume::Sender<(SendSink, RecvStream)>, -} - -impl Clone for FlumeConnector { - fn clone(&self) -> Self { - Self { - sink: self.sink.clone(), - } - } -} - -impl fmt::Debug for FlumeConnector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("FlumeClientChannel") - .field("sink", &self.sink) - .finish() - } -} - -/// AcceptError for mem channels. -/// -/// There is not much that can go wrong with mem channels. -#[derive(Debug)] -pub enum AcceptError { - /// The remote side of the channel was dropped - RemoteDropped, -} - -impl fmt::Display for AcceptError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for AcceptError {} - -/// SendError for mem channels. -/// -/// There is not much that can go wrong with mem channels. -#[derive(Debug)] -pub enum SendError { - /// Receiver was dropped - ReceiverDropped, -} - -impl Display for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl std::error::Error for SendError {} - -/// OpenError for mem channels. -#[derive(Debug)] -pub enum OpenError { - /// The remote side of the channel was dropped - RemoteDropped, -} - -impl Display for OpenError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl std::error::Error for OpenError {} - -/// CreateChannelError for mem channels. -/// -/// You can always create a mem channel, so there is no possible error. -/// Nevertheless we need a type for it. -#[derive(Debug, Clone, Copy)] -pub enum CreateChannelError {} - -impl Display for CreateChannelError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl std::error::Error for CreateChannelError {} - -/// Create a flume listener and a connected flume connector. -/// -/// `buffer` the size of the buffer for each channel. Keep this at a low value to get backpressure -pub fn channel( - buffer: usize, -) -> (FlumeListener, FlumeConnector) { - let (sink, stream) = flume::bounded(buffer); - (FlumeListener { stream }, FlumeConnector { sink }) -} diff --git a/src/transport/hyper.rs b/src/transport/hyper.rs deleted file mode 100644 index 3ef41903..00000000 --- a/src/transport/hyper.rs +++ /dev/null @@ -1,645 +0,0 @@ -//! http2 transport using [hyper] -//! -//! 
[hyper]: https://crates.io/crates/hyper/ -use std::{ - convert::Infallible, error, fmt, io, marker::PhantomData, net::SocketAddr, pin::Pin, result, - sync::Arc, task::Poll, -}; - -use bytes::Bytes; -use flume::{Receiver, Sender}; -use futures_lite::{Stream, StreamExt}; -use futures_sink::Sink; -use hyper::{ - client::{connect::Connect, HttpConnector, ResponseFuture}, - server::conn::{AddrIncoming, AddrStream}, - service::{make_service_fn, service_fn}, - Body, Client, Request, Response, Server, StatusCode, Uri, -}; -use tokio::{sync::mpsc, task::JoinHandle}; -use tracing::{debug, event, trace, Level}; - -use crate::{ - transport::{ConnectionErrors, Connector, Listener, LocalAddr, StreamTypes}, - RpcMessage, -}; - -struct HyperConnectionInner { - client: Box, - config: Arc, - uri: Uri, -} - -/// Hyper based connection to a server -pub struct HyperConnector { - inner: Arc, - _p: PhantomData<(In, Out)>, -} - -impl Clone for HyperConnector { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - _p: PhantomData, - } - } -} - -/// Trait so we don't have to drag around the hyper internals -trait Requester: Send + Sync + 'static { - fn request(&self, req: Request) -> ResponseFuture; -} - -impl Requester for Client { - fn request(&self, req: Request) -> ResponseFuture { - self.request(req) - } -} - -impl HyperConnector { - /// create a client given an uri and the default configuration - pub fn new(uri: Uri) -> Self { - Self::with_config(uri, ChannelConfig::default()) - } - - /// create a client given an uri and a custom configuration - pub fn with_config(uri: Uri, config: ChannelConfig) -> Self { - let mut connector = HttpConnector::new(); - connector.set_nodelay(true); - Self::with_connector(connector, uri, Arc::new(config)) - } - - /// create a client given an uri and a custom configuration - pub fn with_connector( - connector: C, - uri: Uri, - config: Arc, - ) -> Self { - let client = Client::builder() - .http2_only(true) - .http2_initial_connection_window_size(Some(config.max_frame_size)) - .http2_initial_stream_window_size(Some(config.max_frame_size)) - .http2_max_frame_size(Some(config.max_frame_size)) - .http2_max_send_buf_size(config.max_frame_size.try_into().unwrap()) - .build(connector); - Self { - inner: Arc::new(HyperConnectionInner { - client: Box::new(client), - uri, - config, - }), - _p: PhantomData, - } - } -} - -impl fmt::Debug for HyperConnector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ClientChannel") - .field("uri", &self.inner.uri) - .field("config", &self.inner.config) - .finish() - } -} - -/// A flume sender and receiver tuple. -type InternalChannel = ( - Receiver>, - Sender>, -); - -/// Error when setting a channel configuration -#[derive(Debug, Clone)] -pub enum ChannelConfigError { - /// The maximum frame size is invalid - InvalidMaxFrameSize(u32), - /// The maximum payload size is invalid - InvalidMaxPayloadSize(usize), -} - -impl fmt::Display for ChannelConfigError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&self, f) - } -} - -impl error::Error for ChannelConfigError {} - -/// Channel configuration -/// -/// These settings apply to both client and server channels. -#[derive(Debug, Clone)] -pub struct ChannelConfig { - /// The maximum frame size to use. - max_frame_size: u32, - max_payload_size: usize, -} - -impl ChannelConfig { - /// Set the maximum frame size. 
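A hedged sketch of how the configuration struct above might be applied to both ends (assuming the `hyper-transport` feature, the public path `quic_rpc::transport::hyper`, and placeholder addresses); the setters validate their ranges and return a `ChannelConfigError` otherwise:

    use quic_rpc::transport::{
        hyper::{ChannelConfig, HyperConnector, HyperListener},
        Listener,
    };

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        let config = ChannelConfig::default()
            .max_frame_size(1024 * 1024)?        // accepted range: 0x4000..=0xFFFFFF
            .max_payload_size(8 * 1024 * 1024)?; // accepted range: 4096..(16 MiB)
        let addr: std::net::SocketAddr = "127.0.0.1:0".parse()?;
        let listener: HyperListener<u64, u64> =
            HyperListener::serve_with_config(&addr, config.clone())?;
        println!("listening on {:?}", listener.local_addr());
        let _connector: HyperConnector<u64, u64> =
            HyperConnector::with_config("http://127.0.0.1:3000".parse()?, config);
        Ok(())
    }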
- pub fn max_frame_size(mut self, value: u32) -> result::Result { - if !(0x4000..=0xFFFFFF).contains(&value) { - return Err(ChannelConfigError::InvalidMaxFrameSize(value)); - } - self.max_frame_size = value; - Ok(self) - } - - /// Set the maximum payload size. - pub fn max_payload_size(mut self, value: usize) -> result::Result { - if !(4096..1024 * 1024 * 16).contains(&value) { - return Err(ChannelConfigError::InvalidMaxPayloadSize(value)); - } - self.max_payload_size = value; - Ok(self) - } -} - -impl Default for ChannelConfig { - fn default() -> Self { - Self { - max_frame_size: 0xFFFFFF, - max_payload_size: 0xFFFFFF, - } - } -} - -/// A listener using a hyper server -/// -/// Each request made by the any client connection this channel will yield a `(recv, send)` -/// pair which allows receiving the request and sending the response. Both these are -/// channels themselves to support streaming requests and responses. -/// -/// Creating this spawns a tokio task which runs the server, once dropped this task is shut -/// down: no new connections will be accepted and existing channels will stop. -#[derive(Debug)] -pub struct HyperListener { - /// The channel. - channel: Receiver>, - /// The configuration. - config: Arc, - /// The sender to stop the server. - /// - /// We never send anything over this really, simply dropping it makes the receiver - /// complete and will shut down the hyper server. - stop_tx: mpsc::Sender<()>, - /// The local address this server is bound to. - /// - /// This is useful when the listen address uses a random port, `:0`, to find out which - /// port was bound by the kernel. - local_addr: [LocalAddr; 1], - /// Phantom data for service - _p: PhantomData<(In, Out)>, -} - -impl HyperListener { - /// Creates a server listening on the [`SocketAddr`], with the default configuration. - pub fn serve(addr: &SocketAddr) -> hyper::Result { - Self::serve_with_config(addr, Default::default()) - } - - /// Creates a server listening on the [`SocketAddr`] with a custom configuration. - pub fn serve_with_config(addr: &SocketAddr, config: ChannelConfig) -> hyper::Result { - let (accept_tx, accept_rx) = flume::bounded(32); - - // The hyper "MakeService" which is called for each connection that is made to the - // server. It creates another Service which handles a single request. - let service = make_service_fn(move |socket: &AddrStream| { - let remote_addr = socket.remote_addr(); - event!(Level::TRACE, "Connection from {:?}", remote_addr); - - // Need a new accept_tx to move to the future on every call of this FnMut. - let accept_tx = accept_tx.clone(); - async move { - let one_req_service = service_fn(move |req: Request| { - // This closure is an FnMut as well, so clone accept_tx once more. - Self::handle_one_http2_request(req, accept_tx.clone()) - }); - Ok::<_, Infallible>(one_req_service) - } - }); - - let mut incoming = AddrIncoming::bind(addr)?; - incoming.set_nodelay(true); - let server = Server::builder(incoming) - .http2_only(true) - .http2_initial_connection_window_size(Some(config.max_frame_size)) - .http2_initial_stream_window_size(Some(config.max_frame_size)) - .http2_max_frame_size(Some(config.max_frame_size)) - .http2_max_send_buf_size(config.max_frame_size.try_into().unwrap()) - .serve(service); - let local_addr = server.local_addr(); - - let (stop_tx, mut stop_rx) = mpsc::channel::<()>(1); - let server = server.with_graceful_shutdown(async move { - // If the sender is dropped this will also gracefully terminate the server. 
- stop_rx.recv().await; - }); - tokio::spawn(server); - - Ok(Self { - channel: accept_rx, - config: Arc::new(config), - stop_tx, - local_addr: [LocalAddr::Socket(local_addr)], - _p: PhantomData, - }) - } - - /// Handles a single HTTP2 request. - /// - /// This creates the channels to communicate the (optionally streaming) request and - /// response and sends them to the [`ServerChannel`]. - async fn handle_one_http2_request( - req: Request, - accept_tx: Sender>, - ) -> Result, String> { - let (req_tx, req_rx) = flume::bounded::>(32); - let (res_tx, res_rx) = flume::bounded::>(32); - accept_tx - .send_async((req_rx, res_tx)) - .await - .map_err(|_e| "unable to send")?; - - spawn_recv_forwarder(req.into_body(), req_tx); - // Create a response with the response body channel as the response body - let response = Response::builder() - .status(StatusCode::OK) - .body(Body::wrap_stream(res_rx.into_stream())) - .map_err(|_| "unable to set body")?; - Ok(response) - } -} - -fn try_get_length_prefixed(buf: &[u8]) -> Option<&[u8]> { - if buf.len() < 4 { - return None; - } - let len = u32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]) as usize; - if buf.len() < 4 + len { - return None; - } - Some(&buf[4..4 + len]) -} - -/// Try forward all frames as deserialized messages from the buffer to the sender. -/// -/// On success, returns the number of forwarded bytes. -/// On forward error, returns the unit error. -/// -/// Deserialization errors don't cause an error, they will be sent. -/// On error the number of consumed bytes is not returned. There is nothing to do but -/// to stop the forwarder since there is nowhere to forward to anymore. -async fn try_forward_all( - buffer: &[u8], - req_tx: &Sender>, -) -> result::Result { - let mut sent = 0; - while let Some(msg) = try_get_length_prefixed(&buffer[sent..]) { - sent += msg.len() + 4; - let item = postcard::from_bytes::(msg).map_err(RecvError::DeserializeError); - if let Err(_cause) = req_tx.send_async(item).await { - // The receiver is gone, so we can't send any more data. - // - // This is a normal way for an interaction to end, when the server side is done processing - // the request and drops the receiver. - // - // don't log the cause. It does not contain any useful information. - trace!("Flume receiver dropped"); - return Err(()); - } - } - Ok(sent) -} - -/// Spawns a task which forwards requests from the network to a flume channel. -/// -/// This task will read chunks from the network, split them into length prefixed -/// frames, deserialize those frames, and send the result to the flume channel. -/// -/// If there is a network error or the flume channel closes or the request -/// stream is simply ended this task will terminate. -/// -/// So it is fine to ignore the returned [`JoinHandle`]. -/// -/// The HTTP2 request comes from *req* and the data is sent to `req_tx`. -fn spawn_recv_forwarder( - req: Body, - req_tx: Sender>, -) -> JoinHandle> { - tokio::spawn(async move { - let mut stream = req; - let mut buf = Vec::new(); - - while let Some(chunk) = stream.next().await { - match chunk.as_ref() { - Ok(chunk) => { - event!(Level::TRACE, "Server got {} bytes", chunk.len()); - if buf.is_empty() { - // try to forward directly from buffer - let sent = try_forward_all(chunk, &req_tx).await?; - // add just the rest, if any - buf.extend_from_slice(&chunk[sent..]); - } else { - // no choice but to add it all - buf.extend_from_slice(chunk); - } - } - Err(cause) => { - // Indicates that the connection has been closed on the client side. 
- // This is a normal occurrence, e.g. when the client has raced the RPC - // call with something else and has droppped the future. - debug!("Network error: {}", cause); - break; - } - }; - let sent = try_forward_all(&buf, &req_tx).await?; - // remove the forwarded bytes. - // Frequently this will be the entire buffer, so no memcpy but just set the size to 0 - buf.drain(..sent); - } - Ok(()) - }) -} - -// This does not want or need RpcMessage to be clone but still want to clone the -// ServerChannel and it's containing channels itself. The derive macro can't cope with this -// so this needs to be written by hand. -impl Clone for HyperListener { - fn clone(&self) -> Self { - Self { - channel: self.channel.clone(), - stop_tx: self.stop_tx.clone(), - local_addr: self.local_addr.clone(), - config: self.config.clone(), - _p: PhantomData, - } - } -} - -/// Receive stream for hyper channels. -/// -/// This is a newtype wrapper around a [`flume::async::RecvStream`] of deserialized -/// messages. -pub struct RecvStream { - recv: flume::r#async::RecvStream<'static, result::Result>, -} - -impl RecvStream { - /// Creates a new [`RecvStream`] from a [`flume::Receiver`]. - pub fn new(recv: flume::Receiver>) -> Self { - Self { - recv: recv.into_stream(), - } - } - - // we can not write into_inner, since all we got is a stream of already - // framed and deserialize messages. Might want to change that... -} - -impl Clone for RecvStream { - fn clone(&self) -> Self { - Self { - recv: self.recv.clone(), - } - } -} - -impl Stream for RecvStream { - type Item = Result; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.recv).poll_next(cx) - } -} - -/// Send sink for hyper channels -pub struct SendSink { - sink: flume::r#async::SendSink<'static, io::Result>, - config: Arc, - _p: PhantomData, -} - -impl SendSink { - fn new(sender: flume::Sender>, config: Arc) -> Self { - Self { - sink: sender.into_sink(), - config, - _p: PhantomData, - } - } - fn serialize(&self, item: Out) -> Result { - let mut data = Vec::with_capacity(1024); - data.extend_from_slice(&[0u8; 4]); - let mut data = postcard::to_extend(&item, data).map_err(SendError::SerializeError)?; - let len = data.len() - 4; - if len > self.config.max_payload_size { - return Err(SendError::SizeError(len)); - } - let len: u32 = len.try_into().expect("max_payload_size fits into u32"); - data[0..4].copy_from_slice(&len.to_be_bytes()); - Ok(data.into()) - } - - /// Consumes the [`SendSink`] and returns the underlying [`flume::async::SendSink`]. - /// - /// This is useful if you want to send raw [bytes::Bytes] without framing - /// directly to the channel. 
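The framing used by `serialize` above and by `try_get_length_prefixed` can be summarised in a standalone sketch (not part of the original module; assumes `postcard` and `serde` as direct dependencies): each message is a 4-byte big-endian length prefix followed by its postcard encoding.

    use serde::{Deserialize, Serialize};

    /// Encode one frame: 4-byte big-endian length prefix, then the postcard payload.
    fn frame<T: Serialize>(item: &T) -> postcard::Result<Vec<u8>> {
        let mut data = vec![0u8; 4];
        data = postcard::to_extend(item, data)?;
        let len = (data.len() - 4) as u32;
        data[0..4].copy_from_slice(&len.to_be_bytes());
        Ok(data)
    }

    /// Decode one frame from the front of `buf`, or return `None` if it is incomplete.
    fn unframe<T: for<'de> Deserialize<'de>>(buf: &[u8]) -> Option<postcard::Result<T>> {
        if buf.len() < 4 {
            return None;
        }
        let len = u32::from_be_bytes([buf[0], buf[1], buf[2], buf[3]]) as usize;
        buf.get(4..4 + len).map(postcard::from_bytes)
    }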
- pub fn into_inner(self) -> flume::r#async::SendSink<'static, io::Result> { - self.sink - } -} - -impl Sink for SendSink { - type Error = SendError; - - fn poll_ready( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.sink) - .poll_ready(cx) - .map_err(|_| SendError::ReceiverDropped) - } - - fn start_send(mut self: Pin<&mut Self>, item: Out) -> Result<(), Self::Error> { - // figure out what to send and what to return - let (send, res) = match self.serialize(item) { - Ok(data) => (Ok(data), Ok(())), - Err(cause) => ( - Err(io::Error::new(io::ErrorKind::Other, cause.to_string())), - Err(cause), - ), - }; - // attempt sending - Pin::new(&mut self.sink) - .start_send(send) - .map_err(|_| SendError::ReceiverDropped)?; - res - } - - fn poll_flush( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.sink) - .poll_flush(cx) - .map_err(|_| SendError::ReceiverDropped) - } - - fn poll_close( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.sink) - .poll_close(cx) - .map_err(|_| SendError::ReceiverDropped) - } -} - -/// Send error for hyper channels. -#[derive(Debug)] -pub enum SendError { - /// Error when postcard serializing the message. - SerializeError(postcard::Error), - /// The message is too large to be sent. - SizeError(usize), - /// The connection has been closed. - ReceiverDropped, -} - -impl fmt::Display for SendError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&self, f) - } -} - -impl error::Error for SendError {} - -/// Receive error for hyper channels. -#[derive(Debug)] -pub enum RecvError { - /// Error when postcard deserializing the message. - DeserializeError(postcard::Error), - /// Hyper network error. - NetworkError(hyper::Error), -} - -impl fmt::Display for RecvError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&self, f) - } -} - -impl error::Error for RecvError {} - -/// OpenError for hyper channels. -#[derive(Debug)] -pub enum OpenError { - /// Hyper http error - HyperHttp(hyper::http::Error), - /// Generic hyper error - Hyper(hyper::Error), - /// The remote side of the channel was dropped - RemoteDropped, -} - -impl fmt::Display for OpenError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl std::error::Error for OpenError {} - -/// AcceptError for hyper channels. -/// -/// There is not much that can go wrong with hyper channels. 
-#[derive(Debug)] -pub enum AcceptError { - /// Hyper error - Hyper(hyper::http::Error), - /// The remote side of the channel was dropped - RemoteDropped, -} - -impl fmt::Display for AcceptError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl error::Error for AcceptError {} - -impl ConnectionErrors for HyperConnector { - type SendError = self::SendError; - - type RecvError = self::RecvError; - - type OpenError = OpenError; - - type AcceptError = AcceptError; -} - -impl StreamTypes for HyperConnector { - type In = In; - type Out = Out; - type RecvStream = self::RecvStream; - type SendSink = self::SendSink; -} - -impl Connector for HyperConnector { - async fn open(&self) -> Result<(Self::SendSink, Self::RecvStream), Self::OpenError> { - let (out_tx, out_rx) = flume::bounded::>(32); - let req: Request = Request::post(&self.inner.uri) - .body(Body::wrap_stream(out_rx.into_stream())) - .map_err(OpenError::HyperHttp)?; - let res = self - .inner - .client - .request(req) - .await - .map_err(OpenError::Hyper)?; - let (in_tx, in_rx) = flume::bounded::>(32); - spawn_recv_forwarder(res.into_body(), in_tx); - - let out_tx = self::SendSink::new(out_tx, self.inner.config.clone()); - let in_rx = self::RecvStream::new(in_rx); - Ok((out_tx, in_rx)) - } -} - -impl ConnectionErrors for HyperListener { - type SendError = self::SendError; - type RecvError = self::RecvError; - type OpenError = AcceptError; - type AcceptError = AcceptError; -} - -impl StreamTypes for HyperListener { - type In = In; - type Out = Out; - type RecvStream = self::RecvStream; - type SendSink = self::SendSink; -} - -impl Listener for HyperListener { - fn local_addr(&self) -> &[LocalAddr] { - &self.local_addr - } - - async fn accept(&self) -> Result<(Self::SendSink, Self::RecvStream), AcceptError> { - let (recv, send) = self - .channel - .recv_async() - .await - .map_err(|_| AcceptError::RemoteDropped)?; - Ok(( - SendSink::new(send, self.config.clone()), - RecvStream::new(recv), - )) - } -} diff --git a/src/transport/iroh.rs b/src/transport/iroh.rs deleted file mode 100644 index 04802d36..00000000 --- a/src/transport/iroh.rs +++ /dev/null @@ -1,732 +0,0 @@ -//! 
iroh transport implementation based on [iroh](https://crates.io/crates/iroh) - -use std::{ - collections::BTreeSet, - fmt, - future::Future, - io, - iter::once, - marker::PhantomData, - net::SocketAddr, - pin::{pin, Pin}, - sync::Arc, - task::{Context, Poll}, -}; - -use flume::TryRecvError; -use futures_lite::Stream; -use futures_sink::Sink; -use iroh::{endpoint::Connection, NodeAddr, NodeId}; -use pin_project::pin_project; -use serde::{de::DeserializeOwned, Serialize}; -use tokio::{sync::oneshot, task::yield_now}; -use tracing::{debug_span, Instrument}; - -use super::{ - util::{FramedPostcardRead, FramedPostcardWrite}, - StreamTypes, -}; -use crate::{ - transport::{ConnectionErrors, Connector, Listener, LocalAddr}, - RpcMessage, -}; - -const MAX_FRAME_LENGTH: usize = 1024 * 1024 * 16; - -#[derive(Debug)] -struct ListenerInner { - endpoint: Option, - task: Option>, - local_addr: Vec, - receiver: flume::Receiver, -} - -impl Drop for ListenerInner { - fn drop(&mut self) { - tracing::debug!("Dropping server endpoint"); - if let Some(endpoint) = self.endpoint.take() { - if let Ok(handle) = tokio::runtime::Handle::try_current() { - // spawn a task to wait for the endpoint to notify peers that it is closing - let span = debug_span!("closing listener"); - handle.spawn( - async move { - // iroh endpoint's close is async, and internally it waits the - // underlying quinn endpoint to be idle. - endpoint.close().await; - } - .instrument(span), - ); - } - } - if let Some(task) = self.task.take() { - task.abort() - } - } -} - -/// Access control for the server, either unrestricted or limited to a list of nodes that can -/// connect to the server endpoint -#[derive(Debug, Clone)] -pub enum AccessControl { - /// Unrestricted access, anyone can connect - Unrestricted, - /// Restricted access, only nodes in the list can connect, all other nodes will be rejected - Allowed(Vec), -} - -/// A server endpoint using a quinn connection -#[derive(Debug)] -pub struct IrohListener { - inner: Arc, - _p: PhantomData<(In, Out)>, -} - -impl IrohListener { - /// handles RPC requests from a connection - /// - /// to cleanly shut down the handler, drop the receiver side of the sender. - async fn connection_handler(connection: Connection, sender: flume::Sender) { - loop { - tracing::debug!("Awaiting incoming bidi substream on existing connection..."); - let bidi_stream = match connection.accept_bi().await { - Ok(bidi_stream) => bidi_stream, - Err(quinn::ConnectionError::ApplicationClosed(e)) => { - tracing::debug!(?e, "Peer closed the connection"); - break; - } - Err(e) => { - tracing::debug!(?e, "Error accepting stream"); - break; - } - }; - tracing::debug!("Sending substream to be handled... {}", bidi_stream.0.id()); - if sender.send_async(bidi_stream).await.is_err() { - tracing::debug!("Receiver dropped"); - break; - } - } - } - - async fn endpoint_handler( - endpoint: iroh::Endpoint, - sender: flume::Sender, - allowed_node_ids: BTreeSet, - ) { - loop { - tracing::debug!("Waiting for incoming connection..."); - let connecting = match endpoint.accept().await { - Some(connecting) => connecting, - None => break, - }; - - tracing::debug!("Awaiting connection from connect..."); - let connection = match connecting.await { - Ok(connection) => connection, - Err(e) => { - tracing::warn!(?e, "Error accepting connection"); - continue; - } - }; - - // When the `allowed_node_ids` is empty, it's empty forever, so the CPU's branch - // prediction should always optimize this block away from this loop. 
- // The same applies when it isn't empty, ignoring the check for emptiness and always - // extracting the node id and checking if it's in the set. - if !allowed_node_ids.is_empty() { - let Ok(client_node_id) = connection.remote_node_id().map_err(|e| { - tracing::error!( - ?e, - "Failed to extract iroh node id from incoming connection", - ) - }) else { - connection.close(0u32.into(), b"failed to extract iroh node id"); - continue; - }; - - if !allowed_node_ids.contains(&client_node_id) { - connection.close(0u32.into(), b"forbidden node id"); - continue; - } - } - - tracing::debug!( - "Connection established from {:?}", - connection.remote_node_id() - ); - - tracing::debug!("Spawning connection handler..."); - tokio::spawn(Self::connection_handler(connection, sender.clone())); - } - } - - /// Create a new server channel, given a quinn endpoint, with unrestricted access by node id - /// - /// The server channel will take care of listening on the endpoint and spawning - /// handlers for new connections. - pub fn new(endpoint: iroh::Endpoint) -> io::Result { - Self::new_with_access_control(endpoint, AccessControl::Unrestricted) - } - - /// Create a new server endpoint, with specified access control - /// - /// The server channel will take care of listening on the endpoint and spawning - /// handlers for new connections. - pub fn new_with_access_control( - endpoint: iroh::Endpoint, - access_control: AccessControl, - ) -> io::Result { - let allowed_node_ids = match access_control { - AccessControl::Unrestricted => BTreeSet::new(), - AccessControl::Allowed(list) if list.is_empty() => { - return Err(io::Error::other( - "Empty list of allowed nodes, \ - endpoint would reject all connections", - )); - } - AccessControl::Allowed(list) => BTreeSet::from_iter(list), - }; - - let (ipv4_socket_addr, maybe_ipv6_socket_addr) = endpoint.bound_sockets(); - let (sender, receiver) = flume::bounded(16); - let task = tokio::spawn(Self::endpoint_handler( - endpoint.clone(), - sender, - allowed_node_ids, - )); - - Ok(Self { - inner: Arc::new(ListenerInner { - endpoint: Some(endpoint), - task: Some(task), - local_addr: once(LocalAddr::Socket(ipv4_socket_addr)) - .chain(maybe_ipv6_socket_addr.map(LocalAddr::Socket)) - .collect(), - receiver, - }), - _p: PhantomData, - }) - } - - /// Create a new server channel, given just a source of incoming connections - /// - /// This is useful if you want to manage the quinn endpoint yourself, - /// use multiple endpoints, or use an endpoint for multiple protocols. - pub fn handle_connections( - incoming: flume::Receiver, - local_addr: SocketAddr, - ) -> Self { - let (sender, receiver) = flume::bounded(16); - let task = tokio::spawn(async move { - // just grab all connections and spawn a handler for each one - while let Ok(connection) = incoming.recv_async().await { - tokio::spawn(Self::connection_handler(connection, sender.clone())); - } - }); - Self { - inner: Arc::new(ListenerInner { - endpoint: None, - task: Some(task), - local_addr: vec![LocalAddr::Socket(local_addr)], - receiver, - }), - _p: PhantomData, - } - } - - /// Create a new server channel, given just a source of incoming substreams - /// - /// This is useful if you want to manage the quinn endpoint yourself, - /// use multiple endpoints, or use an endpoint for multiple protocols. 
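A small sketch of the access-control path above (assuming the `iroh-transport` feature and the public path `quic_rpc::transport::iroh`; endpoint setup is left to the caller):

    use quic_rpc::transport::iroh::{AccessControl, IrohListener};

    /// Build a listener that only accepts connections from the given node ids.
    /// Passing an empty list is rejected, since it would refuse every connection.
    fn restricted_listener(
        endpoint: iroh::Endpoint,
        allowed: Vec<iroh::NodeId>,
    ) -> std::io::Result<IrohListener<u64, u64>> {
        IrohListener::new_with_access_control(endpoint, AccessControl::Allowed(allowed))
    }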
- pub fn handle_substreams( - receiver: flume::Receiver, - local_addr: SocketAddr, - ) -> Self { - Self { - inner: Arc::new(ListenerInner { - endpoint: None, - task: None, - local_addr: vec![LocalAddr::Socket(local_addr)], - receiver, - }), - _p: PhantomData, - } - } -} - -impl Clone for IrohListener { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - _p: PhantomData, - } - } -} - -impl ConnectionErrors for IrohListener { - type SendError = io::Error; - type RecvError = io::Error; - type OpenError = quinn::ConnectionError; - type AcceptError = quinn::ConnectionError; -} - -impl StreamTypes for IrohListener { - type In = In; - type Out = Out; - type SendSink = SendSink; - type RecvStream = RecvStream; -} - -impl Listener for IrohListener { - async fn accept(&self) -> Result<(Self::SendSink, Self::RecvStream), AcceptError> { - let (send, recv) = self - .inner - .receiver - .recv_async() - .await - .map_err(|_| quinn::ConnectionError::LocallyClosed)?; - - Ok((SendSink::new(send), RecvStream::new(recv))) - } - - fn local_addr(&self) -> &[LocalAddr] { - &self.inner.local_addr - } -} - -type SocketInner = (quinn::SendStream, quinn::RecvStream); - -#[derive(Debug)] -struct ClientConnectionInner { - /// The quinn endpoint, we just keep a clone of this for information - endpoint: Option, - /// The task that handles creating new connections - task: Option>, - /// The channel to send new received connections - requests_tx: flume::Sender>>, -} - -impl Drop for ClientConnectionInner { - fn drop(&mut self) { - tracing::debug!("Dropping client connection"); - if let Some(endpoint) = self.endpoint.take() { - if let Ok(handle) = tokio::runtime::Handle::try_current() { - // spawn a task to wait for the endpoint to notify peers that it is closing - let span = debug_span!("closing client endpoint"); - handle.spawn( - async move { - endpoint.close().await; - } - .instrument(span), - ); - } - } - // this should not be necessary, since the task would terminate when the receiver is dropped. - // but just to be on the safe side. - if let Some(task) = self.task.take() { - tracing::debug!("Aborting task"); - task.abort(); - } - } -} - -/// A connection using an iroh connection -pub struct IrohConnector { - inner: Arc, - _p: PhantomData<(In, Out)>, -} - -impl IrohConnector { - async fn single_connection_handler( - connection: Connection, - requests_rx: flume::Receiver>>, - ) { - loop { - tracing::debug!("Awaiting request for new bidi substream..."); - let Ok(request_tx) = requests_rx.recv_async().await else { - tracing::info!("Single connection handler finished"); - return; - }; - - tracing::debug!("Got request for new bidi substream"); - match connection.open_bi().await { - Ok(pair) => { - tracing::debug!("Bidi substream opened"); - if request_tx.send(Ok(pair)).is_err() { - tracing::debug!("requester dropped"); - } - } - Err(e) => { - tracing::warn!(?e, "error opening bidi substream"); - if request_tx - .send(anyhow::Context::context( - Err(e), - "error opening bidi substream", - )) - .is_err() - { - tracing::debug!("requester dropped"); - } - } - } - } - } - - /// Client connection handler. - /// - /// It will run until the send side of the channel is dropped. - /// All other errors are logged and handled internally. - /// It will try to keep a connection open at all times. 
- async fn reconnect_handler_inner( - endpoint: iroh::Endpoint, - node_addr: NodeAddr, - alpn: Vec, - requests_rx: flume::Receiver>>, - ) { - let mut reconnect = pin!(ReconnectHandler { - endpoint, - state: ConnectionState::NotConnected, - node_addr, - alpn, - }); - - let mut pending_request: Option>> = None; - let mut connection: Option = None; - - loop { - // First we check if there is already a request ready in the channel - if pending_request.is_none() { - pending_request = match requests_rx.try_recv() { - Ok(req) => Some(req), - Err(TryRecvError::Empty) => None, - Err(TryRecvError::Disconnected) => { - tracing::debug!("client dropped"); - if let Some(connection) = connection { - connection.close(0u32.into(), b"requester dropped"); - } - break; - } - }; - } - - // If not connected, we attempt to establish a connection - if !reconnect.connected() { - tracing::trace!("tick: connection result"); - match reconnect.as_mut().await { - Ok(new_connection) => { - connection = Some(new_connection); - } - Err(e) => { - // If there was a pending request, we error it out as we're not connected - if let Some(request_ack_tx) = pending_request.take() { - if request_ack_tx.send(Err(e)).is_err() { - tracing::debug!("requester dropped"); - } - } - - // Yielding back to the runtime, otherwise this can run on a busy loop - // due to the always ready nature of things, messing up with single thread - // runtime flavor of tokio - yield_now().await; - } - } - // If we didn't have a ready request in the channel, we wait for one - } else if pending_request.is_none() { - let Ok(req) = requests_rx.recv_async().await else { - tracing::debug!("client dropped"); - if let Some(connection) = connection { - connection.close(0u32.into(), b"requester dropped"); - } - break; - }; - - tracing::trace!("tick: bidi request"); - pending_request = Some(req); - } - - // If we have a connection and a pending request, we good, just process it - if let Some(connection) = connection.as_mut() { - if let Some(request) = pending_request.take() { - match connection.open_bi().await { - Ok(pair) => { - tracing::debug!("Bidi substream opened"); - if request.send(Ok(pair)).is_err() { - tracing::debug!("requester dropped"); - } - } - Err(e) => { - tracing::warn!(?e, "error opening bidi substream"); - tracing::warn!("recreating connection"); - // NOTE: the connection might be stale, so we recreate the - // connection and set the request as pending instead of - // sending the error as a response - reconnect.set_not_connected(); - pending_request = Some(request); - } - } - } - } - } - } - - async fn reconnect_handler( - endpoint: iroh::Endpoint, - addr: NodeAddr, - alpn: Vec, - requests_rx: flume::Receiver>>, - ) { - Self::reconnect_handler_inner(endpoint, addr, alpn, requests_rx).await; - tracing::info!("Reconnect handler finished"); - } - - /// Create a new channel - pub fn from_connection(connection: Connection) -> Self { - let (requests_tx, requests_rx) = flume::bounded(16); - let task = tokio::spawn(Self::single_connection_handler(connection, requests_rx)); - Self { - inner: Arc::new(ClientConnectionInner { - endpoint: None, - task: Some(task), - requests_tx, - }), - _p: PhantomData, - } - } - - /// Create a new channel - pub fn new(endpoint: iroh::Endpoint, node_addr: impl Into, alpn: Vec) -> Self { - let (requests_tx, requests_rx) = flume::bounded(16); - let task = tokio::spawn(Self::reconnect_handler( - endpoint.clone(), - node_addr.into(), - alpn, - requests_rx, - )); - Self { - inner: Arc::new(ClientConnectionInner { - endpoint: 
Some(endpoint), - task: Some(task), - requests_tx, - }), - _p: PhantomData, - } - } -} - -struct ReconnectHandler { - endpoint: iroh::Endpoint, - state: ConnectionState, - node_addr: NodeAddr, - alpn: Vec, -} - -impl ReconnectHandler { - pub fn set_not_connected(&mut self) { - self.state.set_not_connected() - } - - pub fn connected(&self) -> bool { - matches!(self.state, ConnectionState::Connected(_)) - } -} - -enum ConnectionState { - /// There is no active connection. An attempt to connect will be made. - NotConnected, - /// Connecting to the remote. - Connecting(Pin> + Send>>), - /// A connection is already established. In this state, no more connection attempts are made. - Connected(Connection), - /// Intermediate state while processing. - Poisoned, -} - -impl ConnectionState { - pub fn poison(&mut self) -> Self { - std::mem::replace(self, Self::Poisoned) - } - - pub fn set_not_connected(&mut self) { - *self = Self::NotConnected - } -} - -impl Future for ReconnectHandler { - type Output = anyhow::Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.state.poison() { - ConnectionState::NotConnected => { - self.state = ConnectionState::Connecting(Box::pin({ - let endpoint = self.endpoint.clone(); - let node_addr = self.node_addr.clone(); - let alpn = self.alpn.clone(); - async move { endpoint.connect(node_addr, &alpn).await } - })); - self.poll(cx) - } - - ConnectionState::Connecting(mut connecting) => match connecting.as_mut().poll(cx) { - Poll::Ready(res) => match res { - Ok(connection) => { - self.state = ConnectionState::Connected(connection.clone()); - Poll::Ready(Ok(connection)) - } - Err(e) => { - self.state = ConnectionState::NotConnected; - Poll::Ready(Err(e)) - } - }, - Poll::Pending => { - self.state = ConnectionState::Connecting(connecting); - Poll::Pending - } - }, - - ConnectionState::Connected(connection) => { - self.state = ConnectionState::Connected(connection.clone()); - Poll::Ready(Ok(connection)) - } - - ConnectionState::Poisoned => unreachable!("poisoned connection state"), - } - } -} - -impl fmt::Debug for IrohConnector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ClientChannel") - .field("inner", &self.inner) - .finish() - } -} - -impl Clone for IrohConnector { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - _p: PhantomData, - } - } -} - -impl ConnectionErrors for IrohConnector { - type SendError = io::Error; - type RecvError = io::Error; - type OpenError = anyhow::Error; - type AcceptError = anyhow::Error; -} - -impl StreamTypes for IrohConnector { - type In = In; - type Out = Out; - type SendSink = SendSink; - type RecvStream = RecvStream; -} - -impl Connector for IrohConnector { - async fn open(&self) -> Result<(Self::SendSink, Self::RecvStream), Self::OpenError> { - let (request_ack_tx, request_ack_rx) = oneshot::channel(); - - self.inner - .requests_tx - .send_async(request_ack_tx) - .await - .map_err(|_| quinn::ConnectionError::LocallyClosed)?; - - let (send, recv) = request_ack_rx - .await - .map_err(|_| quinn::ConnectionError::LocallyClosed)??; - - Ok((SendSink::new(send), RecvStream::new(recv))) - } -} - -/// A sink that wraps a quinn SendStream with length delimiting and postcard -/// -/// If you want to send bytes directly, use [SendSink::into_inner] to get the -/// underlying [quinn::SendStream]. 
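For the connect side, a hedged sketch under the same assumptions (the ALPN value is a placeholder): the connector dials `addr` lazily and, via the reconnect handler above, re-establishes the connection when a stale one fails to open a new stream. `IrohConnector::from_connection` is the alternative when an established `Connection` is already at hand.

    use quic_rpc::transport::iroh::IrohConnector;

    fn dialing_connector(
        endpoint: iroh::Endpoint,
        addr: iroh::NodeAddr,
    ) -> IrohConnector<u64, u64> {
        IrohConnector::new(endpoint, addr, b"my-app/rpc/1".to_vec())
    }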
-#[pin_project] -pub struct SendSink(#[pin] FramedPostcardWrite); - -impl fmt::Debug for SendSink { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendSink").finish() - } -} - -impl SendSink { - fn new(inner: quinn::SendStream) -> Self { - let inner = FramedPostcardWrite::new(inner, MAX_FRAME_LENGTH); - Self(inner) - } -} - -impl SendSink { - /// Get the underlying [quinn::SendStream], which implements - /// [tokio::io::AsyncWrite] and can be used to send bytes directly. - pub fn into_inner(self) -> quinn::SendStream { - self.0.into_inner() - } -} - -impl Sink for SendSink { - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.project().0).poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: Out) -> Result<(), Self::Error> { - Pin::new(&mut self.project().0).start_send(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.project().0).poll_flush(cx) - } - - fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.project().0).poll_close(cx) - } -} - -/// A stream that wraps a quinn RecvStream with length delimiting and postcard -/// -/// If you want to receive bytes directly, use [RecvStream::into_inner] to get -/// the underlying [quinn::RecvStream]. -#[pin_project] -pub struct RecvStream(#[pin] FramedPostcardRead); - -impl fmt::Debug for RecvStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RecvStream").finish() - } -} - -impl RecvStream { - fn new(inner: quinn::RecvStream) -> Self { - let inner = FramedPostcardRead::new(inner, MAX_FRAME_LENGTH); - Self(inner) - } -} - -impl RecvStream { - /// Get the underlying [quinn::RecvStream], which implements - /// [tokio::io::AsyncRead] and can be used to receive bytes directly. - pub fn into_inner(self) -> quinn::RecvStream { - self.0.into_inner() - } -} - -impl Stream for RecvStream { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.project().0).poll_next(cx) - } -} - -/// Error for open. Currently just an anyhow::Error -pub type OpenBiError = anyhow::Error; - -/// Error for accept. Currently just a ConnectionError -pub type AcceptError = quinn::ConnectionError; diff --git a/src/transport/mapped.rs b/src/transport/mapped.rs deleted file mode 100644 index 649ac1f9..00000000 --- a/src/transport/mapped.rs +++ /dev/null @@ -1,325 +0,0 @@ -//! Transport with mapped input and output types. 
-use std::{ - fmt::{Debug, Display}, - marker::PhantomData, - task::{Context, Poll}, -}; - -use futures_lite::{Stream, StreamExt}; -use futures_util::SinkExt; -use pin_project::pin_project; - -use super::{ConnectionErrors, Connector, StreamTypes}; -use crate::{RpcError, RpcMessage}; - -/// A connection that maps input and output types -#[derive(Debug)] -pub struct MappedConnector { - inner: C, - _p: std::marker::PhantomData<(In, Out)>, -} - -impl MappedConnector -where - C: Connector, - In: TryFrom, - C::Out: From, -{ - /// Create a new mapped connection - pub fn new(inner: C) -> Self { - Self { - inner, - _p: std::marker::PhantomData, - } - } -} - -impl Clone for MappedConnector -where - C: Clone, -{ - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - _p: std::marker::PhantomData, - } - } -} - -impl ConnectionErrors for MappedConnector -where - In: RpcMessage, - Out: RpcMessage, - C: ConnectionErrors, -{ - type RecvError = ErrorOrMapError; - type SendError = C::SendError; - type OpenError = C::OpenError; - type AcceptError = C::AcceptError; -} - -impl StreamTypes for MappedConnector -where - C: StreamTypes, - In: RpcMessage, - Out: RpcMessage, - In: TryFrom, - C::Out: From, -{ - type In = In; - type Out = Out; - type RecvStream = MappedRecvStream; - type SendSink = MappedSendSink; -} - -impl Connector for MappedConnector -where - C: Connector, - In: RpcMessage, - Out: RpcMessage, - In: TryFrom, - C::Out: From, -{ - fn open( - &self, - ) -> impl std::future::Future> - + Send { - let inner = self.inner.open(); - async move { - let (send, recv) = inner.await?; - Ok((MappedSendSink::new(send), MappedRecvStream::new(recv))) - } - } -} - -/// A combinator that maps a stream of incoming messages to a different type -#[pin_project] -pub struct MappedRecvStream { - inner: S, - _p: std::marker::PhantomData, -} - -impl MappedRecvStream { - /// Create a new mapped receive stream - pub fn new(inner: S) -> Self { - Self { - inner, - _p: std::marker::PhantomData, - } - } -} - -/// Error mapping an incoming message to the inner type -#[derive(Debug)] -pub enum ErrorOrMapError { - /// Error from the inner stream - Inner(E), - /// Conversion error - Conversion, -} - -impl std::error::Error for ErrorOrMapError {} - -impl Display for ErrorOrMapError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ErrorOrMapError::Inner(e) => write!(f, "Inner error: {}", e), - ErrorOrMapError::Conversion => write!(f, "Conversion error"), - } - } -} - -impl Stream for MappedRecvStream -where - S: Stream> + Unpin, - In: TryFrom, - E: RpcError, -{ - type Item = Result>; - - fn poll_next(self: std::pin::Pin<&mut Self>, cx: &mut Context) -> Poll> { - match self.project().inner.poll_next(cx) { - Poll::Ready(Some(Ok(item))) => { - let item = item.try_into().map_err(|_| ErrorOrMapError::Conversion); - Poll::Ready(Some(item)) - } - Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(ErrorOrMapError::Inner(e)))), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} - -/// A sink that maps outgoing messages to a different type -/// -/// The conversion to the underlying message type always succeeds, so this -/// is relatively simple. 
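
The receive side above applies `TryFrom` per item and reports a failed conversion as its own error variant instead of dropping the message silently. A self-contained sketch of that per-item rule with illustrative `Outer`/`Inner` types (the `ErrorOrMapError` here is a local re-statement for the sketch, not the crate's type):

```rust
#[derive(Debug)]
enum Outer {
    Compute(u64),
    Other(String),
}

#[derive(Debug)]
struct Inner(u64); // the sub-service only understands Compute

impl TryFrom<Outer> for Inner {
    type Error = ();
    fn try_from(value: Outer) -> Result<Self, Self::Error> {
        match value {
            Outer::Compute(x) => Ok(Inner(x)),
            _ => Err(()),
        }
    }
}

#[derive(Debug)]
enum ErrorOrMapError<E> {
    Inner(E),
    Conversion,
}

fn map_item<E>(item: Result<Outer, E>) -> Result<Inner, ErrorOrMapError<E>> {
    match item {
        Ok(outer) => Inner::try_from(outer).map_err(|_| ErrorOrMapError::Conversion),
        Err(e) => Err(ErrorOrMapError::Inner(e)),
    }
}

fn main() {
    assert!(matches!(
        map_item::<std::io::Error>(Ok(Outer::Compute(7))),
        Ok(Inner(7))
    ));
    assert!(matches!(
        map_item::<std::io::Error>(Ok(Outer::Other("x".into()))),
        Err(ErrorOrMapError::Conversion)
    ));
}
```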
-#[pin_project] -pub struct MappedSendSink { - inner: S, - _p: std::marker::PhantomData<(Out, OutS)>, -} - -impl MappedSendSink { - /// Create a new mapped send sink - pub fn new(inner: S) -> Self { - Self { - inner, - _p: std::marker::PhantomData, - } - } -} - -impl futures_sink::Sink for MappedSendSink -where - S: futures_sink::Sink + Unpin, - Out: Into, -{ - type Error = S::Error; - - fn poll_ready( - self: std::pin::Pin<&mut Self>, - cx: &mut Context, - ) -> Poll> { - self.project().inner.poll_ready_unpin(cx) - } - - fn start_send(self: std::pin::Pin<&mut Self>, item: Out) -> Result<(), Self::Error> { - self.project().inner.start_send_unpin(item.into()) - } - - fn poll_flush( - self: std::pin::Pin<&mut Self>, - cx: &mut Context, - ) -> Poll> { - self.project().inner.poll_flush_unpin(cx) - } - - fn poll_close( - self: std::pin::Pin<&mut Self>, - cx: &mut Context, - ) -> Poll> { - self.project().inner.poll_close_unpin(cx) - } -} - -/// Connection types for a mapped connection -pub struct MappedStreamTypes(PhantomData<(In, Out, C)>); - -impl Debug for MappedStreamTypes { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("MappedConnectionTypes").finish() - } -} - -impl Clone for MappedStreamTypes { - fn clone(&self) -> Self { - Self(PhantomData) - } -} - -impl ConnectionErrors for MappedStreamTypes -where - In: RpcMessage, - Out: RpcMessage, - C: ConnectionErrors, -{ - type RecvError = ErrorOrMapError; - type SendError = C::SendError; - type OpenError = C::OpenError; - type AcceptError = C::AcceptError; -} - -impl StreamTypes for MappedStreamTypes -where - C: StreamTypes, - In: RpcMessage, - Out: RpcMessage, - In: TryFrom, - C::Out: From, -{ - type In = In; - type Out = Out; - type RecvStream = MappedRecvStream; - type SendSink = MappedSendSink; -} - -#[cfg(test)] -#[cfg(feature = "flume-transport")] -mod tests { - - use serde::{Deserialize, Serialize}; - use testresult::TestResult; - - use super::*; - use crate::{ - server::{BoxedChannelTypes, RpcChannel}, - transport::Listener, - RpcClient, RpcServer, - }; - - #[derive(Debug, Clone, Serialize, Deserialize, derive_more::From, derive_more::TryInto)] - enum Request { - A(u64), - B(String), - } - - #[derive(Debug, Clone, Serialize, Deserialize, derive_more::From, derive_more::TryInto)] - enum Response { - A(u64), - B(String), - } - - #[derive(Debug, Clone)] - struct FullService; - - impl crate::Service for FullService { - type Req = Request; - type Res = Response; - } - - #[derive(Debug, Clone)] - struct SubService; - - impl crate::Service for SubService { - type Req = String; - type Res = String; - } - - #[tokio::test] - #[ignore] - async fn smoke() -> TestResult<()> { - async fn handle_sub_request( - _req: String, - _chan: RpcChannel>, - ) -> anyhow::Result<()> { - Ok(()) - } - // create a listener / connector pair. 
Type will be inferred - let (s, c) = crate::transport::flume::channel(32); - // wrap the server in a RpcServer, this is where the service type is specified - let server = RpcServer::::new(s.clone()); - // when using a boxed transport, we can omit the transport type and use the default - let _server_boxed: RpcServer = RpcServer::::new(s.boxed()); - // create a client in a RpcClient, this is where the service type is specified - let client = RpcClient::::new(c); - // when using a boxed transport, we can omit the transport type and use the default - let _boxed_client = client.clone().boxed(); - // map the client to a sub-service - let _sub_client: RpcClient = client.clone().map::(); - // when using a boxed transport, we can omit the transport type and use the default - let _sub_client_boxed: RpcClient = client.clone().map::().boxed(); - // we can not map the service to a sub-service, since we need the first message to determine which sub-service to use - while let Ok(accepting) = server.accept().await { - let (msg, chan) = accepting.read_first().await?; - match msg { - Request::A(_x) => todo!(), - Request::B(x) => { - // but we can map the channel to the sub-service, once we know which one to use - handle_sub_request(x, chan.map::().boxed()).await? - } - } - } - Ok(()) - } -} diff --git a/src/transport/misc/mod.rs b/src/transport/misc/mod.rs deleted file mode 100644 index 59bd19cd..00000000 --- a/src/transport/misc/mod.rs +++ /dev/null @@ -1,52 +0,0 @@ -//! Miscellaneous transport utilities -use std::convert::Infallible; - -use futures_lite::stream; -use futures_sink::Sink; - -use super::StreamTypes; -use crate::{ - transport::{ConnectionErrors, Listener}, - RpcMessage, -}; - -/// A dummy listener that does nothing -/// -/// This can be useful as a default if you want to configure -/// an optional listener. -#[derive(Debug, Default)] -pub struct DummyListener { - _p: std::marker::PhantomData<(In, Out)>, -} - -impl Clone for DummyListener { - fn clone(&self) -> Self { - Self { - _p: std::marker::PhantomData, - } - } -} - -impl ConnectionErrors for DummyListener { - type RecvError = Infallible; - type SendError = Infallible; - type OpenError = Infallible; - type AcceptError = Infallible; -} - -impl StreamTypes for DummyListener { - type In = In; - type Out = Out; - type RecvStream = stream::Pending>; - type SendSink = Box + Unpin + Send + Sync>; -} - -impl Listener for DummyListener { - async fn accept(&self) -> Result<(Self::SendSink, Self::RecvStream), Self::AcceptError> { - futures_lite::future::pending().await - } - - fn local_addr(&self) -> &[super::LocalAddr] { - &[] - } -} diff --git a/src/transport/mod.rs b/src/transport/mod.rs deleted file mode 100644 index cb7a9117..00000000 --- a/src/transport/mod.rs +++ /dev/null @@ -1,157 +0,0 @@ -//! Built in transports for quic-rpc -//! -//! There are two sides to a transport, a server side where connections are -//! accepted and a client side where connections are initiated. -//! -//! Connections are bidirectional typed channels, with a distinct type for -//! the send and receive side. They are *unrelated* to services. -//! -//! In the transport module, the message types are referred to as `In` and `Out`. -//! -//! A [`Connector`] can be used to *open* bidirectional typed channels using -//! [`Connector::open`]. A [`Listener`] can be used to *accept* bidirectional -//! typed channels from any of the currently opened connections to clients, using -//! [`Listener::accept`]. -//! -//! 
In both cases, the result is a tuple of a send side and a receive side. These -//! types are defined by implementing the [`StreamTypes`] trait. -//! -//! Errors for both sides are defined by implementing the [`ConnectionErrors`] trait. -use std::{ - fmt::{self, Debug, Display}, - net::SocketAddr, -}; - -use boxed::{BoxableConnector, BoxableListener, BoxedConnector, BoxedListener}; -use futures_lite::{Future, Stream}; -use futures_sink::Sink; -use mapped::MappedConnector; - -use crate::{RpcError, RpcMessage}; - -pub mod boxed; -pub mod combined; -#[cfg(feature = "flume-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "flume-transport")))] -pub mod flume; -#[cfg(feature = "hyper-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "hyper-transport")))] -pub mod hyper; -#[cfg(feature = "iroh-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "iroh-transport")))] -pub mod iroh; -pub mod mapped; -pub mod misc; -#[cfg(feature = "quinn-transport")] -#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "quinn-transport")))] -pub mod quinn; - -#[cfg(any(feature = "quinn-transport", feature = "iroh-transport"))] -#[cfg_attr( - quicrpc_docsrs, - doc(cfg(any(feature = "quinn-transport", feature = "iroh-transport"))) -)] -mod util; - -/// Errors that can happen when creating and using a [`Connector`] or [`Listener`]. -pub trait ConnectionErrors: Debug + Clone + Send + Sync + 'static { - /// Error when sending a message via a channel - type SendError: RpcError; - /// Error when receiving a message via a channel - type RecvError: RpcError; - /// Error when opening a channel - type OpenError: RpcError; - /// Error when accepting a channel - type AcceptError: RpcError; -} - -/// Types that are common to both [`Connector`] and [`Listener`]. -/// -/// Having this as a separate trait is useful when writing generic code that works with both. -pub trait StreamTypes: ConnectionErrors { - /// The type of messages that can be received on the channel - type In: RpcMessage; - /// The type of messages that can be sent on the channel - type Out: RpcMessage; - /// Receive side of a bidirectional typed channel - type RecvStream: Stream> - + Send - + Sync - + Unpin - + 'static; - /// Send side of a bidirectional typed channel - type SendSink: Sink + Send + Sync + Unpin + 'static; -} - -/// A connection to a specific remote machine -/// -/// A connection can be used to open bidirectional typed channels using [`Connector::open`]. -pub trait Connector: StreamTypes { - /// Open a channel to the remote che - fn open( - &self, - ) -> impl Future> + Send; - - /// Map the input and output types of this connection - fn map(self) -> MappedConnector - where - In1: TryFrom, - Self::Out: From, - { - MappedConnector::new(self) - } - - /// Box the connection - fn boxed(self) -> BoxedConnector - where - Self: BoxableConnector + Sized + 'static, - { - self::BoxedConnector::new(self) - } -} - -/// A listener that listens for connections -/// -/// A listener can be used to accept bidirectional typed channels from any of the -/// currently opened connections to clients, using [`Listener::accept`]. -pub trait Listener: StreamTypes { - /// Accept a new typed bidirectional channel on any of the connections we - /// have currently opened. - fn accept( - &self, - ) -> impl Future> + Send; - - /// The local addresses this endpoint is bound to. 
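
Both `Connector::open` and `Listener::accept` hand back a plain `Sink`/`Stream` pair, so generic helper code only needs the futures traits. A small sketch of a one-message round trip written against those traits alone (the in-memory channel in `main` and the fixed `io::Error` item error are stand-ins for a real transport):

```rust
use futures::{channel::mpsc, Sink, SinkExt, Stream, StreamExt};

async fn roundtrip<S, R, Out, In>(mut send: S, mut recv: R, msg: Out) -> anyhow::Result<Option<In>>
where
    S: Sink<Out> + Unpin,
    S::Error: std::error::Error + Send + Sync + 'static,
    R: Stream<Item = Result<In, std::io::Error>> + Unpin,
{
    send.send(msg).await?;
    match recv.next().await {
        Some(item) => Ok(Some(item?)),
        None => Ok(None),
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // in-memory stand-in for an opened channel: what we send comes right back
    let (tx, rx) = mpsc::channel::<String>(1);
    let rx = rx.map(Ok::<_, std::io::Error>);
    let reply = roundtrip(tx, rx, "ping".to_string()).await?;
    assert_eq!(reply.as_deref(), Some("ping"));
    Ok(())
}
```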
- fn local_addr(&self) -> &[LocalAddr]; - - /// Box the listener - fn boxed(self) -> BoxedListener - where - Self: BoxableListener + Sized + 'static, - { - BoxedListener::new(self) - } -} - -/// The kinds of local addresses a [Listener] can be bound to. -/// -/// Returned by [Listener::local_addr]. -/// -/// [`Display`]: fmt::Display -#[derive(Debug, Clone)] -#[non_exhaustive] -pub enum LocalAddr { - /// A local socket. - Socket(SocketAddr), - /// An in-memory address. - Mem, -} - -impl Display for LocalAddr { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - LocalAddr::Socket(sockaddr) => write!(f, "{sockaddr}"), - LocalAddr::Mem => write!(f, "mem"), - } - } -} diff --git a/src/transport/quinn.rs b/src/transport/quinn.rs deleted file mode 100644 index 89a69e66..00000000 --- a/src/transport/quinn.rs +++ /dev/null @@ -1,948 +0,0 @@ -//! QUIC transport implementation based on [quinn](https://crates.io/crates/quinn) -use std::{ - fmt, io, - marker::PhantomData, - net::SocketAddr, - pin::Pin, - result, - sync::Arc, - task::{Context, Poll}, -}; - -use futures_lite::{Future, Stream, StreamExt}; -use futures_sink::Sink; -use futures_util::FutureExt; -use pin_project::pin_project; -use serde::{de::DeserializeOwned, Serialize}; -use tokio::sync::oneshot; -use tracing::{debug_span, Instrument}; - -use super::{ - util::{FramedPostcardRead, FramedPostcardWrite}, - StreamTypes, -}; -use crate::{ - transport::{ConnectionErrors, Connector, Listener, LocalAddr}, - RpcMessage, -}; - -const MAX_FRAME_LENGTH: usize = 1024 * 1024 * 16; - -#[derive(Debug)] -struct ListenerInner { - endpoint: Option, - task: Option>, - local_addr: [LocalAddr; 1], - receiver: flume::Receiver, -} - -impl Drop for ListenerInner { - fn drop(&mut self) { - tracing::debug!("Dropping listener"); - if let Some(endpoint) = self.endpoint.take() { - endpoint.close(0u32.into(), b"Listener dropped"); - - if let Ok(handle) = tokio::runtime::Handle::try_current() { - // spawn a task to wait for the endpoint to notify peers that it is closing - let span = debug_span!("closing listener"); - handle.spawn( - async move { - endpoint.wait_idle().await; - } - .instrument(span), - ); - } - } - if let Some(task) = self.task.take() { - task.abort() - } - } -} - -/// A listener using a quinn connection -#[derive(Debug)] -pub struct QuinnListener { - inner: Arc, - _p: PhantomData<(In, Out)>, -} - -impl QuinnListener { - /// handles RPC requests from a connection - /// - /// to cleanly shutdown the handler, drop the receiver side of the sender. - async fn connection_handler(connection: quinn::Connection, sender: flume::Sender) { - loop { - tracing::debug!("Awaiting incoming bidi substream on existing connection..."); - let bidi_stream = match connection.accept_bi().await { - Ok(bidi_stream) => bidi_stream, - Err(quinn::ConnectionError::ApplicationClosed(e)) => { - tracing::debug!("Peer closed the connection {:?}", e); - break; - } - Err(e) => { - tracing::debug!("Error accepting stream: {}", e); - break; - } - }; - tracing::debug!("Sending substream to be handled... 
{}", bidi_stream.0.id()); - if sender.send_async(bidi_stream).await.is_err() { - tracing::debug!("Receiver dropped"); - break; - } - } - } - - async fn endpoint_handler(endpoint: quinn::Endpoint, sender: flume::Sender) { - loop { - tracing::debug!("Waiting for incoming connection..."); - let connecting = match endpoint.accept().await { - Some(connecting) => connecting, - None => break, - }; - tracing::debug!("Awaiting connection from connect..."); - let conection = match connecting.await { - Ok(conection) => conection, - Err(e) => { - tracing::warn!("Error accepting connection: {}", e); - continue; - } - }; - tracing::debug!( - "Connection established from {:?}", - conection.remote_address() - ); - tracing::debug!("Spawning connection handler..."); - tokio::spawn(Self::connection_handler(conection, sender.clone())); - } - } - - /// Create a new server channel, given a quinn endpoint. - /// - /// The endpoint must be a server endpoint. - /// - /// The server channel will take care of listening on the endpoint and spawning - /// handlers for new connections. - pub fn new(endpoint: quinn::Endpoint) -> io::Result { - let local_addr = endpoint.local_addr()?; - let (sender, receiver) = flume::bounded(16); - let task = tokio::spawn(Self::endpoint_handler(endpoint.clone(), sender)); - Ok(Self { - inner: Arc::new(ListenerInner { - endpoint: Some(endpoint), - task: Some(task), - local_addr: [LocalAddr::Socket(local_addr)], - receiver, - }), - _p: PhantomData, - }) - } - - /// Create a new server channel, given just a source of incoming connections - /// - /// This is useful if you want to manage the quinn endpoint yourself, - /// use multiple endpoints, or use an endpoint for multiple protocols. - pub fn handle_connections( - incoming: flume::Receiver, - local_addr: SocketAddr, - ) -> Self { - let (sender, receiver) = flume::bounded(16); - let task = tokio::spawn(async move { - // just grab all connections and spawn a handler for each one - while let Ok(connection) = incoming.recv_async().await { - tokio::spawn(Self::connection_handler(connection, sender.clone())); - } - }); - Self { - inner: Arc::new(ListenerInner { - endpoint: None, - task: Some(task), - local_addr: [LocalAddr::Socket(local_addr)], - receiver, - }), - _p: PhantomData, - } - } - - /// Create a new server channel, given just a source of incoming substreams - /// - /// This is useful if you want to manage the quinn endpoint yourself, - /// use multiple endpoints, or use an endpoint for multiple protocols. 
- pub fn handle_substreams( - receiver: flume::Receiver, - local_addr: SocketAddr, - ) -> Self { - Self { - inner: Arc::new(ListenerInner { - endpoint: None, - task: None, - local_addr: [LocalAddr::Socket(local_addr)], - receiver, - }), - _p: PhantomData, - } - } -} - -impl Clone for QuinnListener { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - _p: PhantomData, - } - } -} - -impl ConnectionErrors for QuinnListener { - type SendError = io::Error; - type RecvError = io::Error; - type OpenError = quinn::ConnectionError; - type AcceptError = quinn::ConnectionError; -} - -impl StreamTypes for QuinnListener { - type In = In; - type Out = Out; - type SendSink = self::SendSink; - type RecvStream = self::RecvStream; -} - -impl Listener for QuinnListener { - async fn accept(&self) -> Result<(Self::SendSink, Self::RecvStream), AcceptError> { - let (send, recv) = self - .inner - .receiver - .recv_async() - .await - .map_err(|_| quinn::ConnectionError::LocallyClosed)?; - Ok((SendSink::new(send), RecvStream::new(recv))) - } - - fn local_addr(&self) -> &[LocalAddr] { - &self.inner.local_addr - } -} - -type SocketInner = (quinn::SendStream, quinn::RecvStream); - -#[derive(Debug)] -struct ClientConnectionInner { - /// The quinn endpoint, we just keep a clone of this for information - endpoint: Option, - /// The task that handles creating new connections - task: Option>, - /// The channel to receive new connections - sender: flume::Sender>>, -} - -impl Drop for ClientConnectionInner { - fn drop(&mut self) { - tracing::debug!("Dropping client connection"); - if let Some(endpoint) = self.endpoint.take() { - endpoint.close(0u32.into(), b"client connection dropped"); - if let Ok(handle) = tokio::runtime::Handle::try_current() { - // spawn a task to wait for the endpoint to notify peers that it is closing - let span = debug_span!("closing client endpoint"); - handle.spawn( - async move { - endpoint.wait_idle().await; - } - .instrument(span), - ); - } - } - // this should not be necessary, since the task would terminate when the receiver is dropped. - // but just to be on the safe side. - if let Some(task) = self.task.take() { - tracing::debug!("Aborting task"); - task.abort(); - } - } -} - -/// A connection using a quinn connection -pub struct QuinnConnector { - inner: Arc, - _p: PhantomData<(In, Out)>, -} - -impl QuinnConnector { - async fn single_connection_handler_inner( - connection: quinn::Connection, - requests: flume::Receiver>>, - ) -> result::Result<(), flume::RecvError> { - loop { - tracing::debug!("Awaiting request for new bidi substream..."); - let request = requests.recv_async().await?; - tracing::debug!("Got request for new bidi substream"); - match connection.open_bi().await { - Ok(pair) => { - tracing::debug!("Bidi substream opened"); - if request.send(Ok(pair)).is_err() { - tracing::debug!("requester dropped"); - } - } - Err(e) => { - tracing::warn!("error opening bidi substream: {}", e); - if request.send(Err(e)).is_err() { - tracing::debug!("requester dropped"); - } - } - } - } - } - - async fn single_connection_handler( - connection: quinn::Connection, - requests: flume::Receiver>>, - ) { - if Self::single_connection_handler_inner(connection, requests) - .await - .is_err() - { - tracing::info!("Single connection handler finished"); - } else { - unreachable!() - } - } - - /// Client connection handler. - /// - /// It will run until the send side of the channel is dropped. - /// All other errors are logged and handled internally. 
- /// It will try to keep a connection open at all times. - async fn reconnect_handler_inner( - endpoint: quinn::Endpoint, - addr: SocketAddr, - name: String, - requests: flume::Receiver>>, - ) { - let reconnect = ReconnectHandler { - endpoint, - state: ConnectionState::NotConnected, - addr, - name, - }; - tokio::pin!(reconnect); - - let mut receiver = Receiver::new(&requests); - - let mut pending_request: Option< - oneshot::Sender>, - > = None; - let mut connection = None; - - enum Racer { - Reconnect(Result), - Channel(Option>>), - } - - loop { - let mut conn_result = None; - let mut chann_result = None; - if !reconnect.connected() && pending_request.is_none() { - match futures_lite::future::race( - reconnect.as_mut().map(Racer::Reconnect), - receiver.next().map(Racer::Channel), - ) - .await - { - Racer::Reconnect(connection_result) => conn_result = Some(connection_result), - Racer::Channel(channel_result) => { - chann_result = Some(channel_result); - } - } - } else if !reconnect.connected() { - // only need a new connection - conn_result = Some(reconnect.as_mut().await); - } else if pending_request.is_none() { - // there is a connection, just need a request - chann_result = Some(receiver.next().await); - } - - if let Some(conn_result) = conn_result { - tracing::trace!("tick: connection result"); - match conn_result { - Ok(new_connection) => { - connection = Some(new_connection); - } - Err(e) => { - let connection_err = match e { - ReconnectErr::Connect(e) => { - // TODO(@divma): the type for now accepts only a - // ConnectionError, not a ConnectError. I'm mapping this now to - // some ConnectionError since before it was not even reported. - // Maybe adjust the type? - tracing::warn!(%e, "error calling connect"); - quinn::ConnectionError::Reset - } - ReconnectErr::Connection(e) => { - tracing::warn!(%e, "failed to connect"); - e - } - }; - if let Some(request) = pending_request.take() { - if request.send(Err(connection_err)).is_err() { - tracing::debug!("requester dropped"); - } - } - } - } - } - - if let Some(req) = chann_result { - tracing::trace!("tick: bidi request"); - match req { - Some(request) => pending_request = Some(request), - None => { - tracing::debug!("client dropped"); - if let Some(connection) = connection { - connection.close(0u32.into(), b"requester dropped"); - } - break; - } - } - } - - if let Some(connection) = connection.as_mut() { - if let Some(request) = pending_request.take() { - match connection.open_bi().await { - Ok(pair) => { - tracing::debug!("Bidi substream opened"); - if request.send(Ok(pair)).is_err() { - tracing::debug!("requester dropped"); - } - } - Err(e) => { - tracing::warn!("error opening bidi substream: {}", e); - tracing::warn!("recreating connection"); - // NOTE: the connection might be stale, so we recreate the - // connection and set the request as pending instead of - // sending the error as a response - reconnect.set_not_connected(); - pending_request = Some(request); - } - } - } - } - } - } - - async fn reconnect_handler( - endpoint: quinn::Endpoint, - addr: SocketAddr, - name: String, - requests: flume::Receiver>>, - ) { - Self::reconnect_handler_inner(endpoint, addr, name, requests).await; - tracing::info!("Reconnect handler finished"); - } - - /// Create a new channel - pub fn from_connection(connection: quinn::Connection) -> Self { - let (sender, receiver) = flume::bounded(16); - let task = tokio::spawn(Self::single_connection_handler(connection, receiver)); - Self { - inner: Arc::new(ClientConnectionInner { - endpoint: None, - task: 
Some(task), - sender, - }), - _p: PhantomData, - } - } - - /// Create a new channel - pub fn new(endpoint: quinn::Endpoint, addr: SocketAddr, name: String) -> Self { - let (sender, receiver) = flume::bounded(16); - let task = tokio::spawn(Self::reconnect_handler( - endpoint.clone(), - addr, - name, - receiver, - )); - Self { - inner: Arc::new(ClientConnectionInner { - endpoint: Some(endpoint), - task: Some(task), - sender, - }), - _p: PhantomData, - } - } -} - -struct ReconnectHandler { - endpoint: quinn::Endpoint, - state: ConnectionState, - addr: SocketAddr, - name: String, -} - -impl ReconnectHandler { - pub fn set_not_connected(&mut self) { - self.state.set_not_connected() - } - - pub fn connected(&self) -> bool { - matches!(self.state, ConnectionState::Connected(_)) - } -} - -enum ConnectionState { - /// There is no active connection. An attempt to connect will be made. - NotConnected, - /// Connecting to the remote. - Connecting(quinn::Connecting), - /// A connection is already established. In this state, no more connection attempts are made. - Connected(quinn::Connection), - /// Intermediate state while processing. - Poisoned, -} - -impl ConnectionState { - pub fn poison(&mut self) -> ConnectionState { - std::mem::replace(self, ConnectionState::Poisoned) - } - - pub fn set_not_connected(&mut self) { - *self = ConnectionState::NotConnected - } -} - -enum ReconnectErr { - Connect(quinn::ConnectError), - Connection(quinn::ConnectionError), -} - -impl Future for ReconnectHandler { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - match self.state.poison() { - ConnectionState::NotConnected => match self.endpoint.connect(self.addr, &self.name) { - Ok(connecting) => { - self.state = ConnectionState::Connecting(connecting); - self.poll(cx) - } - Err(e) => { - self.state = ConnectionState::NotConnected; - Poll::Ready(Err(ReconnectErr::Connect(e))) - } - }, - ConnectionState::Connecting(mut connecting) => match Pin::new(&mut connecting).poll(cx) - { - Poll::Ready(res) => match res { - Ok(connection) => { - self.state = ConnectionState::Connected(connection.clone()); - Poll::Ready(Ok(connection)) - } - Err(e) => { - self.state = ConnectionState::NotConnected; - Poll::Ready(Err(ReconnectErr::Connection(e))) - } - }, - Poll::Pending => { - self.state = ConnectionState::Connecting(connecting); - Poll::Pending - } - }, - ConnectionState::Connected(connection) => { - self.state = ConnectionState::Connected(connection.clone()); - Poll::Ready(Ok(connection)) - } - ConnectionState::Poisoned => unreachable!("poisoned connection state"), - } - } -} - -/// Wrapper over [`flume::Receiver`] that can be used with [`tokio::select`]. -/// -/// NOTE: from https://github.com/zesterer/flume/issues/104: -/// > If RecvFut is dropped without being polled, the item is never received. 
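
Both `ConnectionState` above and the `Receiver` wrapper below drive a by-value state machine from `poll` by swapping in a `Poisoned` placeholder with `mem::replace`, working on the owned state, and writing the successor state back before returning. A minimal standalone illustration of that pattern (the `Stepper` future is invented for the sketch):

```rust
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

enum State {
    Start(u32),
    Done,
    Poisoned,
}

impl State {
    fn poison(&mut self) -> State {
        // take the current state out by value, leaving a placeholder behind
        std::mem::replace(self, State::Poisoned)
    }
}

struct Stepper {
    state: State,
}

impl Future for Stepper {
    type Output = u32;

    fn poll(mut self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<u32> {
        match self.state.poison() {
            State::Start(n) => {
                // store the successor state before returning
                self.state = State::Done;
                Poll::Ready(n)
            }
            State::Done => {
                // a real implementation would register the waker before returning Pending
                self.state = State::Done;
                Poll::Pending
            }
            State::Poisoned => unreachable!("only observable while poll itself is running"),
        }
    }
}

#[tokio::main]
async fn main() {
    let value = Stepper { state: State::Start(42) }.await;
    assert_eq!(value, 42);
}
```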
-enum Receiver<'a, T> -where - Self: 'a, -{ - PreReceive(&'a flume::Receiver), - Receiving(&'a flume::Receiver, flume::r#async::RecvFut<'a, T>), - Poisoned, -} - -impl<'a, T> Receiver<'a, T> { - fn new(recv: &'a flume::Receiver) -> Self { - Receiver::PreReceive(recv) - } - - fn poison(&mut self) -> Self { - std::mem::replace(self, Self::Poisoned) - } -} - -impl Stream for Receiver<'_, T> { - type Item = T; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.poison() { - Receiver::PreReceive(recv) => { - let fut = recv.recv_async(); - *self = Receiver::Receiving(recv, fut); - self.poll_next(cx) - } - Receiver::Receiving(recv, mut fut) => match Pin::new(&mut fut).poll(cx) { - Poll::Ready(Ok(t)) => { - *self = Receiver::PreReceive(recv); - Poll::Ready(Some(t)) - } - Poll::Ready(Err(flume::RecvError::Disconnected)) => { - *self = Receiver::PreReceive(recv); - Poll::Ready(None) - } - Poll::Pending => { - *self = Receiver::Receiving(recv, fut); - Poll::Pending - } - }, - Receiver::Poisoned => unreachable!("poisoned receiver state"), - } - } -} - -impl fmt::Debug for QuinnConnector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("ClientChannel") - .field("inner", &self.inner) - .finish() - } -} - -impl Clone for QuinnConnector { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - _p: PhantomData, - } - } -} - -impl ConnectionErrors for QuinnConnector { - type SendError = io::Error; - type RecvError = io::Error; - type OpenError = quinn::ConnectionError; - type AcceptError = quinn::ConnectionError; -} - -impl StreamTypes for QuinnConnector { - type In = In; - type Out = Out; - type SendSink = self::SendSink; - type RecvStream = self::RecvStream; -} - -impl Connector for QuinnConnector { - async fn open(&self) -> Result<(Self::SendSink, Self::RecvStream), Self::OpenError> { - let (sender, receiver) = oneshot::channel(); - self.inner - .sender - .send_async(sender) - .await - .map_err(|_| quinn::ConnectionError::LocallyClosed)?; - let (send, recv) = receiver - .await - .map_err(|_| quinn::ConnectionError::LocallyClosed)??; - Ok((SendSink::new(send), RecvStream::new(recv))) - } -} - -/// A sink that wraps a quinn SendStream with length delimiting and postcard -/// -/// If you want to send bytes directly, use [SendSink::into_inner] to get the -/// underlying [quinn::SendStream]. -#[pin_project] -pub struct SendSink(#[pin] FramedPostcardWrite); - -impl fmt::Debug for SendSink { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("SendSink").finish() - } -} - -impl SendSink { - fn new(inner: quinn::SendStream) -> Self { - let inner = FramedPostcardWrite::new(inner, MAX_FRAME_LENGTH); - Self(inner) - } -} - -impl SendSink { - /// Get the underlying [quinn::SendStream], which implements - /// [tokio::io::AsyncWrite] and can be used to send bytes directly. 
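
On the client side, the connector removed here was typically used like the following sketch: build an endpoint, construct the connector with the server address and the name its certificate was issued for, then `open()` typed streams as needed. Module paths, feature flags and the `String` message types are assumptions, and a matching server must already be listening on the given address:

```rust
use futures::{SinkExt, StreamExt};
use quic_rpc::transport::{
    quinn::{make_insecure_client_endpoint, QuinnConnector},
    Connector,
};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // trusts any server certificate; acceptable for local testing only
    let endpoint = make_insecure_client_endpoint("0.0.0.0:0".parse()?)?;
    let connector = QuinnConnector::<String, String>::new(
        endpoint,
        "127.0.0.1:12345".parse()?, // address of a running QuinnListener (placeholder)
        "localhost".to_string(),    // server name the certificate was issued for
    );
    let (mut send, mut recv) = connector.open().await?;
    send.send("ping".to_string()).await?;
    if let Some(reply) = recv.next().await {
        println!("reply: {}", reply?);
    }
    Ok(())
}
```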
- pub fn into_inner(self) -> quinn::SendStream { - self.0.into_inner() - } -} - -impl Sink for SendSink { - type Error = io::Error; - - fn poll_ready( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.project().0).poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: Out) -> Result<(), Self::Error> { - Pin::new(&mut self.project().0).start_send(item) - } - - fn poll_flush( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.project().0).poll_flush(cx) - } - - fn poll_close( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.project().0).poll_close(cx) - } -} - -/// A stream that wraps a quinn RecvStream with length delimiting and postcard -/// -/// If you want to receive bytes directly, use [RecvStream::into_inner] to get -/// the underlying [quinn::RecvStream]. -#[pin_project] -pub struct RecvStream(#[pin] FramedPostcardRead); - -impl fmt::Debug for RecvStream { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RecvStream").finish() - } -} - -impl RecvStream { - fn new(inner: quinn::RecvStream) -> Self { - let inner = FramedPostcardRead::new(inner, MAX_FRAME_LENGTH); - Self(inner) - } -} - -impl RecvStream { - /// Get the underlying [quinn::RecvStream], which implements - /// [tokio::io::AsyncRead] and can be used to receive bytes directly. - pub fn into_inner(self) -> quinn::RecvStream { - self.0.into_inner() - } -} - -impl Stream for RecvStream { - type Item = result::Result; - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - Pin::new(&mut self.project().0).poll_next(cx) - } -} - -/// Error for open. Currently just a quinn::ConnectionError -pub type OpenError = quinn::ConnectionError; - -/// Error for accept. Currently just a quinn::ConnectionError -pub type AcceptError = quinn::ConnectionError; - -/// CreateChannelError for quinn channels. -#[derive(Debug, Clone)] -pub enum CreateChannelError { - /// Something went wrong immediately when creating the quinn endpoint - Io(io::ErrorKind, String), - /// Error directly when calling connect on the quinn endpoint - Connect(quinn::ConnectError), - /// Error produced by the future returned by connect - Connection(quinn::ConnectionError), -} - -impl From for CreateChannelError { - fn from(e: io::Error) -> Self { - CreateChannelError::Io(e.kind(), e.to_string()) - } -} - -impl From for CreateChannelError { - fn from(e: quinn::ConnectionError) -> Self { - CreateChannelError::Connection(e) - } -} - -impl From for CreateChannelError { - fn from(e: quinn::ConnectError) -> Self { - CreateChannelError::Connect(e) - } -} - -impl fmt::Display for CreateChannelError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(self, f) - } -} - -impl std::error::Error for CreateChannelError {} - -/// Get the handshake data from a quinn connection that uses rustls. 
-pub fn get_handshake_data( - connection: &quinn::Connection, -) -> Option { - let handshake_data = connection.handshake_data()?; - let tls_connection = handshake_data.downcast_ref::()?; - Some(quinn::crypto::rustls::HandshakeData { - protocol: tls_connection.protocol.clone(), - server_name: tls_connection.server_name.clone(), - }) -} - -#[cfg(feature = "test-utils")] -mod quinn_setup_utils { - use std::{net::SocketAddr, sync::Arc}; - - use anyhow::Result; - use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint, ServerConfig}; - - /// Builds default quinn client config and trusts given certificates. - /// - /// ## Args - /// - /// - server_certs: a list of trusted certificates in DER format. - pub fn configure_client(server_certs: &[&[u8]]) -> Result { - let mut certs = rustls::RootCertStore::empty(); - for cert in server_certs { - let cert = rustls::pki_types::CertificateDer::from(cert.to_vec()); - certs.add(cert)?; - } - - let crypto_client_config = rustls::ClientConfig::builder_with_provider(Arc::new( - rustls::crypto::ring::default_provider(), - )) - .with_protocol_versions(&[&rustls::version::TLS13]) - .expect("valid versions") - .with_root_certificates(certs) - .with_no_client_auth(); - let quic_client_config = - quinn::crypto::rustls::QuicClientConfig::try_from(crypto_client_config)?; - - Ok(ClientConfig::new(Arc::new(quic_client_config))) - } - - /// Constructs a QUIC endpoint configured for use a client only. - /// - /// ## Args - /// - /// - server_certs: list of trusted certificates. - pub fn make_client_endpoint(bind_addr: SocketAddr, server_certs: &[&[u8]]) -> Result { - let client_cfg = configure_client(server_certs)?; - let mut endpoint = Endpoint::client(bind_addr)?; - endpoint.set_default_client_config(client_cfg); - Ok(endpoint) - } - - /// Create a server endpoint with a self-signed certificate - /// - /// Returns the server endpoint and the certificate in DER format - pub fn make_server_endpoint(bind_addr: SocketAddr) -> Result<(Endpoint, Vec)> { - let (server_config, server_cert) = configure_server()?; - let endpoint = Endpoint::server(server_config, bind_addr)?; - Ok((endpoint, server_cert)) - } - - /// Create a quinn server config with a self-signed certificate - /// - /// Returns the server config and the certificate in DER format - pub fn configure_server() -> anyhow::Result<(ServerConfig, Vec)> { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()])?; - let cert_der = cert.cert.der(); - let priv_key = rustls::pki_types::PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); - let cert_chain = vec![cert_der.clone()]; - - let mut server_config = ServerConfig::with_single_cert(cert_chain, priv_key.into())?; - Arc::get_mut(&mut server_config.transport) - .unwrap() - .max_concurrent_uni_streams(0_u8.into()); - - Ok((server_config, cert_der.to_vec())) - } - - /// Constructs a QUIC endpoint that trusts all certificates. - /// - /// This is useful for testing and local connections, but should be used with care. 
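
The helpers in this block compose naturally: the DER certificate returned by `make_server_endpoint` is exactly what `configure_client`/`make_client_endpoint` take as a trust root. A sketch pairing both endpoints in one process (feature gating and module path as noted above; the accept task exists only so the handshake can complete):

```rust
use quic_rpc::transport::quinn::{make_client_endpoint, make_server_endpoint};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // server side: self-signed certificate, ephemeral port
    let (server, cert_der) = make_server_endpoint("127.0.0.1:0".parse()?)?;
    let server_addr = server.local_addr()?;
    tokio::spawn(async move {
        // drive incoming handshakes so the client below can connect
        while let Some(incoming) = server.accept().await {
            if let Ok(conn) = incoming.await {
                println!("server: accepted {}", conn.remote_address());
            }
        }
    });

    // client side: trusts exactly the certificate the server just generated
    let client = make_client_endpoint("0.0.0.0:0".parse()?, &[cert_der.as_slice()])?;
    let conn = client.connect(server_addr, "localhost")?.await?;
    println!("client: connected to {}", conn.remote_address());
    Ok(())
}
```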
- pub fn make_insecure_client_endpoint(bind_addr: SocketAddr) -> Result { - let crypto = rustls::ClientConfig::builder() - .dangerous() - .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) - .with_no_client_auth(); - - let client_cfg = QuicClientConfig::try_from(crypto)?; - let client_cfg = ClientConfig::new(Arc::new(client_cfg)); - let mut endpoint = Endpoint::client(bind_addr)?; - endpoint.set_default_client_config(client_cfg); - Ok(endpoint) - } - - #[derive(Debug)] - struct SkipServerVerification; - - impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { - fn verify_server_cert( - &self, - _end_entity: &rustls::pki_types::CertificateDer<'_>, - _intermediates: &[rustls::pki_types::CertificateDer<'_>], - _server_name: &rustls::pki_types::ServerName<'_>, - _ocsp_response: &[u8], - _now: rustls::pki_types::UnixTime, - ) -> Result { - Ok(rustls::client::danger::ServerCertVerified::assertion()) - } - - fn verify_tls12_signature( - &self, - _message: &[u8], - _cert: &rustls::pki_types::CertificateDer<'_>, - _dss: &rustls::DigitallySignedStruct, - ) -> Result { - Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) - } - - fn verify_tls13_signature( - &self, - _message: &[u8], - _cert: &rustls::pki_types::CertificateDer<'_>, - _dss: &rustls::DigitallySignedStruct, - ) -> Result { - Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) - } - - fn supported_verify_schemes(&self) -> Vec { - use rustls::SignatureScheme::*; - // list them all, we don't care. - vec![ - RSA_PKCS1_SHA1, - ECDSA_SHA1_Legacy, - RSA_PKCS1_SHA256, - ECDSA_NISTP256_SHA256, - RSA_PKCS1_SHA384, - ECDSA_NISTP384_SHA384, - RSA_PKCS1_SHA512, - ECDSA_NISTP521_SHA512, - RSA_PSS_SHA256, - RSA_PSS_SHA384, - RSA_PSS_SHA512, - ED25519, - ED448, - ] - } - } -} -#[cfg(feature = "test-utils")] -pub use quinn_setup_utils::*; diff --git a/src/transport/util.rs b/src/transport/util.rs deleted file mode 100644 index 9157cd2b..00000000 --- a/src/transport/util.rs +++ /dev/null @@ -1,188 +0,0 @@ -use std::{ - pin::Pin, - task::{self, Poll}, -}; - -use futures_lite::Stream; -use futures_sink::Sink; -use pin_project::pin_project; -use serde::{de::DeserializeOwned, Serialize}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::codec::LengthDelimitedCodec; - -#[pin_project] -pub struct FramedPostcardRead( - #[pin] - tokio_serde::SymmetricallyFramed< - tokio_util::codec::FramedRead, - In, - tokio_serde_postcard::SymmetricalPostcard, - >, -); - -impl FramedPostcardRead { - /// Wrap a socket in a length delimited codec and postcard encoding - pub fn new(inner: T, max_frame_length: usize) -> Self { - // configure length delimited codec with max frame length - let framing = LengthDelimitedCodec::builder() - .max_frame_length(max_frame_length) - .new_codec(); - // create the actual framing. This turns the AsyncRead/AsyncWrite into a Stream/Sink of Bytes/BytesMut - let framed = tokio_util::codec::FramedRead::new(inner, framing); - let postcard = tokio_serde_postcard::Postcard::new(); - // create the actual framing. This turns the Stream/Sink of Bytes/BytesMut into a Stream/Sink of In/Out - let framed = tokio_serde::Framed::new(framed, postcard); - Self(framed) - } -} - -impl FramedPostcardRead { - /// Get the underlying binary stream - /// - /// This can be useful if you want to drop the framing and use the underlying stream directly - /// after exchanging some messages. 
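
The framed wrappers in this file layer a length-delimited codec under a postcard codec, so every message travels as a length prefix (4-byte big-endian by default) followed by its postcard bytes. A hand-rolled sketch of the same layering over an in-memory duplex pipe, using `tokio-util` and `postcard` (with its `alloc` feature) directly rather than the crate's private wrappers:

```rust
use futures::{SinkExt, StreamExt};
use serde::{Deserialize, Serialize};
use tokio_util::codec::{FramedRead, FramedWrite, LengthDelimitedCodec};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Msg {
    id: u64,
    body: String,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let (a, b) = tokio::io::duplex(4096);
    let mut writer = FramedWrite::new(a, LengthDelimitedCodec::new());
    let mut reader = FramedRead::new(b, LengthDelimitedCodec::new());

    // postcard-encode the message, then send it as one length-delimited frame
    let msg = Msg { id: 1, body: "hello".into() };
    let bytes = postcard::to_allocvec(&msg).map_err(|e| anyhow::anyhow!("encode: {e}"))?;
    writer.send(bytes.into()).await?;

    // read one frame back and decode it
    let frame = reader.next().await.expect("one frame")?;
    let decoded: Msg = postcard::from_bytes(&frame).map_err(|e| anyhow::anyhow!("decode: {e}"))?;
    assert_eq!(decoded, msg);
    Ok(())
}
```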
- pub fn into_inner(self) -> T { - self.0.into_inner().into_inner() - } -} - -impl Stream for FramedPostcardRead { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll> { - Pin::new(&mut self.project().0).poll_next(cx) - } -} - -/// Wrapper that wraps a bidirectional binary stream in a length delimited codec and postcard encoding -/// to get a bidirectional stream of rpc Messages -#[pin_project] -pub struct FramedPostcardWrite( - #[pin] - tokio_serde::SymmetricallyFramed< - tokio_util::codec::FramedWrite, - Out, - tokio_serde_postcard::SymmetricalPostcard, - >, -); - -impl FramedPostcardWrite { - /// Wrap a socket in a length delimited codec and postcard encoding - pub fn new(inner: T, max_frame_length: usize) -> Self { - // configure length delimited codec with max frame length - let framing = LengthDelimitedCodec::builder() - .max_frame_length(max_frame_length) - .new_codec(); - // create the actual framing. This turns the AsyncRead/AsyncWrite into a Stream/Sink of Bytes/BytesMut - let framed = tokio_util::codec::FramedWrite::new(inner, framing); - let postcard = tokio_serde_postcard::SymmetricalPostcard::new(); - // create the actual framing. This turns the Stream/Sink of Bytes/BytesMut into a Stream/Sink of In/Out - let framed = tokio_serde::SymmetricallyFramed::new(framed, postcard); - Self(framed) - } -} - -impl FramedPostcardWrite { - /// Get the underlying binary stream - /// - /// This can be useful if you want to drop the framing and use the underlying stream directly - /// after exchanging some messages. - pub fn into_inner(self) -> T { - self.0.into_inner().into_inner() - } -} - -impl Sink for FramedPostcardWrite { - type Error = std::io::Error; - - fn poll_ready( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.project().0).poll_ready(cx) - } - - fn start_send(self: Pin<&mut Self>, item: Out) -> Result<(), Self::Error> { - Pin::new(&mut self.project().0).start_send(item) - } - - fn poll_flush( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.project().0).poll_flush(cx) - } - - fn poll_close( - self: Pin<&mut Self>, - cx: &mut task::Context<'_>, - ) -> Poll> { - Pin::new(&mut self.project().0).poll_close(cx) - } -} - -mod tokio_serde_postcard { - use std::{io, marker::PhantomData, pin::Pin}; - - use bytes::{BufMut as _, Bytes, BytesMut}; - use pin_project::pin_project; - use serde::{Deserialize, Serialize}; - use tokio_serde::{Deserializer, Serializer}; - - #[pin_project] - pub struct Postcard { - #[pin] - buffer: Box>, - _marker: PhantomData<(Item, SinkItem)>, - } - - impl Default for Postcard { - fn default() -> Self { - Self::new() - } - } - - impl Postcard { - pub fn new() -> Self { - Self { - buffer: Box::new(None), - _marker: PhantomData, - } - } - } - - pub type SymmetricalPostcard = Postcard; - - impl Deserializer for Postcard - where - for<'a> Item: Deserialize<'a>, - { - type Error = io::Error; - - fn deserialize(self: Pin<&mut Self>, src: &BytesMut) -> Result { - postcard::from_bytes(src).map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err)) - } - } - - impl Serializer for Postcard - where - SinkItem: Serialize, - { - type Error = io::Error; - - fn serialize(self: Pin<&mut Self>, data: &SinkItem) -> Result { - let mut this = self.project(); - let buffer = this.buffer.take().unwrap_or_default(); - let mut buffer = postcard::to_io(data, buffer.writer()) - .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))? 
- .into_inner(); - if buffer.len() <= 1024 { - let res = buffer.split().freeze(); - this.buffer.replace(buffer); - Ok(res) - } else { - Ok(buffer.freeze()) - } - } - } -} diff --git a/src/util.rs b/src/util.rs new file mode 100644 index 00000000..2c04a15d --- /dev/null +++ b/src/util.rs @@ -0,0 +1,374 @@ +//! Utilities +//! +//! This module contains utilities to read and write varints, as well as +//! functions to set up quinn endpoints for local rpc and testing. +#[cfg(feature = "quinn_endpoint_setup")] +#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "quinn_endpoint_setup")))] +mod quinn_setup_utils { + use std::{net::SocketAddr, sync::Arc}; + + use anyhow::Result; + use quinn::{crypto::rustls::QuicClientConfig, ClientConfig, Endpoint, ServerConfig}; + + /// Builds default quinn client config and trusts given certificates. + /// + /// ## Args + /// + /// - server_certs: a list of trusted certificates in DER format. + pub fn configure_client(server_certs: &[&[u8]]) -> Result { + let mut certs = rustls::RootCertStore::empty(); + for cert in server_certs { + let cert = rustls::pki_types::CertificateDer::from(cert.to_vec()); + certs.add(cert)?; + } + + let crypto_client_config = rustls::ClientConfig::builder_with_provider(Arc::new( + rustls::crypto::ring::default_provider(), + )) + .with_protocol_versions(&[&rustls::version::TLS13]) + .expect("valid versions") + .with_root_certificates(certs) + .with_no_client_auth(); + let quic_client_config = + quinn::crypto::rustls::QuicClientConfig::try_from(crypto_client_config)?; + + Ok(ClientConfig::new(Arc::new(quic_client_config))) + } + + /// Constructs a QUIC endpoint configured for use a client only. + /// + /// ## Args + /// + /// - server_certs: list of trusted certificates. + pub fn make_client_endpoint(bind_addr: SocketAddr, server_certs: &[&[u8]]) -> Result { + let client_cfg = configure_client(server_certs)?; + let mut endpoint = Endpoint::client(bind_addr)?; + endpoint.set_default_client_config(client_cfg); + Ok(endpoint) + } + + /// Create a server endpoint with a self-signed certificate + /// + /// Returns the server endpoint and the certificate in DER format + pub fn make_server_endpoint(bind_addr: SocketAddr) -> Result<(Endpoint, Vec)> { + let (server_config, server_cert) = configure_server()?; + let endpoint = Endpoint::server(server_config, bind_addr)?; + Ok((endpoint, server_cert)) + } + + /// Create a quinn server config with a self-signed certificate + /// + /// Returns the server config and the certificate in DER format + pub fn configure_server() -> anyhow::Result<(ServerConfig, Vec)> { + let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()])?; + let cert_der = cert.cert.der(); + let priv_key = rustls::pki_types::PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); + let cert_chain = vec![cert_der.clone()]; + + let mut server_config = ServerConfig::with_single_cert(cert_chain, priv_key.into())?; + Arc::get_mut(&mut server_config.transport) + .unwrap() + .max_concurrent_uni_streams(0_u8.into()); + + Ok((server_config, cert_der.to_vec())) + } + + /// Constructs a QUIC endpoint that trusts all certificates. + /// + /// This is useful for testing and local connections, but should be used with care. 
+ pub fn make_insecure_client_endpoint(bind_addr: SocketAddr) -> Result { + let crypto = rustls::ClientConfig::builder() + .dangerous() + .with_custom_certificate_verifier(Arc::new(SkipServerVerification)) + .with_no_client_auth(); + + let client_cfg = QuicClientConfig::try_from(crypto)?; + let client_cfg = ClientConfig::new(Arc::new(client_cfg)); + let mut endpoint = Endpoint::client(bind_addr)?; + endpoint.set_default_client_config(client_cfg); + Ok(endpoint) + } + + #[derive(Debug)] + struct SkipServerVerification; + + impl rustls::client::danger::ServerCertVerifier for SkipServerVerification { + fn verify_server_cert( + &self, + _end_entity: &rustls::pki_types::CertificateDer<'_>, + _intermediates: &[rustls::pki_types::CertificateDer<'_>], + _server_name: &rustls::pki_types::ServerName<'_>, + _ocsp_response: &[u8], + _now: rustls::pki_types::UnixTime, + ) -> Result { + Ok(rustls::client::danger::ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn verify_tls13_signature( + &self, + _message: &[u8], + _cert: &rustls::pki_types::CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn supported_verify_schemes(&self) -> Vec { + use rustls::SignatureScheme::*; + // list them all, we don't care. + vec![ + RSA_PKCS1_SHA1, + ECDSA_SHA1_Legacy, + RSA_PKCS1_SHA256, + ECDSA_NISTP256_SHA256, + RSA_PKCS1_SHA384, + ECDSA_NISTP384_SHA384, + RSA_PKCS1_SHA512, + ECDSA_NISTP521_SHA512, + RSA_PSS_SHA256, + RSA_PSS_SHA384, + RSA_PSS_SHA512, + ED25519, + ED448, + ] + } + } +} +#[cfg(feature = "quinn_endpoint_setup")] +#[cfg_attr(quicrpc_docsrs, doc(cfg(feature = "quinn_endpoint_setup")))] +pub use quinn_setup_utils::*; + +#[cfg(feature = "rpc")] +mod varint_util { + use std::{ + future::Future, + io::{self, Error}, + }; + + use serde::Serialize; + use tokio::io::{AsyncRead, AsyncReadExt}; + + /// Reads a u64 varint from an AsyncRead source, using the Postcard/LEB128 format. + /// + /// In Postcard's varint format (LEB128): + /// - Each byte uses 7 bits for the value + /// - The MSB (most significant bit) of each byte indicates if there are more bytes (1) or not (0) + /// - Values are stored in little-endian order (least significant group first) + /// + /// Returns the decoded u64 value. 
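
As a concrete illustration of the format described above: 300 is 0b1_0010_1100, so the low seven bits 0b010_1100 go out first with the continuation bit set (0xAC), followed by the remaining 0b10 without it (0x02). A tiny standalone encoder reproducing that (not the crate's code, which writes through `std::io::Write` instead of returning a `Vec`):

```rust
fn encode_leb128(mut value: u64) -> Vec<u8> {
    let mut out = Vec::new();
    loop {
        let byte = (value & 0x7F) as u8;
        value >>= 7;
        if value == 0 {
            out.push(byte);
            break;
        }
        // more groups follow, so set the continuation bit
        out.push(byte | 0x80);
    }
    out
}

fn main() {
    assert_eq!(encode_leb128(300), vec![0xAC, 0x02]);
    assert_eq!(encode_leb128(0), vec![0x00]);
    assert_eq!(encode_leb128(127), vec![0x7F]);
}
```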
+ pub async fn read_varint_u64(reader: &mut R) -> io::Result> + where + R: AsyncRead + Unpin, + { + let mut result: u64 = 0; + let mut shift: u32 = 0; + + loop { + // We can only shift up to 63 bits (for a u64) + if shift >= 64 { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "Varint is too large for u64", + )); + } + + // Read a single byte + let res = reader.read_u8().await; + if shift == 0 { + if let Err(cause) = res { + if cause.kind() == io::ErrorKind::UnexpectedEof { + return Ok(None); + } else { + return Err(cause); + } + } + } + + let byte = res?; + + // Extract the 7 value bits (bits 0-6, excluding the MSB which is the continuation bit) + let value = (byte & 0x7F) as u64; + + // Add the bits to our result at the current shift position + result |= value << shift; + + // If the high bit is not set (0), this is the last byte + if byte & 0x80 == 0 { + break; + } + + // Move to the next 7 bits + shift += 7; + } + + Ok(Some(result)) + } + + /// Writes a u64 varint to any object that implements the `std::io::Write` trait. + /// + /// This encodes the value using LEB128 encoding. + /// + /// # Arguments + /// * `writer` - Any object implementing `std::io::Write` + /// * `value` - The u64 value to encode as a varint + /// + /// # Returns + /// The number of bytes written or an IO error + pub fn write_varint_u64_sync( + writer: &mut W, + value: u64, + ) -> std::io::Result { + // Handle zero as a special case + if value == 0 { + writer.write_all(&[0])?; + return Ok(1); + } + + let mut bytes_written = 0; + let mut remaining = value; + + while remaining > 0 { + // Extract the 7 least significant bits + let mut byte = (remaining & 0x7F) as u8; + remaining >>= 7; + + // Set the continuation bit if there's more data + if remaining > 0 { + byte |= 0x80; + } + + writer.write_all(&[byte])?; + bytes_written += 1; + } + + Ok(bytes_written) + } + + pub fn write_length_prefixed( + mut write: impl std::io::Write, + value: T, + ) -> io::Result<()> { + let size = postcard::experimental::serialized_size(&value) + .map_err(|e| Error::new(io::ErrorKind::InvalidData, e))? as u64; + write_varint_u64_sync(&mut write, size)?; + postcard::to_io(&value, &mut write) + .map_err(|e| Error::new(io::ErrorKind::InvalidData, e))?; + Ok(()) + } + + /// Provides a fn to read a varint from an AsyncRead source. + pub trait AsyncReadVarintExt: AsyncRead + Unpin { + /// Reads a u64 varint from an AsyncRead source, using the Postcard/LEB128 format. + /// + /// If the stream is at the end, this returns `Ok(None)`. + fn read_varint_u64(&mut self) -> impl Future>>; + } + + impl AsyncReadVarintExt for T { + fn read_varint_u64(&mut self) -> impl Future>> { + read_varint_u64(self) + } + } + + /// Provides a fn to write a varint to an [`io::Write`] target, as well as a + /// helper to write a length-prefixed value. + pub trait WriteVarintExt: std::io::Write { + /// Write a varint + #[allow(dead_code)] + fn write_varint_u64(&mut self, value: u64) -> io::Result; + /// Write a value with a varint enoded length prefix. 
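
A round-trip sketch against the two extension traits this module exports, assuming the crate's `rpc` feature and that `util` is exposed as a public module; a `&[u8]` slice serves as the async reader since tokio implements `AsyncRead` for it:

```rust
use quic_rpc::util::{AsyncReadVarintExt, WriteVarintExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut buf = Vec::new();
    let written = buf.write_varint_u64(300)?;
    assert_eq!(written, 2); // 300 needs two LEB128 bytes

    let mut reader: &[u8] = &buf;
    assert_eq!(reader.read_varint_u64().await?, Some(300));
    // a second read hits end-of-stream and reports None rather than an error
    assert_eq!(reader.read_varint_u64().await?, None);
    Ok(())
}
```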
+ fn write_length_prefixed(&mut self, value: T) -> io::Result<()>; + } + + impl WriteVarintExt for T { + fn write_varint_u64(&mut self, value: u64) -> io::Result { + write_varint_u64_sync(self, value) + } + + fn write_length_prefixed(&mut self, value: V) -> io::Result<()> { + write_length_prefixed(self, value) + } + } +} +#[cfg(feature = "rpc")] +pub use varint_util::{AsyncReadVarintExt, WriteVarintExt}; + +mod fuse_wrapper { + use std::{ + future::Future, + pin::Pin, + result::Result, + task::{Context, Poll}, + }; + + pub struct FusedOneshotReceiver(pub tokio::sync::oneshot::Receiver); + + impl Future for FusedOneshotReceiver { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.0.is_terminated() { + // don't panic when polling a terminated receiver + Poll::Pending + } else { + Future::poll(Pin::new(&mut self.0), cx) + } + } + } +} +pub(crate) use fuse_wrapper::FusedOneshotReceiver; + +#[cfg(feature = "rpc")] +mod now_or_never { + use std::{ + future::Future, + pin::Pin, + task::{Context, Poll, RawWaker, RawWakerVTable, Waker}, + }; + + // Simple pin_mut! macro implementation + macro_rules! pin_mut { + ($($x:ident),* $(,)?) => { + $( + let mut $x = $x; + #[allow(unused_mut)] + let mut $x = unsafe { Pin::new_unchecked(&mut $x) }; + )* + } +} + + // Minimal implementation of a no-op waker + fn noop_waker() -> Waker { + fn noop(_: *const ()) {} + fn clone(_: *const ()) -> RawWaker { + let vtable = &RawWakerVTable::new(clone, noop, noop, noop); + RawWaker::new(std::ptr::null(), vtable) + } + + unsafe { Waker::from_raw(clone(std::ptr::null())) } + } + + /// Attempts to complete a future immediately, returning None if it would block + pub(crate) fn now_or_never(future: F) -> Option { + let waker = noop_waker(); + let mut cx = Context::from_waker(&waker); + + pin_mut!(future); + + match future.poll(&mut cx) { + Poll::Ready(x) => Some(x), + Poll::Pending => None, + } + } +} +#[cfg(feature = "rpc")] +pub(crate) use now_or_never::now_or_never; diff --git a/tests/flume.rs b/tests/flume.rs deleted file mode 100644 index 34fc9009..00000000 --- a/tests/flume.rs +++ /dev/null @@ -1,103 +0,0 @@ -#![cfg(feature = "flume-transport")] -#![allow(non_local_definitions)] -mod math; -use math::*; -use quic_rpc::{ - server::{RpcChannel, RpcServerError}, - transport::flume, - RpcClient, RpcServer, Service, -}; -use tokio_util::task::AbortOnDropHandle; - -#[tokio::test] -async fn flume_channel_bench() -> anyhow::Result<()> { - tracing_subscriber::fmt::try_init().ok(); - let (server, client) = flume::channel(1); - - let server = RpcServer::::new(server); - let _server_handle = AbortOnDropHandle::new(tokio::spawn(ComputeService::server(server))); - let client = RpcClient::::new(client); - bench(client, 1000000).await?; - Ok(()) -} - -#[tokio::test] -async fn flume_channel_mapped_bench() -> anyhow::Result<()> { - use derive_more::{From, TryInto}; - use serde::{Deserialize, Serialize}; - - tracing_subscriber::fmt::try_init().ok(); - - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - enum OuterRequest { - Inner(InnerRequest), - } - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - enum InnerRequest { - Compute(ComputeRequest), - } - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - enum OuterResponse { - Inner(InnerResponse), - } - #[derive(Debug, Serialize, Deserialize, From, TryInto)] - enum InnerResponse { - Compute(ComputeResponse), - } - #[derive(Debug, Clone)] - struct OuterService; - impl Service for OuterService { - type Req = 
OuterRequest; - type Res = OuterResponse; - } - #[derive(Debug, Clone)] - struct InnerService; - impl Service for InnerService { - type Req = InnerRequest; - type Res = InnerResponse; - } - let (server, client) = flume::channel(1); - - let server = RpcServer::::new(server); - let server_handle: tokio::task::JoinHandle>> = - tokio::task::spawn(async move { - let service = ComputeService; - loop { - let (req, chan) = server.accept().await?.read_first().await?; - let service = service.clone(); - tokio::spawn(async move { - let req: OuterRequest = req; - match req { - OuterRequest::Inner(InnerRequest::Compute(req)) => { - let chan: RpcChannel = chan.map(); - let chan: RpcChannel = chan.map(); - ComputeService::handle_rpc_request(service, req, chan).await - } - } - }); - } - }); - - let client = RpcClient::::new(client); - let client: RpcClient = client.map(); - let client: RpcClient = client.map(); - bench(client, 1000000).await?; - // dropping the client will cause the server to terminate - match server_handle.await? { - Err(RpcServerError::Accept(_)) => {} - e => panic!("unexpected termination result {e:?}"), - } - Ok(()) -} - -/// simple happy path test for all 4 patterns -#[tokio::test] -async fn flume_channel_smoke() -> anyhow::Result<()> { - tracing_subscriber::fmt::try_init().ok(); - let (server, client) = flume::channel(1); - - let server = RpcServer::::new(server); - let _server_handle = AbortOnDropHandle::new(tokio::spawn(ComputeService::server(server))); - smoke_test(client).await?; - Ok(()) -} diff --git a/tests/hyper.rs b/tests/hyper.rs deleted file mode 100644 index fa671446..00000000 --- a/tests/hyper.rs +++ /dev/null @@ -1,297 +0,0 @@ -#![cfg(feature = "hyper-transport")] -#![cfg(feature = "macros")] -use std::{assert, net::SocketAddr, result}; - -use ::hyper::Uri; -use derive_more::{From, TryInto}; -use flume::Receiver; -use quic_rpc::{ - declare_rpc, - server::RpcServerError, - transport::hyper::{self, HyperConnector, HyperListener, RecvError}, - RpcClient, RpcServer, Service, -}; -use serde::{Deserialize, Serialize}; -use tokio::task::JoinHandle; - -mod math; -use math::*; -use tokio_util::task::AbortOnDropHandle; -mod util; - -fn run_server(addr: &SocketAddr) -> AbortOnDropHandle<()> { - let channel = HyperListener::serve(addr).unwrap(); - let server = RpcServer::new(channel); - ComputeService::server(server) -} - -#[derive(Debug, Serialize, Deserialize, From, TryInto)] -enum TestResponse { - Unit(()), - Big(Vec), - NoSer(NoSer), - NoDeser(NoDeser), -} - -type SC = HyperListener; - -/// request that can be too big -#[derive(Debug, Serialize, Deserialize)] -pub struct BigRequest(Vec); - -/// request that looks serializable but isn't -#[derive(Debug, Serialize, Deserialize)] -pub struct NoSerRequest(NoSer); - -/// request that looks deserializable but isn't -#[derive(Debug, Serialize, Deserialize)] -pub struct NoDeserRequest(NoDeser); - -/// request where the response is not serializable -#[derive(Debug, Serialize, Deserialize)] -pub struct NoSerResponseRequest; - -/// request where the response is not deserializable -#[derive(Debug, Serialize, Deserialize)] -pub struct NoDeserResponseRequest; - -/// request that can produce a response that is too big -#[derive(Debug, Serialize, Deserialize)] -pub struct BigResponseRequest(usize); - -/// helper struct that implements serde::Serialize but errors on serialization -#[derive(Debug, Deserialize)] -pub struct NoSer; - -impl serde::Serialize for NoSer { - fn serialize(&self, _serializer: S) -> Result - where - S: serde::Serializer, 
- { - Err(serde::ser::Error::custom("nope")) - } -} - -/// helper struct that implements serde::Deserialize but errors on deserialization -#[derive(Debug, Serialize)] -pub struct NoDeser; - -impl<'de> serde::Deserialize<'de> for NoDeser { - fn deserialize(_deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - Err(serde::de::Error::custom("nope")) - } -} - -#[allow(clippy::enum_variant_names)] -#[derive(Debug, Serialize, Deserialize, From, TryInto)] -enum TestRequest { - BigRequest(BigRequest), - NoSerRequest(NoSerRequest), - NoDeserRequest(NoDeserRequest), - NoSerResponseRequest(NoSerResponseRequest), - NoDeserResponseRequest(NoDeserResponseRequest), - BigResponseRequest(BigResponseRequest), -} - -#[derive(Debug, Clone)] -struct TestService; - -impl Service for TestService { - type Req = TestRequest; - type Res = TestResponse; -} - -impl TestService { - async fn big(self, _req: BigRequest) {} - - async fn noser(self, _req: NoSerRequest) {} - - async fn nodeser(self, _req: NoDeserRequest) {} - - async fn noserresponse(self, _req: NoSerResponseRequest) -> NoSer { - NoSer - } - - async fn nodeserresponse(self, _req: NoDeserResponseRequest) -> NoDeser { - NoDeser - } - - async fn bigresponse(self, req: BigResponseRequest) -> Vec { - vec![0; req.0] - } -} - -#[tokio::test] -async fn hyper_channel_bench() -> anyhow::Result<()> { - let addr: SocketAddr = "127.0.0.1:3000".parse()?; - let uri: Uri = "http://127.0.0.1:3000".parse()?; - let _server_handle = run_server(&addr); - let client = HyperConnector::new(uri); - let client = RpcClient::new(client); - bench(client, 50000).await?; - println!("terminating server"); - Ok(()) -} - -#[tokio::test] -async fn hyper_channel_smoke() -> anyhow::Result<()> { - let addr: SocketAddr = "127.0.0.1:3001".parse()?; - let uri: Uri = "http://127.0.0.1:3001".parse()?; - let _server_handle = run_server(&addr); - let client = HyperConnector::new(uri); - smoke_test(client).await?; - Ok(()) -} - -declare_rpc!(TestService, BigRequest, ()); -declare_rpc!(TestService, NoSerRequest, ()); -declare_rpc!(TestService, NoDeserRequest, ()); -declare_rpc!(TestService, NoSerResponseRequest, NoSer); -declare_rpc!(TestService, NoDeserResponseRequest, NoDeser); -declare_rpc!(TestService, BigResponseRequest, Vec); - -#[tokio::test] -async fn hyper_channel_errors() -> anyhow::Result<()> { - #[allow(clippy::type_complexity)] - fn run_test_server( - addr: &SocketAddr, - ) -> ( - JoinHandle>, - Receiver>>, - ) { - let channel = HyperListener::serve(addr).unwrap(); - let server = RpcServer::new(channel); - let (res_tx, res_rx) = flume::unbounded(); - let handle = tokio::spawn(async move { - loop { - let Ok(x) = server.accept().await else { - continue; - }; - let res = match x.read_first().await { - Ok((req, chan)) => match req { - TestRequest::BigRequest(req) => { - chan.rpc(req, TestService, TestService::big).await - } - TestRequest::NoSerRequest(req) => { - chan.rpc(req, TestService, TestService::noser).await - } - TestRequest::NoDeserRequest(req) => { - chan.rpc(req, TestService, TestService::nodeser).await - } - TestRequest::NoSerResponseRequest(req) => { - chan.rpc(req, TestService, TestService::noserresponse).await - } - TestRequest::NoDeserResponseRequest(req) => { - chan.rpc(req, TestService, TestService::nodeserresponse) - .await - } - TestRequest::BigResponseRequest(req) => { - chan.rpc(req, TestService, TestService::bigresponse).await - } - }, - Err(e) => Err(e), - }; - res_tx.send_async(res).await.unwrap(); - } - #[allow(unreachable_code)] - anyhow::Ok(()) - 
}); - (handle, res_rx) - } - - let addr: SocketAddr = "127.0.0.1:3002".parse()?; - let uri: Uri = "http://127.0.0.1:3002".parse()?; - let (server_handle, server_results) = run_test_server(&addr); - let client = HyperConnector::new(uri); - let client = RpcClient::new(client); - - macro_rules! assert_matches { - ($e:expr, $p:pat) => { - assert!( - matches!($e, $p), - "expected {} to match {}", - stringify!($e), - stringify!($p) - ); - }; - } - macro_rules! assert_server_result { - ($p:pat) => { - let server_result = server_results.recv_async().await.unwrap(); - assert!( - matches!(server_result, $p), - "expected server result to match {}", - stringify!($p) - ); - assert!(server_results.is_empty()); - }; - } - - // small enough - should succeed - let res = client.rpc(BigRequest(vec![0; 10_000_000])).await; - assert_matches!(res, Ok(())); - assert_server_result!(Ok(())); - - // too big - should fail immediately after opening a connection - let res = client.rpc(BigRequest(vec![0; 20_000_000])).await; - assert_matches!( - res, - Err(quic_rpc::pattern::rpc::Error::Send( - hyper::SendError::SizeError(_) - )) - ); - assert_server_result!(Err(RpcServerError::EarlyClose)); - - // not serializable - should fail immediately after opening a connection - let res = client.rpc(NoSerRequest(NoSer)).await; - assert_matches!( - res, - Err(quic_rpc::pattern::rpc::Error::Send( - hyper::SendError::SerializeError(_) - )) - ); - assert_server_result!(Err(RpcServerError::EarlyClose)); - - // not deserializable - should fail on the server side - let res = client.rpc(NoDeserRequest(NoDeser)).await; - assert_matches!(res, Err(quic_rpc::pattern::rpc::Error::EarlyClose)); - assert_server_result!(Err(RpcServerError::RecvError( - hyper::RecvError::DeserializeError(_) - ))); - - // response not serializable - should fail on the server side - let res = client.rpc(NoSerResponseRequest).await; - assert_matches!(res, Err(quic_rpc::pattern::rpc::Error::EarlyClose)); - assert_server_result!(Err(RpcServerError::SendError( - hyper::SendError::SerializeError(_) - ))); - - // response not deserializable - should succeed on the server side fail on the client side - let res = client.rpc(NoDeserResponseRequest).await; - assert_matches!( - res, - Err(quic_rpc::pattern::rpc::Error::RecvError( - RecvError::DeserializeError(_) - )) - ); - assert_server_result!(Ok(())); - - // response small - should succeed - let res = client.rpc(BigResponseRequest(10_000_000)).await; - assert!(res.is_ok()); - assert_server_result!(Ok(())); - - // response big - should fail - let res = client.rpc(BigResponseRequest(20_000_000)).await; - assert_matches!(res, Err(quic_rpc::pattern::rpc::Error::EarlyClose)); - assert_server_result!(Err(RpcServerError::SendError(hyper::SendError::SizeError( - _ - )))); - - println!("terminating server"); - server_handle.abort(); - Ok(()) -} diff --git a/tests/iroh.rs b/tests/iroh.rs deleted file mode 100644 index 8acbe869..00000000 --- a/tests/iroh.rs +++ /dev/null @@ -1,168 +0,0 @@ -#![cfg(feature = "iroh-transport")] - -use iroh::{NodeAddr, SecretKey}; -use quic_rpc::{transport, RpcClient, RpcServer}; -use testresult::TestResult; - -use crate::transport::iroh::{IrohConnector, IrohListener}; - -mod math; -use math::*; -use tokio_util::task::AbortOnDropHandle; -mod util; - -const ALPN: &[u8] = b"quic-rpc/iroh/test"; - -/// Constructs an iroh endpoint -/// -/// ## Args -/// -/// - alpn: the ALPN protocol to use -pub async fn make_endpoint(secret_key: SecretKey, alpn: &[u8]) -> anyhow::Result { - iroh::Endpoint::builder() - 
.secret_key(secret_key) - .alpns(vec![alpn.to_vec()]) - .bind() - .await -} - -pub struct Endpoints { - client: iroh::Endpoint, - server: iroh::Endpoint, - server_node_addr: NodeAddr, -} - -impl Endpoints { - pub async fn new() -> anyhow::Result { - let server = make_endpoint(SecretKey::generate(rand::thread_rng()), ALPN).await?; - - Ok(Endpoints { - client: make_endpoint(SecretKey::generate(rand::thread_rng()), ALPN).await?, - server_node_addr: server.node_addr().await?, - server, - }) - } -} - -fn run_server(server: iroh::Endpoint) -> AbortOnDropHandle<()> { - let connection = IrohListener::new(server).unwrap(); - let server = RpcServer::new(connection); - ComputeService::server(server) -} - -// #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -#[tokio::test] -async fn iroh_channel_bench() -> anyhow::Result<()> { - tracing_subscriber::fmt::try_init().ok(); - - let Endpoints { - client, - server, - server_node_addr, - } = Endpoints::new().await?; - tracing::debug!("Starting server"); - let _server_handle = run_server(server); - tracing::debug!("Starting client"); - - let client = RpcClient::new(IrohConnector::new(client, server_node_addr, ALPN.into())); - tracing::debug!("Starting benchmark"); - bench(client, 50000).await?; - Ok(()) -} - -#[tokio::test] -async fn iroh_channel_smoke() -> anyhow::Result<()> { - tracing_subscriber::fmt::try_init().ok(); - let Endpoints { - client, - server, - server_node_addr, - } = Endpoints::new().await?; - let _server_handle = run_server(server); - let client_connection = IrohConnector::new(client, server_node_addr, ALPN.into()); - smoke_test(client_connection).await?; - Ok(()) -} - -/// Test that using the client after the server goes away and comes back behaves as if the server -/// had never gone away in the first place. -/// -/// This is a regression test. -#[tokio::test] -async fn server_away_and_back() -> TestResult<()> { - tracing_subscriber::fmt::try_init().ok(); - tracing::info!("Creating endpoints"); - - let client_endpoint = make_endpoint(SecretKey::generate(rand::thread_rng()), ALPN).await?; - - let server_secret_key = SecretKey::generate(rand::thread_rng()); - let server_node_id = server_secret_key.public(); - - // create the RPC client - let client_connection = transport::iroh::IrohConnector::::new( - client_endpoint.clone(), - server_node_id, - ALPN.into(), - ); - let client = RpcClient::< - ComputeService, - transport::iroh::IrohConnector, - >::new(client_connection); - - // send a request. 
No server available so it should fail - client.rpc(Sqr(4)).await.unwrap_err(); - - let server_endpoint = make_endpoint(server_secret_key.clone(), ALPN).await?; - - // create the RPC Server - let connection = transport::iroh::IrohListener::new(server_endpoint.clone())?; - let server = RpcServer::new(connection); - let server_handle = tokio::spawn(ComputeService::server_bounded(server, 1)); - - // wait a bit for connection due to Windows test failing on CI - tokio::time::sleep(tokio::time::Duration::from_millis(300)).await; - - // Passing the server node address directly to client endpoint to not depend - // on a discovery service - let addr = server_endpoint.node_addr().await?; - println!("adding addr {:?}", addr); - client_endpoint.add_node_addr(addr)?; - - // send the first request and wait for the response to ensure everything works as expected - let SqrResponse(response) = client.rpc(Sqr(4)).await?; - assert_eq!(response, 16); - - println!("shutting down"); - let server = server_handle.await??; - drop(server); - server_endpoint.close().await; - - // wait for drop to free the socket - tokio::time::sleep(tokio::time::Duration::from_millis(300)).await; - - // send a request. No server available so it should fail - client.rpc(Sqr(4)).await.unwrap_err(); - - println!("creating new endpoint"); - let server_endpoint = make_endpoint(server_secret_key.clone(), ALPN).await?; - - // make the server run again - let connection = transport::iroh::IrohListener::new(server_endpoint.clone())?; - let server = RpcServer::new(connection); - let server_handle = tokio::spawn(ComputeService::server_bounded(server, 5)); - - // wait a bit for connection due to Windows test failing on CI - tokio::time::sleep(tokio::time::Duration::from_millis(300)).await; - - // Passing the server node address directly to client endpoint to not depend - // on a discovery service - let addr = server_endpoint.node_addr().await?; - println!("adding addr {:?}", addr); - client_endpoint.add_node_addr(addr)?; - - // server is running, this should work - let SqrResponse(response) = client.rpc(Sqr(3)).await?; - assert_eq!(response, 9); - server_handle.abort(); - Ok(()) -} diff --git a/tests/math.rs b/tests/math.rs deleted file mode 100644 index b628c525..00000000 --- a/tests/math.rs +++ /dev/null @@ -1,390 +0,0 @@ -#![cfg(any( - feature = "flume-transport", - feature = "hyper-transport", - feature = "quinn-transport", - feature = "iroh-transport", -))] -#![allow(dead_code)] -use std::{ - io::{self, Write}, - result, -}; - -use async_stream::stream; -use derive_more::{From, TryInto}; -use futures_buffered::BufferedStreamExt; -use futures_lite::{Stream, StreamExt}; -use futures_util::SinkExt; -use quic_rpc::{ - message::{ - BidiStreaming, BidiStreamingMsg, ClientStreaming, ClientStreamingMsg, Msg, RpcMsg, - ServerStreaming, ServerStreamingMsg, - }, - server::{RpcChannel, RpcServerError}, - transport::StreamTypes, - Connector, Listener, RpcClient, RpcServer, Service, -}; -use serde::{Deserialize, Serialize}; -use thousands::Separable; -use tokio_util::task::AbortOnDropHandle; - -/// compute the square of a number -#[derive(Debug, Serialize, Deserialize)] -pub struct Sqr(pub u64); - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] -pub struct SqrResponse(pub u128); - -/// sum a stream of numbers -#[derive(Debug, Serialize, Deserialize)] -pub struct Sum; - -#[derive(Debug, Serialize, Deserialize)] -pub struct SumUpdate(pub u64); - -#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] -pub struct SumResponse(pub u128); - -/// 
compute the fibonacci sequence as a stream -#[derive(Debug, Serialize, Deserialize)] -pub struct Fibonacci(pub u64); - -#[derive(Debug, Serialize, Deserialize)] -pub struct FibonacciResponse(pub u128); - -/// multiply a stream of numbers, returning a stream -#[derive(Debug, Serialize, Deserialize)] -pub struct Multiply(pub u64); - -#[derive(Debug, Serialize, Deserialize)] -pub struct MultiplyUpdate(pub u64); - -#[derive(Debug, Serialize, Deserialize)] -pub struct MultiplyResponse(pub u128); - -/// request enum -#[derive(Debug, Serialize, Deserialize, From, TryInto)] -pub enum ComputeRequest { - Sqr(Sqr), - Sum(Sum), - SumUpdate(SumUpdate), - Fibonacci(Fibonacci), - Multiply(Multiply), - MultiplyUpdate(MultiplyUpdate), -} - -/// response enum -#[allow(clippy::enum_variant_names)] -#[derive(Debug, Serialize, Deserialize, From, TryInto)] -pub enum ComputeResponse { - SqrResponse(SqrResponse), - SumResponse(SumResponse), - FibonacciResponse(FibonacciResponse), - MultiplyResponse(MultiplyResponse), -} - -#[derive(Debug, Clone)] -pub struct ComputeService; - -impl Service for ComputeService { - type Req = ComputeRequest; - type Res = ComputeResponse; -} - -impl RpcMsg for Sqr { - type Response = SqrResponse; -} - -impl Msg for Sum { - type Pattern = ClientStreaming; -} - -impl ClientStreamingMsg for Sum { - type Update = SumUpdate; - type Response = SumResponse; -} - -impl Msg for Fibonacci { - type Pattern = ServerStreaming; -} - -impl ServerStreamingMsg for Fibonacci { - type Response = FibonacciResponse; -} - -impl Msg for Multiply { - type Pattern = BidiStreaming; -} - -impl BidiStreamingMsg for Multiply { - type Update = MultiplyUpdate; - type Response = MultiplyResponse; -} - -impl ComputeService { - async fn sqr(self, req: Sqr) -> SqrResponse { - SqrResponse(req.0 as u128 * req.0 as u128) - } - - async fn sum(self, _req: Sum, updates: impl Stream) -> SumResponse { - let mut sum = 0u128; - tokio::pin!(updates); - while let Some(SumUpdate(n)) = updates.next().await { - sum += n as u128; - } - SumResponse(sum) - } - - fn fibonacci(self, req: Fibonacci) -> impl Stream { - let mut a = 0u128; - let mut b = 1u128; - let mut n = req.0; - stream! { - while n > 0 { - yield FibonacciResponse(a); - let c = a + b; - a = b; - b = c; - n -= 1; - } - } - } - - fn multiply( - self, - req: Multiply, - updates: impl Stream, - ) -> impl Stream { - let product = req.0 as u128; - stream! { - tokio::pin!(updates); - while let Some(MultiplyUpdate(n)) = updates.next().await { - yield MultiplyResponse(product * n as u128); - } - } - } - - pub fn server>( - server: RpcServer, - ) -> AbortOnDropHandle<()> { - server.spawn_accept_loop(|req, chan| Self::handle_rpc_request(ComputeService, req, chan)) - } - - pub async fn handle_rpc_request( - self, - req: ComputeRequest, - chan: RpcChannel, - ) -> Result<(), RpcServerError> - where - E: StreamTypes, - { - use ComputeRequest::*; - #[rustfmt::skip] - match req { - Sqr(msg) => chan.rpc(msg, self, Self::sqr).await, - Sum(msg) => chan.client_streaming(msg, self, Self::sum).await, - Fibonacci(msg) => chan.server_streaming(msg, self, Self::fibonacci).await, - Multiply(msg) => chan.bidi_streaming(msg, self, Self::multiply).await, - MultiplyUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - SumUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - }?; - Ok(()) - } - - /// Runs the service until `count` requests have been received. 
- pub async fn server_bounded>( - server: RpcServer, - count: usize, - ) -> result::Result, RpcServerError> { - tracing::info!(%count, "server running"); - let s = server; - let mut received = 0; - let service = ComputeService; - while received < count { - received += 1; - let (req, chan) = s.accept().await?.read_first().await?; - let service = service.clone(); - tokio::spawn(async move { - use ComputeRequest::*; - tracing::info!(?req, "got request"); - #[rustfmt::skip] - match req { - Sqr(msg) => chan.rpc(msg, service, ComputeService::sqr).await, - Sum(msg) => chan.client_streaming(msg, service, ComputeService::sum).await, - Fibonacci(msg) => chan.server_streaming(msg, service, ComputeService::fibonacci).await, - Multiply(msg) => chan.bidi_streaming(msg, service, ComputeService::multiply).await, - SumUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - MultiplyUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - }?; - Ok::<_, RpcServerError>(()) - }); - } - tracing::info!(%count, "server finished"); - Ok(s) - } - - pub async fn server_par>( - server: RpcServer, - parallelism: usize, - ) -> result::Result<(), RpcServerError> { - let s = server.clone(); - let s2 = s.clone(); - let service = ComputeService; - let request_stream = stream! { - loop { - yield s2.accept().await?.read_first().await; - } - }; - let process_stream = request_stream.map(move |r| { - let service = service.clone(); - async move { - let (req, chan) = r?; - use ComputeRequest::*; - #[rustfmt::skip] - match req { - Sqr(msg) => chan.rpc(msg, service, ComputeService::sqr).await, - Sum(msg) => chan.client_streaming(msg, service, ComputeService::sum).await, - Fibonacci(msg) => chan.server_streaming(msg, service, ComputeService::fibonacci).await, - Multiply(msg) => chan.bidi_streaming(msg, service, ComputeService::multiply).await, - SumUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - MultiplyUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - }?; - Ok::<_, RpcServerError>(()) - } - }); - process_stream - .buffered_unordered(parallelism) - .for_each(|x| { - if let Err(e) = x { - eprintln!("error: {e:?}"); - } - }) - .await; - Ok(()) - } -} - -pub async fn smoke_test>(client: C) -> anyhow::Result<()> { - let client = RpcClient::::new(client); - // a rpc call - tracing::debug!("calling rpc S(1234)"); - let res = client.rpc(Sqr(1234)).await?; - tracing::debug!("got response {:?}", res); - assert_eq!(res, SqrResponse(1522756)); - - // client streaming call - tracing::debug!("calling client_streaming Sum"); - let (mut send, recv) = client.client_streaming(Sum).await?; - tokio::task::spawn(async move { - for i in 1..=3 { - send.send(SumUpdate(i)).await?; - } - Ok::<_, C::SendError>(()) - }); - let res = recv.await?; - tracing::debug!("got response {:?}", res); - assert_eq!(res, SumResponse(6)); - - // server streaming call - tracing::debug!("calling server_streaming Fibonacci(10)"); - let s = client.server_streaming(Fibonacci(10)).await?; - let res: Vec<_> = s.map(|x| x.map(|x| x.0)).try_collect().await?; - tracing::debug!("got response {:?}", res); - assert_eq!(res, vec![0, 1, 1, 2, 3, 5, 8, 13, 21, 34]); - - // bidi streaming call - tracing::debug!("calling bidi Multiply(2)"); - let (mut send, recv) = client.bidi(Multiply(2)).await?; - tokio::task::spawn(async move { - for i in 1..=3 { - send.send(MultiplyUpdate(i)).await?; - } - Ok::<_, C::SendError>(()) - }); - let res: Vec<_> = recv.map(|x| x.map(|x| x.0)).try_collect().await?; - tracing::debug!("got response {:?}", res); - assert_eq!(res, 
vec![2, 4, 6]); - - tracing::debug!("dropping client!"); - Ok(()) -} - -fn clear_line() { - print!("\r{}\r", " ".repeat(80)); -} - -pub async fn bench(client: RpcClient, n: u64) -> anyhow::Result<()> -where - C::SendError: std::error::Error, - C: Connector, -{ - // individual RPCs - { - let mut sum = 0; - let t0 = std::time::Instant::now(); - for i in 0..n { - sum += client.rpc(Sqr(i)).await?.0; - if i % 10000 == 0 { - print!("."); - io::stdout().flush()?; - } - } - let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round(); - assert_eq!(sum, sum_of_squares(n)); - clear_line(); - println!("RPC seq {} rps", rps.separate_with_underscores(),); - } - // parallel RPCs - { - let t0 = std::time::Instant::now(); - let reqs = futures_lite::stream::iter((0..n).map(Sqr)); - let resp: Vec<_> = reqs - .map(|x| { - let client = client.clone(); - async move { - let res = client.rpc(x).await?.0; - anyhow::Ok(res) - } - }) - .buffered_unordered(32) - .try_collect() - .await?; - let sum = resp.into_iter().sum::(); - let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round(); - assert_eq!(sum, sum_of_squares(n)); - clear_line(); - println!("RPC par {} rps", rps.separate_with_underscores(),); - } - // sequential streaming - { - let t0 = std::time::Instant::now(); - let (send, recv) = client.bidi(Multiply(2)).await?; - let handle = tokio::task::spawn(async move { - let requests = futures_lite::stream::iter((0..n).map(MultiplyUpdate)); - futures_util::StreamExt::forward(requests.map(Ok), send).await?; - anyhow::Result::<()>::Ok(()) - }); - let mut sum = 0; - tokio::pin!(recv); - let mut i = 0; - while let Some(res) = recv.next().await { - sum += res?.0; - if i % 10000 == 0 { - print!("."); - io::stdout().flush()?; - } - i += 1; - } - assert_eq!(sum, (0..n as u128).map(|x| x * 2).sum()); - let rps = ((n as f64) / t0.elapsed().as_secs_f64()).round(); - clear_line(); - println!("bidi seq {} rps", rps.separate_with_underscores(),); - - handle.await??; - } - Ok(()) -} - -fn sum_of_squares(n: u64) -> u128 { - (0..n).map(|x| (x * x) as u128).sum() -} diff --git a/tests/quinn.rs b/tests/quinn.rs deleted file mode 100644 index 54f42cf5..00000000 --- a/tests/quinn.rs +++ /dev/null @@ -1,127 +0,0 @@ -#![cfg(feature = "quinn-transport")] -#![cfg(feature = "test-utils")] -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; - -use quic_rpc::{ - transport::{ - self, - quinn::{ - configure_server, make_client_endpoint, make_server_endpoint, QuinnConnector, - QuinnListener, - }, - }, - RpcClient, RpcServer, -}; -use quinn::Endpoint; - -mod math; -use math::*; -use testresult::TestResult; -use tokio_util::task::AbortOnDropHandle; -mod util; - -pub struct Endpoints { - client: Endpoint, - server: Endpoint, - server_addr: SocketAddr, -} - -pub fn make_endpoints(port: u16) -> anyhow::Result { - let server_addr: SocketAddr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, port)); - let (server, server_certs) = make_server_endpoint(server_addr)?; - let client = make_client_endpoint("0.0.0.0:0".parse()?, &[&server_certs])?; - Ok(Endpoints { - client, - server, - server_addr, - }) -} - -fn run_server(server: quinn::Endpoint) -> AbortOnDropHandle<()> { - let listener = QuinnListener::new(server).unwrap(); - let listener = RpcServer::new(listener); - ComputeService::server(listener) -} - -// #[tokio::test(flavor = "multi_thread", worker_threads = 2)] -#[tokio::test] -async fn quinn_channel_bench() -> anyhow::Result<()> { - tracing_subscriber::fmt::try_init().ok(); - let Endpoints { - client, - server, - server_addr, - } = 
make_endpoints(12345)?; - tracing::debug!("Starting server"); - let _server_handle = run_server(server); - tracing::debug!("Starting client"); - let client = QuinnConnector::new(client, server_addr, "localhost".into()); - let client = RpcClient::new(client); - tracing::debug!("Starting benchmark"); - bench(client, 50000).await?; - Ok(()) -} - -#[tokio::test] -async fn quinn_channel_smoke() -> anyhow::Result<()> { - tracing_subscriber::fmt::try_init().ok(); - let Endpoints { - client, - server, - server_addr, - } = make_endpoints(12346)?; - let _server_handle = run_server(server); - let client_connection = - transport::quinn::QuinnConnector::new(client, server_addr, "localhost".into()); - smoke_test(client_connection).await?; - Ok(()) -} - -/// Test that using the client after the server goes away and comes back behaves as if the server -/// had never gone away in the first place. -/// -/// This is a regression test. -#[tokio::test] -async fn server_away_and_back() -> TestResult<()> { - tracing_subscriber::fmt::try_init().ok(); - tracing::info!("Creating endpoints"); - - let server_addr: SocketAddr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::LOCALHOST, 12347)); - let (server_config, server_cert) = configure_server()?; - - // create the RPC client - let client = make_client_endpoint("0.0.0.0:0".parse()?, &[&server_cert])?; - let client_connection = - transport::quinn::QuinnConnector::new(client, server_addr, "localhost".into()); - let client = RpcClient::new(client_connection); - - // send a request. No server available so it should fail - client.rpc(Sqr(4)).await.unwrap_err(); - - // create the RPC Server - let server = Endpoint::server(server_config.clone(), server_addr)?; - let connection = transport::quinn::QuinnListener::new(server)?; - let server = RpcServer::new(connection); - let server_handle = tokio::task::spawn(ComputeService::server_bounded(server, 1)); - - // send the first request and wait for the response to ensure everything works as expected - let SqrResponse(response) = client.rpc(Sqr(4)).await?; - assert_eq!(response, 16); - - let server = server_handle.await??; - drop(server); - // wait for drop to free the socket - tokio::time::sleep(tokio::time::Duration::from_millis(300)).await; - - // make the server run again - let server = Endpoint::server(server_config, server_addr)?; - let connection = transport::quinn::QuinnListener::new(server)?; - let server = RpcServer::new(connection); - let server_handle = tokio::task::spawn(ComputeService::server_bounded(server, 5)); - - // server is running, this should work - let SqrResponse(response) = client.rpc(Sqr(3)).await?; - assert_eq!(response, 9); - server_handle.abort(); - Ok(()) -} diff --git a/tests/slow_math.rs b/tests/slow_math.rs deleted file mode 100644 index 2060a9ae..00000000 --- a/tests/slow_math.rs +++ /dev/null @@ -1,131 +0,0 @@ -#![cfg(any( - feature = "flume-transport", - feature = "hyper-transport", - feature = "quinn-transport", - feature = "iroh-transport", -))] -mod math; -use std::result; - -use async_stream::stream; -use futures_lite::{Stream, StreamExt}; -use math::*; -use quic_rpc::{ - message::{ - BidiStreaming, BidiStreamingMsg, ClientStreaming, ClientStreamingMsg, Msg, RpcMsg, - ServerStreaming, ServerStreamingMsg, - }, - server::RpcServerError, - Listener, RpcServer, Service, -}; - -#[derive(Debug, Clone)] -pub struct ComputeService; - -impl Service for ComputeService { - type Req = ComputeRequest; - type Res = ComputeResponse; -} - -impl RpcMsg for Sqr { - type Response = SqrResponse; -} - -impl Msg 
for Sum { - type Pattern = ClientStreaming; -} - -impl ClientStreamingMsg for Sum { - type Update = SumUpdate; - type Response = SumResponse; -} - -impl Msg for Fibonacci { - type Pattern = ServerStreaming; -} - -impl ServerStreamingMsg for Fibonacci { - type Response = FibonacciResponse; -} - -impl Msg for Multiply { - type Pattern = BidiStreaming; -} - -impl BidiStreamingMsg for Multiply { - type Update = MultiplyUpdate; - type Response = MultiplyResponse; -} - -async fn sleep_ms(ms: u64) { - tokio::time::sleep(std::time::Duration::from_millis(ms)).await; -} - -impl ComputeService { - async fn sqr(self, req: Sqr) -> SqrResponse { - sleep_ms(10000).await; - SqrResponse(req.0 as u128 * req.0 as u128) - } - - async fn sum(self, _req: Sum, updates: impl Stream) -> SumResponse { - let mut sum = 0u128; - tokio::pin!(updates); - while let Some(SumUpdate(n)) = updates.next().await { - sleep_ms(100).await; - sum += n as u128; - } - SumResponse(sum) - } - - fn fibonacci(self, req: Fibonacci) -> impl Stream { - let mut a = 0u128; - let mut b = 1u128; - let mut n = req.0; - stream! { - sleep_ms(100).await; - while n > 0 { - yield FibonacciResponse(a); - let c = a + b; - a = b; - b = c; - n -= 1; - } - } - } - - fn multiply( - self, - req: Multiply, - updates: impl Stream, - ) -> impl Stream { - let product = req.0 as u128; - stream! { - tokio::pin!(updates); - while let Some(MultiplyUpdate(n)) = updates.next().await { - sleep_ms(100).await; - yield MultiplyResponse(product * n as u128); - } - } - } - - pub async fn server>( - server: RpcServer, - ) -> result::Result<(), RpcServerError> { - let s = server; - let service = ComputeService; - loop { - let (req, chan) = s.accept().await?.read_first().await?; - use ComputeRequest::*; - let service = service.clone(); - #[rustfmt::skip] - match req { - Sqr(msg) => chan.rpc(msg, service, ComputeService::sqr).await, - Sum(msg) => chan.client_streaming(msg, service, ComputeService::sum).await, - Fibonacci(msg) => chan.server_streaming(msg, service, ComputeService::fibonacci).await, - Multiply(msg) => chan.bidi_streaming(msg, service, ComputeService::multiply).await, - SumUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - MultiplyUpdate(_) => Err(RpcServerError::UnexpectedStartMessage)?, - }?; - } - } -} diff --git a/tests/try.rs b/tests/try.rs deleted file mode 100644 index b11f633f..00000000 --- a/tests/try.rs +++ /dev/null @@ -1,103 +0,0 @@ -#![cfg(feature = "flume-transport")] -use derive_more::{From, TryInto}; -use futures_lite::{Stream, StreamExt}; -use quic_rpc::{ - message::Msg, - pattern::try_server_streaming::{StreamCreated, TryServerStreaming, TryServerStreamingMsg}, - server::RpcServerError, - transport::flume, - RpcClient, RpcServer, Service, -}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone)] -struct TryService; - -impl Service for TryService { - type Req = TryRequest; - type Res = TryResponse; -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct StreamN { - n: u64, -} - -impl Msg for StreamN { - type Pattern = TryServerStreaming; -} - -impl TryServerStreamingMsg for StreamN { - type Item = u64; - type ItemError = String; - type CreateError = String; -} - -/// request enum -#[derive(Debug, Serialize, Deserialize, From, TryInto)] -pub enum TryRequest { - StreamN(StreamN), -} - -#[derive(Debug, Serialize, Deserialize, From, TryInto, Clone)] -pub enum TryResponse { - StreamN(std::result::Result), - StreamNError(std::result::Result), -} - -#[derive(Clone)] -struct Handler; - -impl Handler { - async fn try_stream_n( - 
self, - req: StreamN, - ) -> std::result::Result>, String> { - if req.n % 2 != 0 { - return Err("odd n not allowed".to_string()); - } - let stream = async_stream::stream! { - for i in 0..req.n { - if i > 5 { - yield Err("n too large".to_string()); - return; - } - yield Ok(i); - } - }; - Ok(stream) - } -} - -#[tokio::test] -async fn try_server_streaming() -> anyhow::Result<()> { - tracing_subscriber::fmt::try_init().ok(); - let (server, client) = flume::channel(1); - - let server = RpcServer::::new(server); - let server_handle = tokio::task::spawn(async move { - loop { - let (req, chan) = server.accept().await?.read_first().await?; - let handler = Handler; - match req { - TryRequest::StreamN(req) => { - chan.try_server_streaming(req, handler, Handler::try_stream_n) - .await?; - } - } - } - #[allow(unreachable_code)] - Ok(()) - }); - let client = RpcClient::::new(client); - let stream_n = client.try_server_streaming(StreamN { n: 10 }).await?; - let items: Vec<_> = stream_n.collect().await; - println!("{:?}", items); - drop(client); - // dropping the client will cause the server to terminate - match server_handle.await? { - Err(RpcServerError::Accept(_)) => {} - e => panic!("unexpected termination result {e:?}"), - } - Ok(()) -} diff --git a/tests/util.rs b/tests/util.rs deleted file mode 100644 index cd946e46..00000000 --- a/tests/util.rs +++ /dev/null @@ -1,20 +0,0 @@ -use anyhow::Context; -use quic_rpc::{server::RpcServerError, transport::Connector}; - -#[allow(unused)] -pub async fn check_termination_anyhow( - server_handle: tokio::task::JoinHandle>, -) -> anyhow::Result<()> { - // dropping the client will cause the server to terminate - match server_handle.await? { - Err(e) => { - let err: RpcServerError = e.downcast().context("unexpected termination result")?; - match err { - RpcServerError::Accept(_) => {} - e => panic!("unexpected termination error {e:?}"), - } - } - e => panic!("server should have terminated with an error {e:?}"), - } - Ok(()) -}
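
For reference, a minimal sketch of how the varint helpers added earlier in this diff fit together: `WriteVarintExt::write_varint_u64` LEB128-encodes a `u64` into any `std::io::Write` target, and `AsyncReadVarintExt::read_varint_u64` reads it back from a tokio `AsyncRead` source, returning `Ok(None)` at end of stream. The `quic_rpc::util` import path is an assumption (the diff only shows the re-exports, not the parent module name), and the concrete values are arbitrary.

```rust
use std::io::Cursor;

// Assumed re-export path; the diff shows `pub use varint_util::{AsyncReadVarintExt, WriteVarintExt}`
// but not which module contains it, so `quic_rpc::util` is a guess.
use quic_rpc::util::{AsyncReadVarintExt, WriteVarintExt};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // LEB128-encode 300 into an in-memory buffer: 300 needs two 7-bit groups,
    // so it is written as the two bytes 0xAC 0x02.
    let mut buf = Vec::new();
    let written = buf.write_varint_u64(300)?;
    assert_eq!(written, 2);
    assert_eq!(buf, vec![0xAC, 0x02]);

    // Read it back through the async extension trait (Cursor implements tokio's AsyncRead).
    let mut reader = Cursor::new(buf);
    assert_eq!(reader.read_varint_u64().await?, Some(300));

    // At end of stream the reader reports Ok(None) instead of an UnexpectedEof error.
    assert_eq!(reader.read_varint_u64().await?, None);
    Ok(())
}
```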
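
Similarly, a hedged sketch of the framing that `write_length_prefixed` provides: the postcard-serialized size is written first as a LEB128 varint, followed by the postcard bytes themselves. The import path is the same assumption as above, the `Ping` type is hypothetical, and serde's `derive` feature is assumed to be enabled.

```rust
use quic_rpc::util::WriteVarintExt; // assumed re-export path, as above
use serde::Serialize;

// Hypothetical message type used only for illustration.
#[derive(Serialize)]
struct Ping {
    id: u32,
}

fn main() -> std::io::Result<()> {
    let mut frame = Vec::new();
    // Writes the postcard-serialized size as a LEB128 varint, then the payload bytes.
    frame.write_length_prefixed(Ping { id: 7 })?;

    // Payloads shorter than 128 bytes get a single-byte length prefix;
    // everything after it is the postcard encoding.
    let payload_len = frame[0] as usize;
    assert_eq!(frame.len(), 1 + payload_len);
    Ok(())
}
```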