#[cfg(test)]
mod test {
    use zeromq::prelude::*;
    use zeromq::Endpoint;
    use zeromq::ZmqMessage;
    use zeromq::__async_rt as async_rt;

    use futures_channel::{mpsc, oneshot};
    use futures_util::{SinkExt, StreamExt};
    use std::time::Duration;

    #[async_rt::test]
    async fn test_pub_sub_sockets() {
        pretty_env_logger::try_init().ok();

        async fn helper(bind_addr: &'static str) {
            // We will join on these at the end to determine if any tasks we spawned
            // panicked
            let mut task_handles = Vec::new();
            let payload = chrono::Utc::now().to_rfc2822();

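            // Two oneshot channels coordinate with the publisher task: one signals
            // it to stop sending, and the other reports the endpoint it actually
            // bound to (binding to port 0 asks the OS to pick a free port).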
            let cloned_payload = payload.clone();
            let (server_stop_sender, mut server_stop) = oneshot::channel::<()>();
            let (has_bound_sender, has_bound) = oneshot::channel::<Endpoint>();
            task_handles.push(async_rt::task::spawn(async move {
                let mut pub_socket = zeromq::PubSocket::new();
                let bound_to = pub_socket
                    .bind(bind_addr)
                    .await
                    .unwrap_or_else(|e| panic!("Failed to bind to {}: {}", bind_addr, e));
                has_bound_sender
                    .send(bound_to)
                    .expect("channel was dropped");

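                // Publish the payload repeatedly until told to stop; subscribers
                // may connect at any point during this stream.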
                loop {
                    if let Ok(Some(_)) = server_stop.try_recv() {
                        break;
                    }

                    let s: String = cloned_payload.clone();
                    let m = ZmqMessage::from(s);
                    pub_socket.send(m).await.expect("Failed to send");
                    async_rt::task::sleep(Duration::from_millis(1)).await;
                }

                let errs = pub_socket.close().await;
                if !errs.is_empty() {
                    panic!("Could not unbind socket: {:?}", errs);
                }
            }));
            // Block until the pub has finished binding
            // TODO: ZMQ sockets should not care about this sort of ordering.
            // See https://github.com/zeromq/zmq.rs/issues/73
            let bound_addr = has_bound.await.expect("channel was cancelled");
            if let Endpoint::Tcp(_host, port) = bound_addr.clone() {
                assert_ne!(port, 0);
            }

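            // Spawn 10 subscribers, each of which must receive 10 messages and
            // report each one through the mpsc channel (100 results in total).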
            let (sub_results_sender, sub_results) = mpsc::channel(100);
            for _ in 0..10 {
                let mut cloned_sub_sender = sub_results_sender.clone();
                let cloned_payload = payload.clone();
                let cloned_bound_addr = bound_addr.to_string();
                task_handles.push(async_rt::task::spawn(async move {
                    let mut sub_socket = zeromq::SubSocket::new();
                    sub_socket
                        .connect(&cloned_bound_addr)
                        .await
                        .unwrap_or_else(|_| panic!("Failed to connect to {}", bind_addr));

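                    // An empty prefix subscribes to every message on the socket.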
                    sub_socket.subscribe("").await.expect("Failed to subscribe");

                    // Give the subscription time to take effect; the publisher
                    // keeps sending every millisecond, so messages will follow.
                    async_rt::task::sleep(std::time::Duration::from_millis(500)).await;

                    for _ in 0..10 {
                        let recv_message = sub_socket.recv().await.unwrap();
                        let recv_payload =
                            String::from_utf8(recv_message.get(0).unwrap().to_vec()).unwrap();
                        assert_eq!(cloned_payload, recv_payload);
                        cloned_sub_sender.send(()).await.unwrap();
                    }
                }));
            }
            // Dropping the last sender closes the channel, so collect() below
            // terminates once every subscriber task has finished.
            drop(sub_results_sender);
            let res_vec: Vec<()> = sub_results.collect().await;
            assert_eq!(100, res_vec.len());

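            // Stop the publisher and propagate any panic from the spawned tasks.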
            server_stop_sender.send(()).unwrap();
            for t in task_handles {
                t.await.expect("Task failed unexpectedly!");
            }
        }

        let addrs = vec![
            "tcp://localhost:0",
            "tcp://127.0.0.1:0",
            "tcp://[::1]:0",
            "tcp://127.0.0.1:0",
            "tcp://localhost:0",
            "tcp://127.0.0.1:0",
            "tcp://[::1]:0",
            "ipc://asdf.sock",
            "ipc://anothersocket-asdf",
        ];
        futures_util::future::join_all(addrs.into_iter().map(helper)).await;
    }
}