@@ -1,15 +1,223 @@
 // Copyright 2018-2026 the Deno authors. MIT license.
 
 use std::borrow::Cow;
+use std::collections::BinaryHeap;
+use std::collections::HashMap;
+use std::ffi::c_void;
 use std::sync::Mutex;
 use std::sync::Once;
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::Ordering;
+use std::time::Duration;
+use std::time::Instant;
+
+use futures::task::AtomicWaker;
 
 use super::bindings;
 use super::snapshot;
 use super::snapshot::V8Snapshot;
 
+// Compile-time check that `UnsafeRawIsolatePtr` is pointer-sized, so the
+// transmute in `isolate_ptr_to_key` below cannot truncate or pad.
+const _: () = assert!(
+  std::mem::size_of::<v8::UnsafeRawIsolatePtr>()
+    == std::mem::size_of::<usize>()
+);
+
+/// Extract the raw isolate address from an `UnsafeRawIsolatePtr`.
+///
+/// `UnsafeRawIsolatePtr` is `#[repr(transparent)]` over `*mut RealIsolate`,
+/// so its bit pattern is a single pointer-sized value. `transmute` is used
+/// because the inner field is private.
+pub(crate) fn isolate_ptr_to_key(ptr: v8::UnsafeRawIsolatePtr) -> usize {
+  // SAFETY: `UnsafeRawIsolatePtr` is `#[repr(transparent)]` over
+  // `*mut RealIsolate`; the compile-time assert above guarantees the size.
+  unsafe { std::mem::transmute::<v8::UnsafeRawIsolatePtr, usize>(ptr) }
+}
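
The const-assert-plus-transmute pattern above generalizes to any `#[repr(transparent)]` wrapper whose inner field is private. A toy sketch, with a hypothetical `Opaque` type standing in for the foreign wrapper:

```rust
/// Hypothetical stand-in for a foreign wrapper with a private field.
#[repr(transparent)]
struct Opaque(*mut u8);

// Fails to compile, rather than misbehaving at runtime, if the wrapper
// ever stops being exactly pointer-sized.
const _: () = assert!(
  std::mem::size_of::<Opaque>() == std::mem::size_of::<usize>()
);

fn opaque_to_key(ptr: Opaque) -> usize {
  // SAFETY: `Opaque` is `#[repr(transparent)]` over a thin pointer, so its
  // bits are a single address; the const assert above checks the size.
  unsafe { std::mem::transmute::<Opaque, usize>(ptr) }
}

fn main() {
  assert_eq!(opaque_to_key(Opaque(std::ptr::null_mut())), 0);
}
```
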
+
+/// Per-isolate state shared between the V8 platform callback and
+/// the event loop.
+struct IsolateWakeEntry {
+  waker: std::sync::Arc<AtomicWaker>,
+}
+
+/// Global registry mapping isolate pointers to their event-loop wakers.
+/// When V8 posts a foreground task for an isolate, the platform callback
+/// looks up the waker here and wakes that isolate's event loop. Isolates
+/// that receive a notification before their waker is registered are
+/// tracked in `pending_wakes` so `register_isolate_waker` can wake
+/// them immediately.
+struct IsolateWakerRegistry {
+  entries: HashMap<usize, IsolateWakeEntry>,
+  pending_wakes: std::collections::HashSet<usize>,
+}
+
+static ISOLATE_WAKERS: std::sync::LazyLock<Mutex<IsolateWakerRegistry>> =
+  std::sync::LazyLock::new(|| {
+    Mutex::new(IsolateWakerRegistry {
+      entries: HashMap::new(),
+      pending_wakes: std::collections::HashSet::new(),
+    })
+  });
+
+/// Register a waker for an isolate so foreground task notifications
+/// wake the correct event loop. If a notification arrived before
+/// registration, the waker is triggered immediately.
+pub fn register_isolate_waker(
+  isolate_ptr: usize,
+  waker: std::sync::Arc<AtomicWaker>,
+) {
+  let mut reg = ISOLATE_WAKERS.lock().unwrap();
+  if reg.pending_wakes.remove(&isolate_ptr) {
+    waker.wake();
+  }
+  reg.entries.insert(isolate_ptr, IsolateWakeEntry { waker });
+}
+
+/// Unregister an isolate's wake state (called on isolate drop).
+pub fn unregister_isolate_waker(isolate_ptr: usize) {
+  let mut reg = ISOLATE_WAKERS.lock().unwrap();
+  reg.entries.remove(&isolate_ptr);
+  reg.pending_wakes.remove(&isolate_ptr);
+}
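
Since registration and unregistration must bracket the isolate's lifetime, an embedder would typically wrap the pair in a guard. A hypothetical RAII sketch (`IsolateWakerGuard` is illustrative, not part of this diff):

```rust
/// Hypothetical guard tying registry membership to a value's lifetime,
/// so the entry is removed even on early returns or panics.
struct IsolateWakerGuard {
  key: usize,
}

impl IsolateWakerGuard {
  fn new(
    key: usize,
    waker: std::sync::Arc<futures::task::AtomicWaker>,
  ) -> Self {
    register_isolate_waker(key, waker);
    Self { key }
  }
}

impl Drop for IsolateWakerGuard {
  fn drop(&mut self) {
    unregister_isolate_waker(self.key);
  }
}
```
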
+
+/// Wake the event loop for a given isolate by waking its registered
+/// `AtomicWaker`. If the isolate's waker is not yet registered, mark
+/// the isolate as pending so registration wakes it immediately.
+fn wake_isolate(key: usize) {
+  let mut reg = ISOLATE_WAKERS.lock().unwrap();
+  if let Some(entry) = reg.entries.get(&key) {
+    entry.waker.wake();
+  } else {
+    reg.pending_wakes.insert(key);
+  }
+}
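
For context, a minimal runnable sketch of the `futures::task::AtomicWaker` contract the registry relies on: `wake()` only reaches a task that has already called `register()`, so a notification that arrives first must be parked elsewhere. The `Notified` future and its `AtomicBool` are hypothetical; in the diff, that parking role is played by the `pending_wakes` set instead.

```rust
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::task::Context;
use std::task::Poll;

use futures::task::AtomicWaker;

/// Hypothetical future that completes once another thread notifies it.
struct Notified {
  flag: Arc<AtomicBool>,
  waker: Arc<AtomicWaker>,
}

impl Future for Notified {
  type Output = ();
  fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
    // Register before checking the flag so a wake that lands in between
    // still reaches this task on the next `wake()` call.
    self.waker.register(cx.waker());
    if self.flag.load(Ordering::Acquire) {
      Poll::Ready(())
    } else {
      Poll::Pending
    }
  }
}

fn main() {
  let flag = Arc::new(AtomicBool::new(false));
  let waker = Arc::new(AtomicWaker::new());
  let notifier = {
    let (flag, waker) = (flag.clone(), waker.clone());
    std::thread::spawn(move || {
      // Publish the event, then wake whichever task last registered.
      flag.store(true, Ordering::Release);
      waker.wake();
    })
  };
  futures::executor::block_on(Notified { flag, waker });
  notifier.join().unwrap();
}
```
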
+
+/// Entry in the delayed-task timer queue; ordered by `deadline` alone.
+struct TimerEntry {
+  deadline: Instant,
+  isolate_key: usize,
+}
+
+impl PartialEq for TimerEntry {
+  fn eq(&self, other: &Self) -> bool {
+    self.deadline == other.deadline
+  }
+}
+
+impl Eq for TimerEntry {}
+
+impl PartialOrd for TimerEntry {
+  fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+    Some(self.cmp(other))
+  }
+}
+
+impl Ord for TimerEntry {
+  fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+    // Reversed so the max-heap `BinaryHeap` yields the earliest deadline first.
+    other.deadline.cmp(&self.deadline)
+  }
+}
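
Reversing `Ord` turns `BinaryHeap`, a max-heap, into a min-heap so the earliest deadline surfaces first. The standard-library `std::cmp::Reverse` adapter achieves the same effect, as this standalone sketch shows; the diff defines a custom `Ord` instead so `isolate_key` can ride along without participating in the comparison.

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::time::Duration;
use std::time::Instant;

fn main() {
  let now = Instant::now();
  let mut heap = BinaryHeap::new();
  // `Reverse` flips the comparison, turning the max-heap into a min-heap.
  heap.push(Reverse(now + Duration::from_secs(5)));
  heap.push(Reverse(now + Duration::from_secs(1)));
  heap.push(Reverse(now + Duration::from_secs(3)));
  // Deadlines now pop earliest-first: +1s, +3s, +5s.
  let Reverse(earliest) = heap.pop().unwrap();
  assert_eq!(earliest, now + Duration::from_secs(1));
}
```
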
+
+/// Single shared timer thread that processes all delayed V8 foreground
+/// task wake-ups, avoiding one OS thread per delayed task.
+static DELAYED_TASK_SENDER: std::sync::LazyLock<
+  Mutex<std::sync::mpsc::Sender<TimerEntry>>,
+> = std::sync::LazyLock::new(|| {
+  let (tx, rx) = std::sync::mpsc::channel();
+  std::thread::Builder::new()
+    .name("deno-v8-timer".into())
+    .spawn(move || delayed_task_thread(rx))
+    .unwrap();
+  Mutex::new(tx)
+});
+
+fn delayed_task_thread(rx: std::sync::mpsc::Receiver<TimerEntry>) {
+  let mut heap: BinaryHeap<TimerEntry> = BinaryHeap::new();
+  loop {
+    // Block until either a new entry arrives or the next timer fires.
+    if heap.is_empty() {
+      match rx.recv() {
+        Ok(entry) => heap.push(entry),
+        Err(_) => break,
+      }
+    } else {
+      let timeout = heap
+        .peek()
+        .unwrap()
+        .deadline
+        .saturating_duration_since(Instant::now());
+      match rx.recv_timeout(timeout) {
+        Ok(entry) => heap.push(entry),
+        Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {}
+        Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => break,
+      }
+    }
+
+    // Drain any additional entries that arrived in the meantime.
+    while let Ok(entry) = rx.try_recv() {
+      heap.push(entry);
+    }
+
+    // Fire all expired timers.
+    let now = Instant::now();
+    while let Some(entry) = heap.peek() {
+      if entry.deadline <= now {
+        let entry = heap.pop().unwrap();
+        wake_isolate(entry.isolate_key);
+      } else {
+        break;
+      }
+    }
+  }
+}
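
The wait-or-receive pattern above can be exercised in isolation. In this standalone sketch (the channel payloads and `println!` are illustrative only), sending a new deadline interrupts the pending `recv_timeout`, so an earlier deadline takes effect immediately instead of after the old wait expires:

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::mpsc;
use std::time::Duration;
use std::time::Instant;

fn main() {
  let (tx, rx) = mpsc::channel::<Instant>();
  let timer = std::thread::spawn(move || {
    let mut heap: BinaryHeap<Reverse<Instant>> = BinaryHeap::new();
    loop {
      // Wait for a new deadline, or until the earliest one expires.
      let received = match heap.peek() {
        Some(&Reverse(next)) => {
          let timeout = next.saturating_duration_since(Instant::now());
          match rx.recv_timeout(timeout) {
            Ok(deadline) => Some(deadline),
            Err(mpsc::RecvTimeoutError::Timeout) => None,
            Err(mpsc::RecvTimeoutError::Disconnected) => break,
          }
        }
        None => match rx.recv() {
          Ok(deadline) => Some(deadline),
          Err(_) => break, // All senders dropped: shut down.
        },
      };
      if let Some(deadline) = received {
        heap.push(Reverse(deadline));
      }
      // Fire every deadline that has passed.
      let now = Instant::now();
      while let Some(&Reverse(next)) = heap.peek() {
        if next > now {
          break;
        }
        heap.pop();
        println!("timer fired {:?} late", now - next);
      }
    }
  });

  let start = Instant::now();
  tx.send(start + Duration::from_millis(30)).unwrap();
  // Arrives while the 30 ms wait is pending and preempts it.
  tx.send(start + Duration::from_millis(10)).unwrap();
  std::thread::sleep(Duration::from_millis(60)); // Let both timers fire.
  drop(tx); // Disconnecting the channel ends the timer loop.
  timer.join().unwrap();
}
```
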
+
+/// Custom V8 platform implementation that wakes isolate event loops
+/// when foreground tasks are posted from any thread (including V8
+/// background compilation threads).
+struct DenoPlatformImpl;
+
+impl DenoPlatformImpl {
+  fn wake_immediate(&self, isolate_ptr: *mut c_void) {
+    wake_isolate(isolate_ptr as usize);
+  }
+
+  fn wake_delayed(&self, isolate_ptr: *mut c_void, delay_in_seconds: f64) {
+    let entry = TimerEntry {
+      deadline: Instant::now() + Duration::from_secs_f64(delay_in_seconds),
+      isolate_key: isolate_ptr as usize,
+    };
+    let _ = DELAYED_TASK_SENDER.lock().unwrap().send(entry);
+  }
+}
+
+impl v8::PlatformImpl for DenoPlatformImpl {
+  fn post_task(&self, isolate_ptr: *mut c_void) {
+    self.wake_immediate(isolate_ptr);
+  }
+
+  fn post_non_nestable_task(&self, isolate_ptr: *mut c_void) {
+    self.wake_immediate(isolate_ptr);
+  }
+
+  fn post_delayed_task(&self, isolate_ptr: *mut c_void, delay_in_seconds: f64) {
+    self.wake_delayed(isolate_ptr, delay_in_seconds);
+  }
+
+  fn post_non_nestable_delayed_task(
+    &self,
+    isolate_ptr: *mut c_void,
+    delay_in_seconds: f64,
+  ) {
+    self.wake_delayed(isolate_ptr, delay_in_seconds);
+  }
+
+  fn post_idle_task(&self, isolate_ptr: *mut c_void) {
+    self.wake_immediate(isolate_ptr);
+  }
+}
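
On the consuming side, an event loop would pair these notifications with message-loop pumping. A hedged sketch of that integration, assuming rusty_v8's existing `v8::Platform::pump_message_loop(platform, isolate, wait_for_work)` API and that the loop owns the `Arc<AtomicWaker>` it passed to `register_isolate_waker`; the function below is illustrative, not the actual deno_core poll path:

```rust
fn poll_v8_foreground_work(
  platform: &v8::SharedRef<v8::Platform>,
  isolate: &mut v8::Isolate,
  waker: &std::sync::Arc<futures::task::AtomicWaker>,
  cx: &mut std::task::Context<'_>,
) {
  // Re-arm the waker first: if `post_task` fires while we pump below,
  // the platform callback wakes this task again for another poll.
  waker.register(cx.waker());
  // Drain every foreground task V8 has queued for this isolate.
  while v8::Platform::pump_message_loop(platform, isolate, false) {}
}
```
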
+
 fn v8_init(
   v8_platform: Option<v8::SharedRef<v8::Platform>>,
   snapshot: bool,
@@ -51,13 +259,12 @@ fn v8_init(
   v8::V8::set_flags_from_string(&flags);
 
   let v8_platform = v8_platform.unwrap_or_else(|| {
-    if cfg!(any(test, feature = "unsafe_use_unprotected_platform")) {
-      // We want to use the unprotected platform for unit tests
-      v8::new_unprotected_default_platform(0, false)
-    } else {
-      v8::new_default_platform(0, false)
-    }
-    .make_shared()
+    // Use a custom platform that notifies isolate event loops when V8
+    // background threads post foreground tasks.
+    let unprotected =
+      cfg!(any(test, feature = "unsafe_use_unprotected_platform"));
+    v8::new_custom_platform(0, false, unprotected, DenoPlatformImpl)
+      .make_shared()
   });
   v8::V8::initialize_platform(v8_platform.clone());
   v8::V8::initialize();