Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 0 additions & 14 deletions .gn
Original file line number Diff line number Diff line change
Expand Up @@ -33,13 +33,10 @@ default_args = {

v8_embedder_string = "-rusty"

v8_enable_sandbox = false
v8_enable_javascript_promise_hooks = true
v8_promise_internal_field_count = 1
v8_use_external_startup_data = false

v8_enable_pointer_compression = false

v8_imminent_deprecation_warnings = false

# This flag speeds up the performance of fork/execve on Linux systems for
Expand All @@ -65,17 +62,6 @@ default_args = {
v8_array_buffer_internal_field_count = 2
v8_array_buffer_view_internal_field_count = 2

# Enabling the shared read-only heap comes with a restriction that all
# isolates running at the same time must be created from the same snapshot.
# This is problematic for Deno, which has separate "runtime" and "typescript
# compiler" snapshots, and sometimes uses them both at the same time.
v8_enable_shared_ro_heap = false

# V8 11.6 hardcoded an assumption in `mksnapshot` that shared RO heap
# is enabled. In our case it's disabled so without this flag we can't
# compile.
v8_enable_verify_heap = false

# Enable V8 object print for debugging.
# v8_enable_object_print = true

Expand Down
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ opt-level = 1
default = ["use_custom_libcxx"]
use_custom_libcxx = []
v8_enable_pointer_compression = []
v8_enable_sandbox = []
v8_enable_v8_checks = []

[dependencies]
Expand Down
62 changes: 58 additions & 4 deletions build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -209,14 +209,65 @@ fn build_v8(is_asan: bool) {
"use_custom_libcxx={}",
env::var("CARGO_FEATURE_USE_CUSTOM_LIBCXX").is_ok()
));
gn_args.push(format!(
"v8_enable_pointer_compression={}",
env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok()
));

let extra_args = {
if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok()
&& env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok()
{
panic!(
"Sandbox and pointer compression cannot be enabled at the same time"
);
}

if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() {
vec![
// Enable the sandbox (along with its dependencies)
"v8_enable_sandbox=true",
"v8_enable_external_code_space=true", // Needed for sandbox
"v8_enable_pointer_compression=true",
// Note that the sandbox requires v8_enable_shared_ro_heap and
// v8_enable_verify_heap to remain at their defaults (enabled).
]
} else {
let mut opts = vec![
// Disable sandbox
"v8_enable_sandbox=false",
// Enabling the shared read-only heap comes with a restriction that all
// isolates running at the same time must be created from the same snapshot.
// This is problematic for Deno, which has separate "runtime" and "typescript
// compiler" snapshots, and sometimes uses them both at the same time.
//
// NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been
// removed
"v8_enable_shared_ro_heap=false",
// V8 11.6 hardcoded an assumption in `mksnapshot` that shared RO heap
// is enabled. In our case it's disabled so without this flag we can't
// compile.
//
// NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been
// removed
"v8_enable_verify_heap=false",
];

if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() {
opts.push("v8_enable_pointer_compression=true");
} else {
opts.push("v8_enable_pointer_compression=false");
}

opts
}
};

for arg in extra_args {
gn_args.push(arg.to_string());
}

gn_args.push(format!(
"v8_enable_v8_checks={}",
env::var("CARGO_FEATURE_V8_ENABLE_V8_CHECKS").is_ok()
));

// Fix GN's host_cpu detection when using x86_64 bins on Apple Silicon
if cfg!(target_os = "macos") && cfg!(target_arch = "aarch64") {
gn_args.push("host_cpu=\"arm64\"".to_string());
Expand Down Expand Up @@ -439,6 +490,9 @@ fn prebuilt_features_suffix() -> String {
if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() {
features.push_str("_ptrcomp");
}
if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() {
features.push_str("_sandbox");
}
features
}

Expand Down
6 changes: 6 additions & 0 deletions src/V8.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ unsafe extern "C" {
fn v8__V8__Dispose() -> bool;
fn v8__V8__DisposePlatform();
fn v8__V8__SetFatalErrorHandler(that: V8FatalErrorCallback);
fn v8__V8__IsSandboxEnabled() -> bool;
}

pub type V8FatalErrorCallback = unsafe extern "C" fn(
Expand Down Expand Up @@ -83,6 +84,11 @@ use GlobalState::*;

static GLOBAL_STATE: Mutex<GlobalState> = Mutex::new(Uninitialized);

/// Returns true if V8 is sandboxed.
pub fn is_sandboxed() -> bool {
unsafe { v8__V8__IsSandboxEnabled() }
}

pub fn assert_initialized() {
let global_state_guard = GLOBAL_STATE.lock().unwrap();
match *global_state_guard {
Expand Down
119 changes: 105 additions & 14 deletions src/array_buffer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,6 @@ use crate::support::long;

unsafe extern "C" {
fn v8__ArrayBuffer__Allocator__NewDefaultAllocator() -> *mut Allocator;
fn v8__ArrayBuffer__Allocator__NewRustAllocator(
handle: *const c_void,
vtable: *const RustAllocatorVtable<c_void>,
) -> *mut Allocator;
fn v8__ArrayBuffer__Allocator__DELETE(this: *mut Allocator);
fn v8__ArrayBuffer__New__with_byte_length(
isolate: *mut RealIsolate,
Expand All @@ -54,12 +50,6 @@ unsafe extern "C" {
isolate: *mut RealIsolate,
byte_length: usize,
) -> *mut BackingStore;
fn v8__ArrayBuffer__NewBackingStore__with_data(
data: *mut c_void,
byte_length: usize,
deleter: BackingStoreDeleterCallback,
deleter_data: *mut c_void,
) -> *mut BackingStore;

fn v8__BackingStore__Data(this: *const BackingStore) -> *mut c_void;
fn v8__BackingStore__ByteLength(this: *const BackingStore) -> usize;
Expand Down Expand Up @@ -108,6 +98,31 @@ unsafe extern "C" {
) -> long;
}

// Rust allocator feature is only available in non-sandboxed mode
#[cfg(not(feature = "v8_enable_sandbox"))]
unsafe extern "C" {
fn v8__ArrayBuffer__NewBackingStore__with_data(
data: *mut c_void,
byte_length: usize,
deleter: BackingStoreDeleterCallback,
deleter_data: *mut c_void,
) -> *mut BackingStore;

fn v8__ArrayBuffer__Allocator__NewRustAllocator(
handle: *const c_void,
vtable: *const RustAllocatorVtable<c_void>,
) -> *mut Allocator;
}

#[cfg(feature = "v8_enable_sandbox")]
unsafe extern "C" {
fn v8__ArrayBuffer__NewBackingStore__with_data_sandboxed(
isolate: *mut RealIsolate,
data: *mut c_void,
byte_length: usize,
) -> *mut BackingStore;
}

/// A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
/// The allocator is a global V8 setting. It has to be set via
/// Isolate::CreateParams.
Expand All @@ -130,6 +145,7 @@ unsafe extern "C" {
pub struct Allocator(Opaque);

/// A wrapper around the V8 Allocator class.
#[cfg(not(feature = "v8_enable_sandbox"))]
#[repr(C)]
pub struct RustAllocatorVtable<T> {
pub allocate: unsafe extern "C" fn(handle: &T, len: usize) -> *mut c_void,
Expand Down Expand Up @@ -172,7 +188,10 @@ pub fn new_default_allocator() -> UniqueRef<Allocator> {
/// Creates an allocator managed by Rust code.
///
/// Marked `unsafe` because the caller must ensure that `handle` is valid and matches what `vtable` expects.
///
/// Not usable in sandboxed mode
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub unsafe fn new_rust_allocator<T: Sized + Send + Sync + 'static>(
handle: *const T,
vtable: &'static RustAllocatorVtable<T>,
Expand All @@ -187,6 +206,7 @@ pub unsafe fn new_rust_allocator<T: Sized + Send + Sync + 'static>(
}

#[test]
#[cfg(not(feature = "v8_enable_sandbox"))]
fn test_rust_allocator() {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
Expand Down Expand Up @@ -226,6 +246,10 @@ fn test_rust_allocator() {

#[test]
fn test_default_allocator() {
crate::V8::initialize_platform(
crate::new_default_platform(0, false).make_shared(),
);
crate::V8::initialize();
new_default_allocator();
}

Expand Down Expand Up @@ -548,9 +572,10 @@ impl ArrayBuffer {
/// to the buffer must not be passed again to any V8 API function.
#[inline(always)]
pub fn new_backing_store_from_boxed_slice(
scope: &mut Isolate,
data: Box<[u8]>,
) -> UniqueRef<BackingStore> {
Self::new_backing_store_from_bytes(data)
Self::new_backing_store_from_bytes(scope, data)
}

/// Returns a new standalone BackingStore that takes over the ownership of
Expand All @@ -561,8 +586,11 @@ impl ArrayBuffer {
/// The result can be later passed to ArrayBuffer::New. The raw pointer
/// to the buffer must not be passed again to any V8 API function.
#[inline(always)]
pub fn new_backing_store_from_vec(data: Vec<u8>) -> UniqueRef<BackingStore> {
Self::new_backing_store_from_bytes(data)
pub fn new_backing_store_from_vec(
scope: &mut Isolate,
data: Vec<u8>,
) -> UniqueRef<BackingStore> {
Self::new_backing_store_from_bytes(scope, data)
}

/// Returns a new standalone BackingStore backed by a container that dereferences
Expand All @@ -573,6 +601,10 @@ impl ArrayBuffer {
/// `Box<[u8]>`, and `Vec<u8>`. This will also support most other mutable bytes containers (including `bytes::BytesMut`),
/// though these buffers will need to be boxed to manage ownership of memory.
///
/// If the V8 sandbox is enabled, this will copy the entire contents of the container into the V8 sandbox using `memcpy`;
/// otherwise a fast path is taken in which the container is held by Rust. Note that in sandbox mode it
/// is unsafe/undefined behavior to use a backing store once the backing isolate has been destroyed.
///
/// ```
/// // Vector of bytes
/// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(scope, vec![1, 2, 3]);
Expand All @@ -584,11 +616,66 @@ impl ArrayBuffer {
/// ```
#[inline(always)]
pub fn new_backing_store_from_bytes<T>(
mut bytes: T,
scope: &mut Isolate,
bytes: T,
) -> UniqueRef<BackingStore>
where
T: sealed::Rawable,
{
#[cfg(not(feature = "v8_enable_sandbox"))]
{
let _ = scope; // Unused (for now) when no sandbox
Self::new_backing_store_from_bytes_nosandbox(bytes)
}
#[cfg(feature = "v8_enable_sandbox")]
{
Self::new_backing_store_from_bytes_sandbox(scope, bytes)
}
}

// Internal slowpath for sandboxed mode.
#[cfg(feature = "v8_enable_sandbox")]
#[inline(always)]
fn new_backing_store_from_bytes_sandbox<T>(
scope: &mut Isolate,
bytes: T,
) -> UniqueRef<BackingStore>
where
T: sealed::Rawable,
{
let mut bytes = bytes; // Make mutable
let len = bytes.byte_len();

let (ptr, slice) = T::into_raw(bytes);

let unique_ref = unsafe {
UniqueRef::from_raw(
v8__ArrayBuffer__NewBackingStore__with_data_sandboxed(
(*scope).as_real_ptr(),
slice as *mut c_void,
len,
),
)
};

// SAFETY: V8 copied the data into the sandbox above, so the original
// allocation is no longer referenced and can be freed here.
unsafe {
T::drop_raw(ptr, len);
}

unique_ref
}

// Internal fastpath for non-sandboxed mode.
#[cfg(not(feature = "v8_enable_sandbox"))]
#[inline(always)]
fn new_backing_store_from_bytes_nosandbox<T>(
bytes: T,
) -> UniqueRef<BackingStore>
where
T: sealed::Rawable,
{
let mut bytes = bytes; // Make mutable
let len = bytes.byte_len();

let (ptr, slice) = T::into_raw(bytes);
Expand Down Expand Up @@ -618,7 +705,11 @@ impl ArrayBuffer {
///
/// SAFETY: This API consumes raw pointers so is inherently
/// unsafe. Usually you should use new_backing_store_from_boxed_slice.
///
/// This API is incompatible with the v8 sandbox due to safety (use after free)
/// concerns that trigger when using this in sandbox mode.
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub unsafe fn new_backing_store_from_ptr(
data_ptr: *mut c_void,
byte_length: usize,
Expand Down
Loading
Loading