Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 0 additions & 14 deletions .gn
Original file line number Diff line number Diff line change
Expand Up @@ -33,13 +33,10 @@ default_args = {

v8_embedder_string = "-rusty"

v8_enable_sandbox = false
v8_enable_javascript_promise_hooks = true
v8_promise_internal_field_count = 1
v8_use_external_startup_data = false

v8_enable_pointer_compression = false

v8_imminent_deprecation_warnings = false

# This flag speeds up the performance of fork/execve on Linux systems for
Expand All @@ -65,17 +62,6 @@ default_args = {
v8_array_buffer_internal_field_count = 2
v8_array_buffer_view_internal_field_count = 2

# Enabling the shared read-only heap comes with a restriction that all
# isolates running at the same time must be created from the same snapshot.
# This is problematic for Deno, which has separate "runtime" and "typescript
# compiler" snapshots, and sometimes uses them both at the same time.
v8_enable_shared_ro_heap = false

# V8 11.6 hardcoded an assumption in `mksnapshot` that shared RO heap
# is enabled. In our case it's disabled so without this flag we can't
# compile.
v8_enable_verify_heap = false

# Enable V8 object print for debugging.
# v8_enable_object_print = true

Expand Down
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ opt-level = 1
default = ["use_custom_libcxx"]
use_custom_libcxx = []
v8_enable_pointer_compression = []
v8_enable_sandbox = []
v8_enable_v8_checks = []

[dependencies]
Expand Down
6 changes: 6 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -235,3 +235,9 @@ for M1 build.
$ V8_FROM_SOURCE=1 cargo build
$ V8_FROM_SOURCE=1 cargo build --release
```

## Experimental Features

rusty_v8 includes experimental support for certain features that may be useful in security-focused contexts. These features are less well tested than the defaults: they are not exercised by CI, and no prebuilt archives are published for them. Due to their experimental status, using these features requires either setting ``V8_FROM_SOURCE=1`` or supplying a custom-built archive of V8.

- ``v8_enable_sandbox``: Enables V8 sandbox mode. The V8 sandbox improves safety when executing potentially malicious JavaScript code by confining V8's heap inside a memory cage. Note that the sandbox reserves ~1TB of virtual memory (this should generally not be a problem, since most operating systems allow 128–256TB of virtual address space per process). Creating isolates with the sandbox enabled comes with API limitations and may incur additional overhead.
62 changes: 58 additions & 4 deletions build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -209,14 +209,65 @@ fn build_v8(is_asan: bool) {
"use_custom_libcxx={}",
env::var("CARGO_FEATURE_USE_CUSTOM_LIBCXX").is_ok()
));
gn_args.push(format!(
"v8_enable_pointer_compression={}",
env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok()
));

let extra_args = {
if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok()
&& env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok()
{
panic!(
"Sandbox and pointer compression cannot be enabled at the same time"
);
}

if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() {
vec![
        // Enable the sandbox (along with its dependencies, including
        // pointer compression and the external code space below)
"v8_enable_sandbox=true",
"v8_enable_external_code_space=true", // Needed for sandbox
"v8_enable_pointer_compression=true",
// Note that sandbox requires shared_ro_heap and verify_heap
// to be true/default
]
} else {
let mut opts = vec![
// Disable sandbox
"v8_enable_sandbox=false",
// Enabling the shared read-only heap comes with a restriction that all
// isolates running at the same time must be created from the same snapshot.
// This is problematic for Deno, which has separate "runtime" and "typescript
// compiler" snapshots, and sometimes uses them both at the same time.
//
// NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been
// removed
"v8_enable_shared_ro_heap=false",
// V8 11.6 hardcoded an assumption in `mksnapshot` that shared RO heap
// is enabled. In our case it's disabled so without this flag we can't
// compile.
//
// NOTE FOR FUTURE: Check if this flag even exists anymore as it has likely been
// removed
"v8_enable_verify_heap=false",
];

if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() {
opts.push("v8_enable_pointer_compression=true");
} else {
opts.push("v8_enable_pointer_compression=false");
}

opts
}
};

for arg in extra_args {
gn_args.push(arg.to_string());
}

gn_args.push(format!(
"v8_enable_v8_checks={}",
env::var("CARGO_FEATURE_V8_ENABLE_V8_CHECKS").is_ok()
));

// Fix GN's host_cpu detection when using x86_64 bins on Apple Silicon
if cfg!(target_os = "macos") && cfg!(target_arch = "aarch64") {
gn_args.push("host_cpu=\"arm64\"".to_string());
Expand Down Expand Up @@ -439,6 +490,9 @@ fn prebuilt_features_suffix() -> String {
if env::var("CARGO_FEATURE_V8_ENABLE_POINTER_COMPRESSION").is_ok() {
features.push_str("_ptrcomp");
}
if env::var("CARGO_FEATURE_V8_ENABLE_SANDBOX").is_ok() {
features.push_str("_sandbox");
}
features
}

Expand Down
6 changes: 6 additions & 0 deletions src/V8.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ unsafe extern "C" {
fn v8__V8__Dispose() -> bool;
fn v8__V8__DisposePlatform();
fn v8__V8__SetFatalErrorHandler(that: V8FatalErrorCallback);
fn v8__V8__IsSandboxEnabled() -> bool;
}

pub type V8FatalErrorCallback = unsafe extern "C" fn(
Expand Down Expand Up @@ -83,6 +84,11 @@ use GlobalState::*;

static GLOBAL_STATE: Mutex<GlobalState> = Mutex::new(Uninitialized);

/// Returns `true` if this build of V8 has the sandbox enabled
/// (i.e. V8 was compiled with `v8_enable_sandbox`).
///
/// This reflects the build-time configuration of the linked V8 library:
/// the C binding answers via `v8::internal::SandboxIsEnabled()`, an
/// internal V8 API, so the result cannot change at runtime.
pub fn is_sandboxed() -> bool {
  unsafe { v8__V8__IsSandboxEnabled() }
}

pub fn assert_initialized() {
let global_state_guard = GLOBAL_STATE.lock().unwrap();
match *global_state_guard {
Expand Down
58 changes: 50 additions & 8 deletions src/array_buffer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,6 @@ use crate::support::long;

unsafe extern "C" {
fn v8__ArrayBuffer__Allocator__NewDefaultAllocator() -> *mut Allocator;
fn v8__ArrayBuffer__Allocator__NewRustAllocator(
handle: *const c_void,
vtable: *const RustAllocatorVtable<c_void>,
) -> *mut Allocator;
fn v8__ArrayBuffer__Allocator__DELETE(this: *mut Allocator);
fn v8__ArrayBuffer__New__with_byte_length(
isolate: *mut RealIsolate,
Expand Down Expand Up @@ -60,7 +56,6 @@ unsafe extern "C" {
deleter: BackingStoreDeleterCallback,
deleter_data: *mut c_void,
) -> *mut BackingStore;

fn v8__BackingStore__Data(this: *const BackingStore) -> *mut c_void;
fn v8__BackingStore__ByteLength(this: *const BackingStore) -> usize;
fn v8__BackingStore__IsShared(this: *const BackingStore) -> bool;
Expand Down Expand Up @@ -108,6 +103,15 @@ unsafe extern "C" {
) -> long;
}

// Rust allocator feature is only available in non-sandboxed mode
#[cfg(not(feature = "v8_enable_sandbox"))]
unsafe extern "C" {
fn v8__ArrayBuffer__Allocator__NewRustAllocator(
handle: *const c_void,
vtable: *const RustAllocatorVtable<c_void>,
) -> *mut Allocator;
}

/// A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
/// The allocator is a global V8 setting. It has to be set via
/// Isolate::CreateParams.
Expand All @@ -130,6 +134,7 @@ unsafe extern "C" {
pub struct Allocator(Opaque);

/// A wrapper around the V8 Allocator class.
#[cfg(not(feature = "v8_enable_sandbox"))]
#[repr(C)]
pub struct RustAllocatorVtable<T> {
pub allocate: unsafe extern "C" fn(handle: &T, len: usize) -> *mut c_void,
Expand Down Expand Up @@ -172,7 +177,10 @@ pub fn new_default_allocator() -> UniqueRef<Allocator> {
/// Creates an allocator managed by Rust code.
///
/// Marked `unsafe` because the caller must ensure that `handle` is valid and matches what `vtable` expects.
///
/// Not usable in sandboxed mode
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub unsafe fn new_rust_allocator<T: Sized + Send + Sync + 'static>(
handle: *const T,
vtable: &'static RustAllocatorVtable<T>,
Expand All @@ -187,6 +195,7 @@ pub unsafe fn new_rust_allocator<T: Sized + Send + Sync + 'static>(
}

#[test]
#[cfg(not(feature = "v8_enable_sandbox"))]
fn test_rust_allocator() {
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
Expand Down Expand Up @@ -226,6 +235,10 @@ fn test_rust_allocator() {

#[test]
fn test_default_allocator() {
  // V8 is initialized before constructing the default allocator;
  // presumably allocator construction depends on process-wide V8 state
  // (e.g. the sandbox address-space reservation when `v8_enable_sandbox`
  // is on) — TODO confirm against the V8 ArrayBuffer::Allocator docs.
  crate::V8::initialize_platform(
    crate::new_default_platform(0, false).make_shared(),
  );
  crate::V8::initialize();
  new_default_allocator();
}

Expand All @@ -241,6 +254,7 @@ pub type BackingStoreDeleterCallback = unsafe extern "C" fn(
deleter_data: *mut c_void,
);

#[cfg(not(feature = "v8_enable_sandbox"))]
pub(crate) mod sealed {
pub trait Rawable {
fn byte_len(&mut self) -> usize;
Expand All @@ -249,6 +263,7 @@ pub(crate) mod sealed {
}
}

#[cfg(not(feature = "v8_enable_sandbox"))]
macro_rules! rawable {
($ty:ty) => {
impl sealed::Rawable for Box<[$ty]> {
Expand Down Expand Up @@ -289,15 +304,24 @@ macro_rules! rawable {
};
}

#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u8);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u16);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u32);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(u64);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i8);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i16);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i32);
#[cfg(not(feature = "v8_enable_sandbox"))]
rawable!(i64);

#[cfg(not(feature = "v8_enable_sandbox"))]
impl<T: Sized> sealed::Rawable for Box<T>
where
T: AsMut<[u8]>,
Expand Down Expand Up @@ -546,7 +570,10 @@ impl ArrayBuffer {
///
/// The result can be later passed to ArrayBuffer::New. The raw pointer
/// to the buffer must not be passed again to any V8 API function.
///
/// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_boxed_slice(
data: Box<[u8]>,
) -> UniqueRef<BackingStore> {
Expand All @@ -560,7 +587,10 @@ impl ArrayBuffer {
///
/// The result can be later passed to ArrayBuffer::New. The raw pointer
/// to the buffer must not be passed again to any V8 API function.
///
/// Not available in Sandbox Mode, see new_backing_store_from_bytes for a potential alternative
#[inline(always)]
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_vec(data: Vec<u8>) -> UniqueRef<BackingStore> {
Self::new_backing_store_from_bytes(data)
}
Expand All @@ -573,6 +603,12 @@ impl ArrayBuffer {
/// `Box<[u8]>`, and `Vec<u8>`. This will also support most other mutable bytes containers (including `bytes::BytesMut`),
/// though these buffers will need to be boxed to manage ownership of memory.
///
/// Not available in sandbox mode. Sandbox mode requires data to be allocated
/// within the sandbox's address space. Within sandbox mode, consider the below alternatives
///
/// 1. consider using new_backing_store and BackingStore::data() followed by doing a std::ptr::copy to copy the data into a BackingStore.
/// 2. If you truly do have data that is allocated inside the sandbox address space, consider using the unsafe new_backing_store_from_ptr API
///
/// ```
/// // Vector of bytes
/// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(vec![1, 2, 3]);
Expand All @@ -583,12 +619,12 @@ impl ArrayBuffer {
/// let backing_store = v8::ArrayBuffer::new_backing_store_from_bytes(Box::new(bytes::BytesMut::new()));
/// ```
#[inline(always)]
pub fn new_backing_store_from_bytes<T>(
mut bytes: T,
) -> UniqueRef<BackingStore>
#[cfg(not(feature = "v8_enable_sandbox"))]
pub fn new_backing_store_from_bytes<T>(bytes: T) -> UniqueRef<BackingStore>
where
T: sealed::Rawable,
{
let mut bytes = bytes; // Make mutable
let len = bytes.byte_len();

let (ptr, slice) = T::into_raw(bytes);
Expand Down Expand Up @@ -618,6 +654,12 @@ impl ArrayBuffer {
///
/// SAFETY: This API consumes raw pointers so is inherently
/// unsafe. Usually you should use new_backing_store_from_boxed_slice.
///
/// WARNING: Using sandbox mode has extra limitations that may cause crashes
/// or memory safety violations if this API is used incorrectly:
///
/// 1. Sandbox mode requires data to be allocated within the sandbox's address space.
/// 2. It is very easy to cause memory safety errors when using this API with sandbox mode
#[inline(always)]
pub unsafe fn new_backing_store_from_ptr(
data_ptr: *mut c_void,
Expand Down
6 changes: 6 additions & 0 deletions src/binding.cc
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,12 @@ static_assert(sizeof(v8::Isolate::DisallowJavascriptExecutionScope) == 12,
"DisallowJavascriptExecutionScope size mismatch");
#endif

// Reports whether the linked V8 library was built with the sandbox enabled.
// Note: this currently relies on an internal (non-public) V8 API,
// v8::internal::SandboxIsEnabled(), so it may need updating if upstream
// changes that symbol. Used by the Rust side (V8::is_sandboxed) and the
// test suite to detect the build configuration.
extern "C" bool v8__V8__IsSandboxEnabled() {
  return v8::internal::SandboxIsEnabled();
}

extern "C" {
void v8__V8__SetFlagsFromCommandLine(int* argc, char** argv,
const char* usage) {
Expand Down
Loading
Loading