Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 7 additions & 4 deletions src/hyperlight_wasm/src/sandbox/loaded_wasm_sandbox.rs
Original file line number Diff line number Diff line change
Expand Up @@ -112,11 +112,14 @@ impl LoadedWasmSandbox {
}
}

/// Unload the wasm module and return a `WasmSandbox` that can be
/// used to load another module.
///
/// This method defers calling [`restore()`](Self::restore) to
/// reset the sandbox to its pre-module state until a new module
/// is loaded. However, the sandbox will always be restored when a
/// new module is loaded, so a poisoned sandbox can be recovered
/// by unloading and reloading a module.
pub fn unload_module(mut self) -> Result<WasmSandbox> {
let sandbox = self
.inner
Expand Down
77 changes: 75 additions & 2 deletions src/hyperlight_wasm/src/sandbox/wasm_sandbox.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ pub struct WasmSandbox {
// Snapshot of state of an initial WasmSandbox (runtime loaded, but no guest module code loaded).
// Used for LoadedWasmSandbox to be able restore state back to WasmSandbox
snapshot: Option<Arc<Snapshot>>,
needs_restore: bool,
}

const MAPPED_BINARY_VA: u64 = 0x1_0000_0000u64;
Expand All @@ -56,6 +57,7 @@ impl WasmSandbox {
Ok(WasmSandbox {
inner: Some(inner),
snapshot: Some(snapshot),
needs_restore: false,
})
}

Expand All @@ -64,24 +66,41 @@ impl WasmSandbox {
/// the snapshot has already been created in that case.
/// Expects a snapshot of the state where wasm runtime is loaded, but no guest module code is loaded.
pub(super) fn new_from_loaded(
    loaded: MultiUseSandbox,
    snapshot: Arc<Snapshot>,
) -> Result<Self> {
    // NOTE: the rollback to the pre-module snapshot is deliberately
    // deferred. `needs_restore: true` marks the sandbox dirty, and
    // `restore_if_needed()` performs the restore lazily just before
    // the next module is loaded, avoiding a restore that may never
    // be needed.
    metrics::gauge!(METRIC_ACTIVE_WASM_SANDBOXES).increment(1);
    metrics::counter!(METRIC_TOTAL_WASM_SANDBOXES).increment(1);
    Ok(WasmSandbox {
        inner: Some(loaded),
        snapshot: Some(snapshot),
        needs_restore: true,
    })
}

/// Roll the sandbox back to its pre-module snapshot if it was
/// created from a previously loaded sandbox and has not been
/// restored yet. Idempotent: clears the dirty flag on success.
fn restore_if_needed(&mut self) -> Result<()> {
    // Fast path: nothing to do for a freshly built sandbox.
    if !self.needs_restore {
        return Ok(());
    }
    let inner = self
        .inner
        .as_mut()
        .ok_or(new_error!("WasmSandbox is none"))?;
    let snapshot = self
        .snapshot
        .as_ref()
        .ok_or(new_error!("Snapshot is none"))?
        .clone();
    inner.restore(snapshot)?;
    self.needs_restore = false;
    Ok(())
}

/// Load a Wasm module at the given path into the sandbox and return a `LoadedWasmSandbox`
/// able to execute code in the loaded Wasm Module.
///
/// Before you can call guest functions in the sandbox, you must call
/// this function and use the returned value to call guest functions.
pub fn load_module(mut self, file: impl AsRef<Path>) -> Result<LoadedWasmSandbox> {
self.restore_if_needed()?;
let inner = self
.inner
.as_mut()
Expand All @@ -97,6 +116,18 @@ impl WasmSandbox {
self.finalize_module_load()
}

/// Load a Wasm module by restoring a Hyperlight snapshot taken
/// from a `LoadedWasmSandbox`.
pub fn load_from_snapshot(mut self, snapshot: Arc<Snapshot>) -> Result<LoadedWasmSandbox> {
    // Restoring the caller-supplied snapshot overwrites all guest
    // state, so no separate rollback to the pre-module snapshot is
    // performed first.
    match self.inner.as_mut() {
        Some(inner) => inner.restore(snapshot)?,
        None => return Err(new_error!("WasmSandbox is None")),
    }

    self.finalize_module_load()
}

/// Load a Wasm module that is currently present in a buffer in
/// host memory, by mapping the host memory directly into the
/// sandbox.
Expand All @@ -114,6 +145,7 @@ impl WasmSandbox {
base: *mut libc::c_void,
len: usize,
) -> Result<LoadedWasmSandbox> {
self.restore_if_needed()?;
let inner = self
.inner
.as_mut()
Expand Down Expand Up @@ -142,6 +174,7 @@ impl WasmSandbox {
/// Before you can call guest functions in the sandbox, you must call
/// this function and use the returned value to call guest functions.
pub fn load_module_from_buffer(mut self, buffer: &[u8]) -> Result<LoadedWasmSandbox> {
self.restore_if_needed()?;
let inner = self
.inner
.as_mut()
Expand Down Expand Up @@ -473,6 +506,46 @@ mod tests {
}
}

#[test]
fn test_load_from_snapshot() {
    // Build a sandbox with the host function the guest modules need,
    // then load the wasm runtime.
    let mut builder_sb = SandboxBuilder::new().build().unwrap();
    builder_sb
        .register(
            "GetTimeSinceBootMicrosecond",
            get_time_since_boot_microsecond,
        )
        .unwrap();
    let wasm_sb = builder_sb.load_runtime().unwrap();

    let hello_path = get_test_file_path("HelloWorld.aot").unwrap();
    let fib_path = get_test_file_path("RunWasm.aot").unwrap();

    // First module: call a function in it, then snapshot the loaded
    // state for later.
    let mut hello_sb = wasm_sb.load_module(hello_path).unwrap();
    let hello_result: i32 = hello_sb
        .call_guest_function("HelloWorld", "Message from Rust Test".to_string())
        .unwrap();
    assert_eq!(hello_result, 0);
    let snapshot = hello_sb.snapshot().unwrap();

    // Second module: verify a different module can be loaded and
    // called after unloading the first.
    let wasm_sb = hello_sb.unload_module().unwrap();
    let mut fib_sb = wasm_sb.load_module(fib_path).unwrap();
    let fib_result: i32 = fib_sb.call_guest_function("CalcFib", 10i32).unwrap();
    assert_eq!(fib_result, 55);

    // Restore the first module from its snapshot and verify its
    // function works again.
    let wasm_sb = fib_sb.unload_module().unwrap();
    let mut restored_sb = wasm_sb.load_from_snapshot(snapshot).unwrap();
    let restored_result: i32 = restored_sb
        .call_guest_function("HelloWorld", "Message from Rust Test".to_string())
        .unwrap();
    assert_eq!(restored_result, 0);
}

#[test]
fn test_load_module_buffer() {
let sandboxes = get_test_wasm_sandboxes().unwrap();
Expand Down
118 changes: 106 additions & 12 deletions src/hyperlight_wasm_runtime/src/platform.rs
Original file line number Diff line number Diff line change
Expand Up @@ -172,32 +172,126 @@ pub extern "C" fn wasmtime_init_traps(handler: wasmtime_trap_handler_t) -> i32 {
0
}

// Copy a VA range to a new VA. Old and new VA, and len, must be
// page-aligned.
//
// Walks the source range page by page and maps each backed physical
// page a second time at the corresponding offset from `to_va`.
// Writable source pages are mapped copy-on-write in the destination;
// when `remap_original` is true, the original writable pages are also
// downgraded to copy-on-write so that later writes on either side
// cannot mutate the shared physical page in place.
fn copy_va_mapping(base: *const u8, len: usize, to_va: *mut u8, remap_original: bool) {
    // TODO: all this barrier code is amd64 specific. It should be
    // refactored to use some better architecture-independent APIs.
    //
    // On amd64, "upgrades" including the first time that a valid
    // translation exists for a VA, only need a light (serialising
    // instruction) barrier. Since invlpg is also a barrier, we don't
    // even need that, if we did do a downgrade remap just before.
    let mut needs_first_valid_exposure_barrier = false;

    // TODO: make this more efficient by directly exposing the ability
    // to traverse an entire VA range in
    // hyperlight_guest_bin::paging::virt_to_phys, and coalescing
    // continuous ranges there.
    let base_u = base as u64;
    let va_page_bases = (base_u..(base_u + len as u64)).step_by(vmem::PAGE_SIZE);
    let mappings = va_page_bases.flat_map(paging::virt_to_phys);
    for mapping in mappings {
        // TODO: Deduplicate with identical logic in hyperlight_host snapshot.
        //
        // Decide how this page will be mapped in the copy (and, when
        // downgrading, in the original): writable pages become CoW,
        // while read-only and already-CoW pages keep their current
        // protections.
        let (new_kind, was_writable) = match mapping.kind {
            // Skip unmapped pages, since they will be unmapped in
            // both the original and the new copy
            vmem::MappingKind::Unmapped => continue,
            vmem::MappingKind::Basic(bm) if bm.writable => (
                vmem::MappingKind::Cow(vmem::CowMapping {
                    readable: bm.readable,
                    executable: bm.executable,
                }),
                true,
            ),
            vmem::MappingKind::Basic(bm) => (
                vmem::MappingKind::Basic(vmem::BasicMapping {
                    readable: bm.readable,
                    writable: false,
                    executable: bm.executable,
                }),
                false,
            ),
            vmem::MappingKind::Cow(cm) => (vmem::MappingKind::Cow(cm), false),
        };
        let do_downgrade = remap_original && was_writable;
        if do_downgrade {
            // If necessary, remap the original page as Cow, instead
            // of whatever it is now, to ensure that any more writes to
            // that region do not change the image base.
            //
            // TODO: could the table traversal needed for this be fused
            // with the table traversal that got the original mapping,
            // above?
            unsafe {
                paging::map_region(
                    mapping.phys_base,
                    mapping.virt_base as *mut u8,
                    vmem::PAGE_SIZE as u64,
                    new_kind,
                );
            }
        }
        // map the same pages to the new VA, preserving each page's
        // offset within the range
        unsafe {
            paging::map_region(
                mapping.phys_base,
                to_va.wrapping_add((mapping.virt_base - base_u) as usize),
                vmem::PAGE_SIZE as u64,
                new_kind,
            );
        }
        if do_downgrade {
            // Since we have downgraded a page from writable to CoW we
            // need to do an invlpg on it. Because invlpg is a
            // serialising instruction, we don't need the other
            // barrier for the new mapping.
            unsafe {
                core::arch::asm!("invlpg [{}]", in(reg) mapping.virt_base, options(readonly, nostack, preserves_flags));
            }
            needs_first_valid_exposure_barrier = false;
        } else {
            needs_first_valid_exposure_barrier = true;
        }
    }
    if needs_first_valid_exposure_barrier {
        // At least one brand-new translation was exposed after the
        // last invlpg (if any), so issue the lighter barrier for
        // first-time-valid mappings.
        paging::barrier::first_valid_same_ctx();
    }
}

// Create a copy-on-write memory image from some existing VA range.
// `ptr` and `len` must be page-aligned (which is guaranteed by the
// wasmtime-platform.h interface).
//
// On success, writes the image identifier into `ret` and returns 0.
#[no_mangle]
pub extern "C" fn wasmtime_memory_image_new(
    ptr: *const u8,
    len: usize,
    ret: &mut *mut c_void,
) -> i32 {
    // Choose an arbitrary VA, which we will use as the memory image
    // identifier. We will construct the image by mapping a copy of
    // the original VA range here, making the original copy CoW as we
    // go (remap_original = true).
    let new_virt = FIRST_VADDR.fetch_add(0x100_0000_0000, Ordering::Relaxed) as *mut u8;
    copy_va_mapping(ptr, len, new_virt, true);
    *ret = new_virt as *mut c_void;
    0
}

#[no_mangle]
pub extern "C" fn wasmtime_memory_image_map_at(
_image: *mut c_void,
_addr: *mut u8,
_len: usize,
image: *mut c_void,
addr: *mut u8,
len: usize,
) -> i32 {
/* This should never be called because wasmtime_memory_image_new
* returns NULL */
panic!("wasmtime_memory_image_map_at");
copy_va_mapping(image as *mut u8, len, addr, false);
0
}

#[no_mangle]
pub extern "C" fn wasmtime_memory_image_free(_image: *mut c_void) {
    /* This should never be called in practice, because we simply
     * restore the snapshot rather than actually unload/destroy instances */
    panic!("wasmtime_memory_image_free");
}

Expand Down
Loading