wasmtime/runtime/vm/vmcontext.rs
1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
9use crate::store::StoreOpaque;
10use crate::vm::stack_switching::VMStackChain;
11use core::cell::UnsafeCell;
12use core::ffi::c_void;
13use core::fmt;
14use core::marker;
15use core::mem::{self, MaybeUninit};
16use core::ops::Range;
17use core::ptr::{self, NonNull};
18use core::sync::atomic::{AtomicUsize, Ordering};
19use wasmtime_environ::{
20 BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
21 DefinedTagIndex, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
22};
23
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of that `ValRaw` buffer, in elements. Must always be at
///   least `max(len(wasm_params), len(wasm_results))` so arguments can be
///   read and results written in place.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;
52
/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
///
/// `repr(transparent)` guarantees this has the same layout as
/// `VMFunctionBody`, so it is only ever handled through raw pointers.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);
58
/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
///
/// `repr(transparent)` keeps this layout-identical to `VMFunctionBody`.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
69
/// An imported function.
///
/// Basically the same as `VMFuncRef`, except that `wasm_call` is not optional;
/// the layout compatibility between the two is asserted by tests below.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Same as `VMFuncRef::array_call`.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Same as `VMFuncRef::wasm_call`, except always non-null. Must be filled
    /// in by the time Wasm is importing this function!
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function signature's _actual_ type id.
    ///
    /// This is the type that the function was defined with, not the type that
    /// it was imported as. These two can be different in the face of subtyping
    /// and we need the former to correctly implement dynamic downcasts.
    pub type_index: VMSharedTypeIndex,

    /// Same as `VMFuncRef::vmctx`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}
97
98impl VMFunctionImport {
99 /// Convert `&VMFunctionImport` into `&VMFuncRef`.
100 pub fn as_func_ref(&self) -> &VMFuncRef {
101 // Safety: `VMFunctionImport` and `VMFuncRef` have the same
102 // representation.
103 unsafe { Self::as_non_null_func_ref(NonNull::from(self)).as_ref() }
104 }
105
106 /// Convert `NonNull<VMFunctionImport>` into `NonNull<VMFuncRef>`.
107 pub fn as_non_null_func_ref(p: NonNull<VMFunctionImport>) -> NonNull<VMFuncRef> {
108 p.cast()
109 }
110
111 /// Convert `*mut VMFunctionImport` into `*mut VMFuncRef`.
112 pub fn as_func_ref_ptr(p: *mut VMFunctionImport) -> *mut VMFuncRef {
113 p.cast()
114 }
115}
116
#[cfg(test)]
mod test_vmfunction_import {
    use super::{VMFuncRef, VMFunctionImport};
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side layout in sync with the offsets compiled code uses.
    #[test]
    fn check_vmfunction_import_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMFunctionImport>(), usize::from(o.size_of_vmfunction_import()));
        assert_eq!(offset_of!(VMFunctionImport, array_call), usize::from(o.vmfunction_import_array_call()));
        assert_eq!(offset_of!(VMFunctionImport, wasm_call), usize::from(o.vmfunction_import_wasm_call()));
        assert_eq!(offset_of!(VMFunctionImport, type_index), usize::from(o.vmfunction_import_type_index()));
        assert_eq!(offset_of!(VMFunctionImport, vmctx), usize::from(o.vmfunction_import_vmctx()));
    }

    /// `VMFunctionImport::as_func_ref` relies on these two types being
    /// layout-compatible.
    #[test]
    fn vmfunction_import_and_vmfunc_ref_have_same_layout() {
        assert_eq!(size_of::<VMFunctionImport>(), size_of::<VMFuncRef>());
        assert_eq!(offset_of!(VMFunctionImport, array_call), offset_of!(VMFuncRef, array_call));
        assert_eq!(offset_of!(VMFunctionImport, wasm_call), offset_of!(VMFuncRef, wasm_call));
        assert_eq!(offset_of!(VMFunctionImport, type_index), offset_of!(VMFuncRef, type_index));
        assert_eq!(offset_of!(VMFunctionImport, vmctx), offset_of!(VMFuncRef, vmctx));
    }
}
171
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
///
/// Pointer arithmetic over `*mut VMFunctionBody` relies on this being exactly
/// one byte wide (asserted by a test below).
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}
181
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    /// `VMFunctionBody` must stay exactly one byte wide so that pointer
    /// arithmetic over function bodies behaves like byte arithmetic.
    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}
192
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}
210
#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side layout in sync with the offsets compiled code uses.
    #[test]
    fn check_vmtable_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMTableImport>(), usize::from(o.size_of_vmtable_import()));
        assert_eq!(offset_of!(VMTableImport, from), usize::from(o.vmtable_import_from()));
        assert_eq!(offset_of!(VMTableImport, vmctx), usize::from(o.vmtable_import_vmctx()));
        assert_eq!(offset_of!(VMTableImport, index), usize::from(o.vmtable_import_index()));
    }

    /// Components record tables with `VMTableImport` too, so the size
    /// computed by `VMComponentOffsets` must agree with `VMOffsets`.
    #[test]
    fn ensure_sizes_match() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let core_offsets = VMOffsets::new(HostPtr, &module);
        let component = Component::default();
        let component_offsets = VMComponentOffsets::new(HostPtr, &component);
        assert_eq!(
            core_offsets.size_of_vmtable_import(),
            component_offsets.size_of_vmtable_import()
        );
    }
}
256
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}
274
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side layout in sync with the offsets compiled code uses.
    #[test]
    fn check_vmmemory_import_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMMemoryImport>(), usize::from(o.size_of_vmmemory_import()));
        assert_eq!(offset_of!(VMMemoryImport, from), usize::from(o.vmmemory_import_from()));
        assert_eq!(offset_of!(VMMemoryImport, vmctx), usize::from(o.vmmemory_import_vmctx()));
        assert_eq!(offset_of!(VMMemoryImport, index), usize::from(o.vmmemory_import_index()));
    }
}
304
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike functions, tables, and memories the `vmctx` pointer here
/// is optional and its meaning depends on `kind` below. Globals are never
/// resized, so accessing the value itself only requires `from`.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}
332
/// The kinds of globals that Wasmtime has.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
    /// Whether the current component-model task may block.
    // NOTE(review): semantics inferred from the variant name; confirm against
    // the component-model async implementation.
    #[cfg(feature = "component-model")]
    TaskMayBlock,
}

// SAFETY: the above enum is repr(C, u32) and its payloads are all `VmSafe`.
unsafe impl VmSafe for VMGlobalKind {}
350
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side layout in sync with the offsets compiled code uses.
    #[test]
    fn check_vmglobal_import_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMGlobalImport>(), usize::from(o.size_of_vmglobal_import()));
        assert_eq!(offset_of!(VMGlobalImport, from), usize::from(o.vmglobal_import_from()));
    }
}
372
/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}
390
#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side layout in sync with the offsets compiled code uses.
    #[test]
    fn check_vmtag_import_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMTagImport>(), usize::from(o.size_of_vmtag_import()));
        assert_eq!(offset_of!(VMTagImport, from), usize::from(o.vmtag_import_from()));
        assert_eq!(offset_of!(VMTagImport, vmctx), usize::from(o.vmtag_import_vmctx()));
        assert_eq!(offset_of!(VMTagImport, index), usize::from(o.vmtag_import_index()));
    }
}
419
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}
440
441impl VMMemoryDefinition {
442 /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
443 /// performing a relaxed load; do not use this function for situations in
444 /// which a precise length is needed. Owned memories (i.e., non-shared) will
445 /// always return a precise result (since no concurrent modification is
446 /// possible) but shared memories may see an imprecise value--a
447 /// `current_length` potentially smaller than what some other thread
448 /// observes. Since Wasm memory only grows, this under-estimation may be
449 /// acceptable in certain cases.
450 #[inline]
451 pub fn current_length(&self) -> usize {
452 self.current_length.load(Ordering::Relaxed)
453 }
454
455 /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
456 /// `current_length`; see [`VMMemoryDefinition::current_length()`].
457 #[inline]
458 pub unsafe fn load(ptr: *mut Self) -> Self {
459 let other = unsafe { &*ptr };
460 VMMemoryDefinition {
461 base: other.base,
462 current_length: other.current_length().into(),
463 }
464 }
465}
466
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side layout in sync with the offsets compiled code uses.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMMemoryDefinition>(), usize::from(o.ptr.size_of_vmmemory_definition()));
        assert_eq!(offset_of!(VMMemoryDefinition, base), usize::from(o.ptr.vmmemory_definition_base()));
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(o.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}
498
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}
513
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side layout in sync with the offsets compiled code uses.
    #[test]
    fn check_vmtable_definition_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMTableDefinition>(), usize::from(o.size_of_vmtable_definition()));
        assert_eq!(offset_of!(VMTableDefinition, base), usize::from(o.vmtable_definition_base()));
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(o.vmtable_definition_current_elements())
        );
    }
}
539
/// The storage for a WebAssembly global defined within the instance.
///
/// The 16-byte, 16-byte-aligned storage is large enough for every global
/// type, including `v128`; accessors on the `impl` below reinterpret the
/// bytes as the appropriate type.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // Raw (zero-initialized) backing bytes for the global's value.
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}
553
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// A global's storage must be sufficiently aligned for every type it can
    /// hold.
    #[test]
    fn check_vmglobal_definition_alignment() {
        let align = align_of::<VMGlobalDefinition>();
        assert!(align >= align_of::<i32>());
        assert!(align >= align_of::<i64>());
        assert!(align >= align_of::<f32>());
        assert!(align >= align_of::<f64>());
        assert!(align >= align_of::<[u8; 16]>());
        assert!(align >= align_of::<[f32; 4]>());
        assert!(align >= align_of::<[f64; 2]>());
    }

    /// Keep the Rust-side size in sync with the size compiled code uses.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMGlobalDefinition>(), usize::from(o.ptr.size_of_vmglobal_definition()));
    }

    /// The globals area inside a `VMContext` starts 16-byte aligned.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(o.vmctx_globals_begin() % 16, 0);
    }

    /// GC references must fit within a global's storage.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
594
impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition` with zero-initialized storage.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        unsafe {
            match wasm_ty {
                WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
                WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
                // Floats are shuttled around as their raw bit patterns.
                WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
                WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
                WasmValType::V128 => global.set_u128(raw.get_v128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => {
                        let r = VMGcRef::from_raw_u32(raw.get_externref());
                        // `init_gc_ref` (not `write_gc_ref`): the freshly
                        // zeroed storage holds no previous GC reference.
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Any => {
                        let r = VMGcRef::from_raw_u32(raw.get_anyref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                    WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
                    WasmHeapTopType::Exn => {
                        let r = VMGcRef::from_raw_u32(raw.get_exnref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                },
            }
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// GC references are cloned so the returned raw value carries its own
    /// reference; a null reference maps to the raw value 0.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        unsafe {
            Ok(match wasm_ty {
                WasmValType::I32 => ValRaw::i32(*self.as_i32()),
                WasmValType::I64 => ValRaw::i64(*self.as_i64()),
                WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
                WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
                WasmValType::V128 => ValRaw::v128(self.get_u128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                        Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }),
                    WasmHeapTopType::Any => ValRaw::anyref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Exn => ValRaw::exnref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                },
            })
        }
    }

    /// Return a reference to the value as an i32.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_i32(&self) -> &i32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i32>()) }
    }

    /// Return a mutable reference to the value as an i32.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>()) }
    }

    /// Return a reference to the value as a u32.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_u32(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as a u32.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an i64.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_i64(&self) -> &i64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i64>()) }
    }

    /// Return a mutable reference to the value as an i64.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>()) }
    }

    /// Return a reference to the value as a u64.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_u64(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as a u64.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Return a reference to the value as an f32.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f32(&self) -> &f32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f32>()) }
    }

    /// Return a mutable reference to the value as an f32.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>()) }
    }

    /// Return a reference to the value as f32 bits.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as f32 bits.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an f64.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f64(&self) -> &f64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f64>()) }
    }

    /// Return a mutable reference to the value as an f64.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>()) }
    }

    /// Return a reference to the value as f64 bits.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as f64 bits.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Gets the underlying 128-bit vector value.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        unsafe { u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>())) }
    }

    /// Sets the 128-bit vector values.
    ///
    /// # Safety
    /// The global must be one whose type is stored as a 128-bit vector.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        unsafe {
            *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
        }
    }

    /// Return a reference to the value as u128 bits.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a mutable reference to the value as u128 bits.
    ///
    /// # Safety
    /// The global must currently store a value of this type.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a reference to the global value as a borrowed GC reference.
    ///
    /// # Safety
    /// The global must currently store a (possibly null) GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = unsafe { (*raw_ptr).as_ref() };
        // Without the `gc` feature only null GC refs should ever exist.
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    ///
    /// # Safety
    /// The global's storage is treated as uninitialized (via `MaybeUninit`):
    /// any previously stored GC reference is overwritten without being
    /// released. Only use this on a fresh global; use `write_gc_ref` to
    /// update an existing one.
    pub unsafe fn init_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe {
            &mut *(self
                .storage
                .as_mut()
                .as_mut_ptr()
                .cast::<MaybeUninit<Option<VMGcRef>>>())
        };

        store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    ///
    /// # Safety
    /// The global must already store a valid (possibly null) GC reference,
    /// which the store updates in place.
    pub unsafe fn write_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
        store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    ///
    /// # Safety
    /// The global must currently store a funcref.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        unsafe { *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>()) }
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    ///
    /// # Safety
    /// The global must currently store a funcref.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>()) }
    }
}
826
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side size in sync with the size compiled code uses.
    #[test]
    fn check_vmshared_type_index() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMSharedTypeIndex>(), usize::from(o.size_of_vmshared_type_index()));
    }
}
843
/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Construct a tag definition for the given signature type.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}
862
#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// Keep the Rust-side size in sync with the size compiled code uses.
    #[test]
    fn check_vmtag_definition_offsets() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(size_of::<VMTagDefinition>(), usize::from(o.ptr.size_of_vmtag_definition()));
    }

    /// The tags area inside a `VMContext` starts 16-byte aligned.
    #[test]
    fn check_vmtag_begins_aligned() {
        let m = Module::new(StaticModuleIndex::from_u32(0));
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(o.vmctx_tags_begin() % 16, 0);
    }
}
886
/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}
933
impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the length of `args_and_results` as the capacity of that buffer
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns whether a trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        me: NonNull<VMFuncRef>,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Dispatch on whether a Pulley interpreter is in use; without one the
        // `array_call` pointer is native code and is invoked directly.
        match pulley {
            Some(vm) => unsafe { Self::array_call_interpreted(me, vm, caller, args_and_results) },
            None => unsafe { Self::array_call_native(me, caller, args_and_results) },
        }
    }

    /// Invoke `array_call` through the Pulley interpreter `vm`.
    ///
    /// Safety: same requirements as `array_call` with `pulley` set to `Some`.
    unsafe fn array_call_interpreted(
        me: NonNull<VMFuncRef>,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If `caller` is actually a `VMArrayCallHostFuncContext` then skip the
        // interpreter, even though it's available, as `array_call` will be
        // native code.
        unsafe {
            if me.as_ref().vmctx.as_non_null().as_ref().magic
                == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
            {
                return Self::array_call_native(me, caller, args_and_results);
            }
            vm.call(
                me.as_ref().array_call.as_non_null(),
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results,
            )
        }
    }

    /// Invoke `array_call` directly as a native function pointer.
    ///
    /// Safety: same requirements as `array_call` with `pulley` set to `None`.
    #[inline]
    unsafe fn array_call_native(
        me: NonNull<VMFuncRef>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        unsafe {
            // Reinterpret the stored opaque pointer as a `VMArrayCallNative`
            // function pointer. A union is used for this conversion since
            // there's no direct cast between data pointers and function
            // pointers.
            union GetNativePointer {
                native: VMArrayCallNative,
                ptr: NonNull<VMArrayCallFunction>,
            }
            let native = GetNativePointer {
                ptr: me.as_ref().array_call.as_non_null(),
            }
            .native;
            native(
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results.cast(),
                args_and_results.len(),
            )
        }
    }

    /// Views this `VMFuncRef` as a `VMFunctionImport` if its `wasm_call`
    /// pointer has been filled in, returning `None` otherwise.
    pub(crate) fn as_vm_function_import(&self) -> Option<&VMFunctionImport> {
        if self.wasm_call.is_some() {
            // Safety: `VMFuncRef` and `VMFunctionImport` have the same layout
            // and `wasm_call` is non-null.
            Some(unsafe { NonNull::from(self).cast::<VMFunctionImport>().as_ref() })
        } else {
            None
        }
    }
}
1031
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// Double-check that the Rust-side layout of `VMFuncRef` agrees with the
    /// offsets that `wasmtime_environ` advertises to compiled code.
    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        let ptr = &offsets.ptr;

        // Total size first, then each field in declaration order.
        assert_eq!(size_of::<VMFuncRef>(), usize::from(ptr.size_of_vm_func_ref()));
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(ptr.vm_func_ref_vmctx())
        );
    }
}
1065
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            // One entry per builtin, each pointing at the corresponding raw
            // libcall implementation.
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self>{
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    // Mapping from the builtin-signature type names used by
    // `foreach_builtin_function!` to concrete Rust types.
    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty size) => (usize);
    (@ty vmctx) => (NonNull<VMContext>);
}
1123
// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

// Instantiate `VMBuiltinFunctionsArray` (and its `INIT` table) from the
// canonical list of builtin functions.
wasmtime_environ::foreach_builtin_function!(define_builtin_array);

// Sanity-check that the array has exactly one pointer-sized slot per builtin.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
1135
/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// The "store version".
    ///
    /// This is used to test whether stack-frame handles referring to
    /// suspended stack frames remain valid.
    ///
    /// The invariant that this upward-counting number must satisfy
    /// is: the number must be incremented whenever execution starts
    /// or resumes in the `Store` or when any stack is
    /// dropped/freed. That way, if we take a reference to some
    /// suspended stack frame and track the "version" at the time we
    /// took that reference, if the version still matches, we can be
    /// sure that nothing could have unwound the referenced Wasm
    /// frame.
    ///
    /// This version number is incremented in exactly one place: the
    /// Wasm-to-host trampolines, after return from host code. Note
    /// that this captures both the normal "return into Wasm" case
    /// (where Wasm frames can subsequently return normally and thus
    /// invalidate frames), and the "trap/exception unwinds Wasm
    /// frames" case, which is done internally via the `raise` libcall
    /// invoked after the main hostcall returns an error, and after we
    /// increment this version number.
    ///
    /// Note that this also handles the fiber/future-drop case because
    /// we *always* return into the trampoline to clean up;
    /// that trampoline immediately raises an error and uses the
    /// longjmp-like unwind within Cranelift frames to skip over all
    /// the guest Wasm frames, but not before it increments the
    /// store's execution version number.
    ///
    /// This field is in use only if guest debugging is enabled.
    pub execution_version: u64,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register in the trampoline used
    /// to call from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just
    /// before calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames
    /// when walking the stack. Note that we record the FP of the
    /// *trampoline*'s frame, not the last Wasm frame, because we need
    /// to know the SP (bottom of frame) of the last Wasm frame as
    /// well in case we need to resume to an exception handler in that
    /// frame. The FP of the last Wasm frame can be recovered by
    /// loading the saved FP value at this FP address.
    pub last_wasm_exit_trampoline_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline. This member is `0` when Wasm
    /// is not running, and it's set to nonzero once a host-to-wasm trampoline
    /// is executed.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member is not changed meaning that the
    /// previous activation pointed to by `last_wasm_exit_trampoline_fp` is
    /// still the last wasm set of frames on the stack.
    ///
    /// This field is saved/restored during fiber suspension/resumption
    /// as part of `CallThreadState::swap`.
    ///
    /// This field is used to find the end of a contiguous sequence of Wasm
    /// frames when walking the stack. Additionally it's used when a trap is
    /// raised as part of the set of parameters used to resume in the entry
    /// trampoline's "catch" block.
    pub last_wasm_entry_sp: UnsafeCell<usize>,

    /// Same as `last_wasm_entry_sp`, but for the `fp` of the trampoline.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// The last trap handler from a host-to-wasm entry trampoline on the stack.
    ///
    /// This field is configured when the host calls into wasm by the trampoline
    /// itself. It stores the `pc` of an exception handler suitable to handle
    /// all traps (or uncaught exceptions).
    pub last_wasm_entry_trap_handler: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,

    /// A pointer to the embedder's `T` inside a `Store<T>`, for use with the
    /// `store-data-address` unsafe intrinsic.
    pub store_data: VmPtr<()>,

    /// The range, in addresses, of the guard page that is currently in use.
    ///
    /// This field is used when signal handlers are run to determine whether a
    /// faulting address lies within the guard page of an async stack for
    /// example. If this happens then the signal handler aborts with a stack
    /// overflow message similar to what would happen had the stack overflow
    /// happened on the main thread. This field is, by default a null..null
    /// range indicating that no async guard is in use (aka no fiber). In such a
    /// situation while this field is read it'll never classify a fault as a
    /// guard page fault.
    pub async_guard_range: Range<*mut u8>,
}
1286
impl VMStoreContext {
    /// From the current saved trampoline FP, get the FP of the last
    /// Wasm frame. If the current saved trampoline FP is null, return
    /// null.
    ///
    /// We store only the trampoline FP, because (i) we need the
    /// trampoline FP, so we know the size (bottom) of the last Wasm
    /// frame; and (ii) the last Wasm frame, just above the trampoline
    /// frame, can be recovered via the FP chain.
    ///
    /// # Safety
    ///
    /// This function requires that the `last_wasm_exit_trampoline_fp`
    /// field either points to an active trampoline frame or is a null
    /// pointer.
    pub(crate) unsafe fn last_wasm_exit_fp(&self) -> usize {
        // SAFETY: the unsafe cell is safe to load (no other threads
        // will be writing our store when we have control), and the
        // helper function's safety condition is the same as ours.
        unsafe {
            let trampoline_fp = *self.last_wasm_exit_trampoline_fp.get();
            Self::wasm_exit_fp_from_trampoline_fp(trampoline_fp)
        }
    }

    /// From any saved trampoline FP, get the FP of the last Wasm
    /// frame. If the given trampoline FP is null, return null.
    ///
    /// This differs from `last_wasm_exit_fp()` above in that it
    /// allows accessing activations further up the stack as well,
    /// e.g. via `CallThreadState::old_state`.
    ///
    /// # Safety
    ///
    /// This function requires that the provided FP value is valid,
    /// and points to an active trampoline frame, or is null.
    ///
    /// This function depends on the invariant that on all supported
    /// architectures, we store the previous FP value under the
    /// current FP. This is a property of our ABI that we control and
    /// ensure.
    pub(crate) unsafe fn wasm_exit_fp_from_trampoline_fp(trampoline_fp: usize) -> usize {
        if trampoline_fp != 0 {
            // SAFETY: We require that trampoline_fp points to a valid
            // frame, which will (by definition) contain an old FP value
            // that we can load.
            unsafe { *(trampoline_fp as *const usize) }
        } else {
            // A null trampoline FP means no Wasm frames are on the stack, so
            // propagate null.
            0
        }
    }
}
1339
// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMStoreContext`. (The raw-pointer `async_guard_range` field
// also inhibits the auto impls.)
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}
1349
1350impl Default for VMStoreContext {
1351 fn default() -> VMStoreContext {
1352 VMStoreContext {
1353 fuel_consumed: UnsafeCell::new(0),
1354 epoch_deadline: UnsafeCell::new(0),
1355 execution_version: 0,
1356 stack_limit: UnsafeCell::new(usize::max_value()),
1357 gc_heap: VMMemoryDefinition {
1358 base: NonNull::dangling().into(),
1359 current_length: AtomicUsize::new(0),
1360 },
1361 last_wasm_exit_trampoline_fp: UnsafeCell::new(0),
1362 last_wasm_exit_pc: UnsafeCell::new(0),
1363 last_wasm_entry_fp: UnsafeCell::new(0),
1364 last_wasm_entry_sp: UnsafeCell::new(0),
1365 last_wasm_entry_trap_handler: UnsafeCell::new(0),
1366 stack_chain: UnsafeCell::new(VMStackChain::Absent),
1367 async_guard_range: ptr::null_mut()..ptr::null_mut(),
1368 store_data: VmPtr::dangling(),
1369 }
1370 }
1371}
1372
#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// Verify that every field of `VMStoreContext` lives at the offset that
    /// `wasmtime_environ` computes for compiled code.
    #[test]
    fn field_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        let p = &offsets.ptr;

        assert_eq!(
            offset_of!(VMStoreContext, stack_limit),
            usize::from(p.vmstore_context_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStoreContext, fuel_consumed),
            usize::from(p.vmstore_context_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMStoreContext, epoch_deadline),
            usize::from(p.vmstore_context_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMStoreContext, execution_version),
            usize::from(p.vmstore_context_execution_version())
        );

        // The GC heap is an embedded `VMMemoryDefinition`, so its interior
        // fields are checked relative to the struct-level offset.
        let gc_heap = offset_of!(VMStoreContext, gc_heap);
        assert_eq!(gc_heap, usize::from(p.vmstore_context_gc_heap()));
        assert_eq!(
            gc_heap + offset_of!(VMMemoryDefinition, base),
            usize::from(p.vmstore_context_gc_heap_base())
        );
        assert_eq!(
            gc_heap + offset_of!(VMMemoryDefinition, current_length),
            usize::from(p.vmstore_context_gc_heap_current_length())
        );

        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_trampoline_fp),
            usize::from(p.vmstore_context_last_wasm_exit_trampoline_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_pc),
            usize::from(p.vmstore_context_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_fp),
            usize::from(p.vmstore_context_last_wasm_entry_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_sp),
            usize::from(p.vmstore_context_last_wasm_entry_sp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_trap_handler),
            usize::from(p.vmstore_context_last_wasm_entry_trap_handler())
        );
        assert_eq!(
            offset_of!(VMStoreContext, stack_chain),
            usize::from(p.vmstore_context_stack_chain())
        );
        assert_eq!(
            offset_of!(VMStoreContext, store_data),
            usize::from(p.vmstore_context_store_data())
        );
    }
}
1441
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    // Magic value used to sanity-check pointer casts; kept at offset 0 and
    // checked in `VMContext::from_opaque`.
    _magic: u32,
}
1454
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that `opaque` actually points to a
    /// `VMContext`; the magic check below only runs in debug builds.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        unsafe {
            debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        }
        opaque.cast()
    }
}
1481
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,

    /// A WebAssembly `exnref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    exnref: u32,
}
1587
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
// (The raw-pointer `funcref` field would otherwise suppress these impls.)
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1600
1601impl fmt::Debug for ValRaw {
1602 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1603 struct Hex<T>(T);
1604 impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1605 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1606 let bytes = mem::size_of::<T>();
1607 let hex_digits_per_byte = 2;
1608 let hex_digits = bytes * hex_digits_per_byte;
1609 write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1610 }
1611 }
1612
1613 unsafe {
1614 f.debug_struct("ValRaw")
1615 .field("i32", &Hex(self.i32))
1616 .field("i64", &Hex(self.i64))
1617 .field("f32", &Hex(self.f32))
1618 .field("f64", &Hex(self.f64))
1619 .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1620 .field("funcref", &self.funcref)
1621 .field("externref", &Hex(self.externref))
1622 .field("anyref", &Hex(self.anyref))
1623 .field("exnref", &Hex(self.exnref))
1624 .finish()
1625 }
1626 }
1627}
1628
impl ValRaw {
    /// Create a null reference that is compatible with any of
    /// `{any,extern,func,exn}ref`.
    pub fn null() -> ValRaw {
        // An all-zeros payload is the null representation for every reference
        // type; the debug asserts below double-check that invariant.
        unsafe {
            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
            debug_assert_eq!(raw.get_anyref(), 0);
            debug_assert_eq!(raw.get_exnref(), 0);
            debug_assert_eq!(raw.get_externref(), 0);
            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
            raw
        }
    }

    /// Creates a WebAssembly `i32` value
    #[inline]
    pub fn i32(i: i32) -> ValRaw {
        // Note that this is intentionally not setting the `i32` field, instead
        // setting the `i64` field with a zero-extended version of `i`. For more
        // information on this see the comments on `Lower for Result` in the
        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
        // otherwise constrained to guarantee that the initial 64-bits are
        // always initialized.
        ValRaw::u64(i.cast_unsigned().into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn i64(i: i64) -> ValRaw {
        ValRaw { i64: i.to_le() }
    }

    /// Creates a WebAssembly `i32` value from an unsigned Rust integer.
    #[inline]
    pub fn u32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `i64` value from an unsigned Rust integer.
    #[inline]
    pub fn u64(i: u64) -> ValRaw {
        ValRaw::i64(i as i64)
    }

    /// Creates a WebAssembly `f32` value
    #[inline]
    pub fn f32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `f64` value
    #[inline]
    pub fn f64(i: u64) -> ValRaw {
        ValRaw { f64: i.to_le() }
    }

    /// Creates a WebAssembly `v128` value
    #[inline]
    pub fn v128(i: u128) -> ValRaw {
        ValRaw {
            v128: i.to_le_bytes(),
        }
    }

    /// Creates a WebAssembly `funcref` value
    #[inline]
    pub fn funcref(i: *mut c_void) -> ValRaw {
        // Only the address is byte-swapped; provenance is retained via
        // `map_addr`.
        ValRaw {
            funcref: i.map_addr(|i| i.to_le()),
        }
    }

    /// Creates a WebAssembly `externref` value
    #[inline]
    pub fn externref(e: u32) -> ValRaw {
        // Non-null GC references require the `gc` feature to be enabled.
        assert!(cfg!(feature = "gc") || e == 0);
        ValRaw {
            externref: e.to_le(),
        }
    }

    /// Creates a WebAssembly `anyref` value
    #[inline]
    pub fn anyref(r: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { anyref: r.to_le() }
    }

    /// Creates a WebAssembly `exnref` value
    #[inline]
    pub fn exnref(r: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { exnref: r.to_le() }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_i32(&self) -> i32 {
        unsafe { i32::from_le(self.i32) }
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_i64(&self) -> i64 {
        unsafe { i64::from_le(self.i64) }
    }

    /// Gets the WebAssembly `i32` value, viewed as unsigned.
    #[inline]
    pub fn get_u32(&self) -> u32 {
        self.get_i32().cast_unsigned()
    }

    /// Gets the WebAssembly `i64` value, viewed as unsigned.
    #[inline]
    pub fn get_u64(&self) -> u64 {
        self.get_i64().cast_unsigned()
    }

    /// Gets the WebAssembly `f32` value
    #[inline]
    pub fn get_f32(&self) -> u32 {
        unsafe { u32::from_le(self.f32) }
    }

    /// Gets the WebAssembly `f64` value
    #[inline]
    pub fn get_f64(&self) -> u64 {
        unsafe { u64::from_le(self.f64) }
    }

    /// Gets the WebAssembly `v128` value
    #[inline]
    pub fn get_v128(&self) -> u128 {
        unsafe { u128::from_le_bytes(self.v128) }
    }

    /// Gets the WebAssembly `funcref` value
    #[inline]
    pub fn get_funcref(&self) -> *mut c_void {
        // Reconstruct the pointer through the exposed-provenance API as the
        // counterpart to how `funcref` values are produced and stored.
        let addr = unsafe { usize::from_le(self.funcref.addr()) };
        core::ptr::with_exposed_provenance_mut(addr)
    }

    /// Gets the WebAssembly `externref` value
    #[inline]
    pub fn get_externref(&self) -> u32 {
        let externref = u32::from_le(unsafe { self.externref });
        assert!(cfg!(feature = "gc") || externref == 0);
        externref
    }

    /// Gets the WebAssembly `anyref` value
    #[inline]
    pub fn get_anyref(&self) -> u32 {
        let anyref = u32::from_le(unsafe { self.anyref });
        assert!(cfg!(feature = "gc") || anyref == 0);
        anyref
    }

    /// Gets the WebAssembly `exnref` value
    #[inline]
    pub fn get_exnref(&self) -> u32 {
        let exnref = u32::from_le(unsafe { self.exnref });
        assert!(cfg!(feature = "gc") || exnref == 0);
        exnref
    }

    /// Convert this `&ValRaw` into a pointer to its inner `VMGcRef`.
    ///
    /// Returns `None` when the stored `anyref` payload is zero (null).
    #[cfg(feature = "gc")]
    pub(crate) fn as_vmgc_ref_ptr(&self) -> Option<NonNull<crate::vm::VMGcRef>> {
        if self.get_anyref() == 0 {
            return None;
        }
        let ptr = &raw const self.anyref;
        let ptr = NonNull::new(ptr.cast_mut()).unwrap();
        Some(ptr.cast())
    }
}
1812
/// An "opaque" version of `VMContext` which must be explicitly casted to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are casted
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    // The 32-bit "magic" discriminant described above, used to sanity-check
    // that a cast targets the right concrete context type.
    pub(crate) magic: u32,
    // `PhantomPinned` makes this type `!Unpin` — presumably because compiled
    // code holds raw pointers to contexts, so they must not move; TODO confirm.
    _marker: marker::PhantomPinned,
}
1832
1833impl VMOpaqueContext {
1834 /// Helper function to clearly indicate that casts are desired.
1835 #[inline]
1836 pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1837 ptr.cast()
1838 }
1839
1840 /// Helper function to clearly indicate that casts are desired.
1841 #[inline]
1842 pub fn from_vm_array_call_host_func_context(
1843 ptr: NonNull<VMArrayCallHostFuncContext>,
1844 ) -> NonNull<VMOpaqueContext> {
1845 ptr.cast()
1846 }
1847}