wasmtime/runtime/vm/vmcontext.rs

//! This file declares `VMContext` and several related structs which contain
//! fields that compiled wasm code accesses directly.

mod vm_host_func_context;

pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
use crate::prelude::*;
use crate::runtime::vm::{InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
use crate::store::StoreOpaque;
use crate::vm::stack_switching::VMStackChain;
use core::cell::UnsafeCell;
use core::ffi::c_void;
use core::fmt;
use core::marker;
use core::mem::{self, MaybeUninit};
use core::ops::Range;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicUsize, Ordering};
use wasmtime_environ::{
    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
    DefinedTagIndex, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
};

/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;
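
// A minimal sketch of a host function written against the array calling
// convention above. This module, the function, and its add-two-i32s behavior
// are illustrative assumptions and not part of Wasmtime's API surface.
#[cfg(test)]
mod example_array_call_native {
    use super::*;

    /// Reads two `i32` arguments from the `ValRaw` buffer, writes one `i32`
    /// result back into slot 0, and returns `true` to signal "no trap".
    unsafe extern "C" fn add_i32s(
        _callee: NonNull<VMOpaqueContext>,
        _caller: NonNull<VMContext>,
        args: NonNull<ValRaw>,
        capacity: usize,
    ) -> bool {
        debug_assert!(capacity >= 2);
        unsafe {
            let a = args.as_ref().get_i32();
            let b = args.as_ptr().add(1).read().get_i32();
            args.as_ptr().write(ValRaw::i32(a.wrapping_add(b)));
        }
        true
    }

    // Type-check that the sketch actually matches the alias.
    const _: VMArrayCallNative = add_i32s;
}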

/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);

/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallNative` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);

/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}

#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, wasm_call),
            usize::from(offsets.vmfunction_import_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, array_call),
            usize::from(offsets.vmfunction_import_array_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}

/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}

#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}

#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtable_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
        assert_eq!(
            offset_of!(VMTableImport, vmctx),
            usize::from(offsets.vmtable_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTableImport, index),
            usize::from(offsets.vmtable_import_index())
        );
    }

    #[test]
    fn ensure_sizes_match() {
        // Because we use `VMTableImport` for recording tables used by components, we
        // want to make sure that the size calculations between `VMOffsets` and
        // `VMComponentOffsets` stay the same.
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let vm_offsets = VMOffsets::new(HostPtr, &module);
        let component = Component::default();
        let vm_component_offsets = VMComponentOffsets::new(HostPtr, &component);
        assert_eq!(
            vm_offsets.size_of_vmtable_import(),
            vm_component_offsets.size_of_vmtable_import()
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}

#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, vmctx),
            usize::from(offsets.vmmemory_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, index),
            usize::from(offsets.vmmemory_import_index())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike functions, tables, and memories, a global may be owned by
/// the host rather than by an instance, so the `vmctx` pointer below is
/// optional and its meaning is dictated by the `kind` field.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}

/// The kinds of globals that Wasmtime has.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
}

// SAFETY: the above enum is repr(C) and stores nothing else
unsafe impl VmSafe for VMGlobalKind {}
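
// A hedged sketch of the `kind`/`vmctx` pairing documented above: a
// host-owned global carries no context pointer. This module is illustrative
// and not part of the original file; the dangling `from` pointer is never
// dereferenced.
#[cfg(test)]
mod example_vmglobal_import {
    use super::{VMGlobalDefinition, VMGlobalImport, VMGlobalKind};
    use core::ptr::NonNull;
    use wasmtime_environ::DefinedGlobalIndex;

    #[test]
    fn host_globals_have_no_vmctx() {
        let import = VMGlobalImport {
            from: NonNull::<VMGlobalDefinition>::dangling().into(),
            vmctx: None,
            kind: VMGlobalKind::Host(DefinedGlobalIndex::from_u32(0)),
        };
        assert!(import.vmctx.is_none());
        match import.kind {
            VMGlobalKind::Host(index) => assert_eq!(index.as_u32(), 0),
            _ => unreachable!(),
        }
    }
}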

#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}

#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtag_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagImport>(),
            usize::from(offsets.size_of_vmtag_import())
        );
        assert_eq!(
            offset_of!(VMTagImport, from),
            usize::from(offsets.vmtag_import_from())
        );
        assert_eq!(
            offset_of!(VMTagImport, vmctx),
            usize::from(offsets.vmtag_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTagImport, index),
            usize::from(offsets.vmtag_import_index())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}

impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value, i.e. a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    #[inline]
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    #[inline]
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = unsafe { &*ptr };
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
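
// A small sketch (not in the original file) exercising the relaxed
// `current_length()` accessor: the value stored at construction is read back
// unchanged. The dangling base pointer is an assumption for illustration and
// is never dereferenced.
#[cfg(test)]
mod example_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::ptr::NonNull;
    use core::sync::atomic::AtomicUsize;

    #[test]
    fn relaxed_length_roundtrip() {
        let def = VMMemoryDefinition {
            base: NonNull::<u8>::dangling().into(),
            current_length: AtomicUsize::new(2 << 16), // two 64 KiB Wasm pages
        };
        assert_eq!(def.current_length(), 2 << 16);
    }
}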

#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}

#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}

/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}

#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f32; 4]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f64; 2]>());
    }

    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}

impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        unsafe {
            match wasm_ty {
                WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
                WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
                WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
                WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
                WasmValType::V128 => global.set_u128(raw.get_v128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => {
                        let r = VMGcRef::from_raw_u32(raw.get_externref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Any => {
                        let r = VMGcRef::from_raw_u32(raw.get_anyref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                    WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
                    WasmHeapTopType::Exn => {
                        let r = VMGcRef::from_raw_u32(raw.get_exnref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                },
            }
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        unsafe {
            Ok(match wasm_ty {
                WasmValType::I32 => ValRaw::i32(*self.as_i32()),
                WasmValType::I64 => ValRaw::i64(*self.as_i64()),
                WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
                WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
                WasmValType::V128 => ValRaw::v128(self.get_u128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                        Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }),
                    WasmHeapTopType::Any => ValRaw::anyref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Exn => ValRaw::exnref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                },
            })
        }
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i32>()) }
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>()) }
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as a u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i64>()) }
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>()) }
    }

    /// Return a reference to the value as a u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as a u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f32>()) }
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>()) }
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f64>()) }
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>()) }
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        unsafe { u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>())) }
    }

    /// Sets the 128-bit vector values.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        unsafe {
            *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
        }
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = unsafe { (*raw_ptr).as_ref() };
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe {
            &mut *(self
                .storage
                .as_mut()
                .as_mut_ptr()
                .cast::<MaybeUninit<Option<VMGcRef>>>())
        };

        store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
        store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        unsafe { *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>()) }
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>()) }
    }
}
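
// A hedged sketch (not in the original file) of the typed-view accessors
// above: write through `as_i32_mut`, then reinterpret the same 16-byte
// storage through `as_i32` and `as_u32`.
#[cfg(test)]
mod example_vmglobal_accessors {
    use super::VMGlobalDefinition;

    #[test]
    fn i32_view_roundtrip() {
        let mut global = VMGlobalDefinition::new();
        unsafe {
            *global.as_i32_mut() = -1;
            assert_eq!(*global.as_i32(), -1);
            // The same bits viewed unsigned.
            assert_eq!(*global.as_u32(), u32::MAX);
        }
    }
}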

#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offsets.size_of_vmshared_type_index())
        );
    }
}

/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}

#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtag_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagDefinition>(),
            usize::from(offsets.ptr.size_of_vmtag_definition())
        );
    }

    #[test]
    fn check_vmtag_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_tags_begin() % 16, 0);
    }
}

/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}

impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the pointer portion of `args_and_results` as the `ValRaw` buffer
    /// * the length portion of `args_and_results` as the buffer's capacity
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns whether a trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the callee's `array_call` target must be valid machine code if `pulley`
    /// is `None`, or valid Pulley bytecode if `pulley` is `Some`. Additionally
    /// `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        me: NonNull<VMFuncRef>,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        match pulley {
            Some(vm) => unsafe { Self::array_call_interpreted(me, vm, caller, args_and_results) },
            None => unsafe { Self::array_call_native(me, caller, args_and_results) },
        }
    }

    unsafe fn array_call_interpreted(
        me: NonNull<VMFuncRef>,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If the callee's `vmctx` is actually a `VMArrayCallHostFuncContext`
        // then skip the interpreter, even though it's available, as
        // `array_call` will be native code.
        unsafe {
            if me.as_ref().vmctx.as_non_null().as_ref().magic
                == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
            {
                return Self::array_call_native(me, caller, args_and_results);
            }
            vm.call(
                me.as_ref().array_call.as_non_null().cast(),
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results,
            )
        }
    }

    #[inline]
    unsafe fn array_call_native(
        me: NonNull<VMFuncRef>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        unsafe {
            union GetNativePointer {
                native: VMArrayCallNative,
                ptr: NonNull<VMArrayCallFunction>,
            }
            let native = GetNativePointer {
                ptr: me.as_ref().array_call.as_non_null(),
            }
            .native;
            native(
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results.cast(),
                args_and_results.len(),
            )
        }
    }
}
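
// A hedged sketch of driving a call through `VMFuncRef::array_call` from the
// host. `funcref` and `caller_vmctx` are hypothetical values that a real
// caller would obtain from a live instance while upholding the safety
// contract documented above:
//
//     let mut space = [ValRaw::i32(1), ValRaw::i32(2)];
//     let ok = unsafe {
//         VMFuncRef::array_call(
//             funcref,                       // NonNull<VMFuncRef>
//             None,                          // no Pulley interpreter in use
//             caller_vmctx,                  // NonNull<VMContext>
//             NonNull::from(&mut space[..]), // args in, results out
//         )
//     };
//     assert!(ok, "call trapped; the trap was recorded in TLS");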

#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFuncRef>(),
            usize::from(offsets.ptr.size_of_vm_func_ref())
        );
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(offsets.ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(offsets.ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(offsets.ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(offsets.ptr.vm_func_ref_vmctx())
        );
    }
}

macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self> {
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty size) => (usize);
    (@ty vmctx) => (NonNull<VMContext>);
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

wasmtime_environ::foreach_builtin_function!(define_builtin_array);

const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
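
// A hedged usage sketch (not in the original file): before entering Wasm,
// particularly under the Pulley interpreter, the runtime calls
// `expose_provenance()` on the builtins array held in instance state so that
// each builtin's address may later be rematerialized from a raw pointer.
// `builtins` below is a hypothetical `&VMBuiltinFunctionsArray`:
//
//     let ptr: NonNull<VMBuiltinFunctionsArray> = builtins.expose_provenance();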

/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register in the trampoline used
    /// to call from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just
    /// before calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames
    /// when walking the stack. Note that we record the FP of the
    /// *trampoline*'s frame, not the last Wasm frame, because we need
    /// to know the SP (bottom of frame) of the last Wasm frame as
    /// well in case we need to resume to an exception handler in that
    /// frame. The FP of the last Wasm frame can be recovered by
    /// loading the saved FP value at this FP address.
    pub last_wasm_exit_trampoline_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline. This member is `0` when Wasm
    /// is not running, and it's set to nonzero once a host-to-wasm trampoline
    /// is executed.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member is not changed, meaning that the
    /// previous activation pointed to by `last_wasm_exit_trampoline_fp` still
    /// describes the last set of Wasm frames on the stack.
    ///
    /// This field is saved/restored during fiber suspension and resumption as
    /// part of `CallThreadState::swap`.
    ///
    /// This field is used to find the end of a contiguous sequence of Wasm
    /// frames when walking the stack. Additionally it's used when a trap is
    /// raised as part of the set of parameters used to resume in the entry
    /// trampoline's "catch" block.
    pub last_wasm_entry_sp: UnsafeCell<usize>,

    /// Same as `last_wasm_entry_sp`, but for the `fp` of the trampoline.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// The last trap handler from a host-to-wasm entry trampoline on the stack.
    ///
    /// This field is configured when the host calls into wasm by the trampoline
    /// itself. It stores the `pc` of an exception handler suitable to handle
    /// all traps (or uncaught exceptions).
    pub last_wasm_entry_trap_handler: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,

    /// The range, in addresses, of the guard page that is currently in use.
    ///
    /// This field is used when signal handlers are run to determine whether a
    /// faulting address lies within the guard page of an async stack for
    /// example. If this happens then the signal handler aborts with a stack
    /// overflow message similar to what would happen had the stack overflow
    /// happened on the main thread. By default this field is a null..null
    /// range, indicating that no async guard is in use (i.e. no fiber). In that
    /// situation this field may still be read, but it will never classify a
    /// fault as a guard-page fault.
    pub async_guard_range: Range<*mut u8>,
}

impl VMStoreContext {
    /// From the current saved trampoline FP, get the FP of the last
    /// Wasm frame. If the current saved trampoline FP is null, return
    /// null.
    ///
    /// We store only the trampoline FP, because (i) we need the
    /// trampoline FP, so we know the size (bottom) of the last Wasm
    /// frame; and (ii) the last Wasm frame, just above the trampoline
    /// frame, can be recovered via the FP chain.
    ///
    /// # Safety
    ///
    /// This function requires that the `last_wasm_exit_trampoline_fp`
    /// field either points to an active trampoline frame or is a null
    /// pointer.
    pub(crate) unsafe fn last_wasm_exit_fp(&self) -> usize {
        // SAFETY: the unsafe cell is safe to load (no other threads
        // will be writing our store when we have control), and the
        // helper function's safety condition is the same as ours.
        unsafe {
            let trampoline_fp = *self.last_wasm_exit_trampoline_fp.get();
            Self::wasm_exit_fp_from_trampoline_fp(trampoline_fp)
        }
    }

    /// From any saved trampoline FP, get the FP of the last Wasm
    /// frame. If the given trampoline FP is null, return null.
    ///
    /// This differs from `last_wasm_exit_fp()` above in that it
    /// allows accessing activations further up the stack as well,
    /// e.g. via `CallThreadState::old_state`.
    ///
    /// # Safety
    ///
    /// This function requires that the provided FP value is valid,
    /// and points to an active trampoline frame, or is null.
    ///
    /// This function depends on the invariant that on all supported
    /// architectures, we store the previous FP value under the
    /// current FP. This is a property of our ABI that we control and
    /// ensure.
    pub(crate) unsafe fn wasm_exit_fp_from_trampoline_fp(trampoline_fp: usize) -> usize {
        if trampoline_fp != 0 {
            // SAFETY: We require that trampoline_fp points to a valid
            // frame, which will (by definition) contain an old FP value
            // that we can load.
            unsafe { *(trampoline_fp as *const usize) }
        } else {
            0
        }
    }
}
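
// A hedged sketch (not in the original file) of the FP-chain invariant used
// by `wasm_exit_fp_from_trampoline_fp`: the previous FP is stored at the
// address the current FP points to, so a fake one-slot "frame" on the Rust
// stack is enough to demonstrate the lookup. The value 0x1234 stands in for
// a real Wasm frame pointer.
#[cfg(test)]
mod example_trampoline_fp_chain {
    use super::VMStoreContext;

    #[test]
    fn recovers_wasm_fp_from_fake_trampoline_frame() {
        let fake_frame = [0x1234usize];
        let trampoline_fp = fake_frame.as_ptr() as usize;
        unsafe {
            assert_eq!(
                VMStoreContext::wasm_exit_fp_from_trampoline_fp(trampoline_fp),
                0x1234
            );
            // A null trampoline FP maps to a null Wasm FP.
            assert_eq!(VMStoreContext::wasm_exit_fp_from_trampoline_fp(0), 0);
        }
    }
}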

// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}

impl Default for VMStoreContext {
    fn default() -> VMStoreContext {
        VMStoreContext {
            fuel_consumed: UnsafeCell::new(0),
            epoch_deadline: UnsafeCell::new(0),
            stack_limit: UnsafeCell::new(usize::max_value()),
            gc_heap: VMMemoryDefinition {
                base: NonNull::dangling().into(),
                current_length: AtomicUsize::new(0),
            },
            last_wasm_exit_trampoline_fp: UnsafeCell::new(0),
            last_wasm_exit_pc: UnsafeCell::new(0),
            last_wasm_entry_fp: UnsafeCell::new(0),
            last_wasm_entry_sp: UnsafeCell::new(0),
            last_wasm_entry_trap_handler: UnsafeCell::new(0),
            stack_chain: UnsafeCell::new(VMStackChain::Absent),
            async_guard_range: ptr::null_mut()..ptr::null_mut(),
        }
    }
}
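
// A hedged sketch (not in the original file) checking the documented "0 when
// no Wasm is on the stack" convention for a freshly created store context.
#[cfg(test)]
mod example_vmstore_context_default {
    use super::VMStoreContext;

    #[test]
    fn default_markers_are_cleared() {
        let ctx = VMStoreContext::default();
        unsafe {
            assert_eq!(*ctx.last_wasm_exit_trampoline_fp.get(), 0);
            assert_eq!(*ctx.last_wasm_exit_pc.get(), 0);
            assert_eq!(*ctx.last_wasm_entry_sp.get(), 0);
            // With a null trampoline FP the derived Wasm exit FP is null too.
            assert_eq!(ctx.last_wasm_exit_fp(), 0);
        }
    }
}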

#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn field_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMStoreContext, stack_limit),
            usize::from(offsets.ptr.vmstore_context_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStoreContext, fuel_consumed),
            usize::from(offsets.ptr.vmstore_context_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMStoreContext, epoch_deadline),
            usize::from(offsets.ptr.vmstore_context_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap),
            usize::from(offsets.ptr.vmstore_context_gc_heap())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmstore_context_gc_heap_base())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmstore_context_gc_heap_current_length())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_trampoline_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_trampoline_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_pc),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_sp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_sp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_trap_handler),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_trap_handler())
        );
        assert_eq!(
            offset_of!(VMStoreContext, stack_chain),
            usize::from(offsets.ptr.vmstore_context_stack_chain())
        )
    }
}

/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is nearly empty: the sizes of the real fields are dynamic
/// and can't be described in Rust's type system, so sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    _magic: u32,
}

impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        unsafe {
            debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        }
        opaque.cast()
    }
}
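
// A hedged sketch of the opaque-context round trip. `vmctx` is a hypothetical
// `NonNull<VMContext>` obtained from a live instance, and the final cast is
// only valid because core Wasm instances write `VMCONTEXT_MAGIC` into the
// magic field at creation time:
//
//     let opaque = VMOpaqueContext::from_vmcontext(vmctx);
//     let roundtrip = unsafe { VMContext::from_opaque(opaque) };
//     assert_eq!(roundtrip, vmctx);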

/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
1409    /// payload bits as it gets passed around the system. Otherwise this
1410    /// `u32` value is the return value of `f32::to_bits` in Rust.
1411    ///
1412    /// This value is always stored in a little-endian format.
1413    f32: u32,
1414
1415    /// A WebAssembly `f64` value.
1416    ///
1417    /// Note that the payload here is a Rust `u64`. This is to allow passing any
1418    /// representation of NaN into WebAssembly without risk of changing NaN
1419    /// payload bits as it gets passed around the system. Otherwise this
1420    /// `u64` value is the return value of `f64::to_bits` in Rust.
1421    ///
1422    /// This value is always stored in a little-endian format.
1423    f64: u64,
1424
1425    /// A WebAssembly `v128` value.
1426    ///
1427    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
1428    /// but note that `v128` in WebAssembly is often considered a vector type
1429    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
1430    /// of the underlying bits is left up to the instructions which consume
1431    /// this value.
1432    ///
1433    /// This value is always stored in a little-endian format.
1434    v128: [u8; 16],
1435
1436    /// A WebAssembly `funcref` value (or one of its subtypes).
1437    ///
1438    /// The payload here is a pointer which is runtime-defined. This is one of
1439    /// the main points of unsafety about the `ValRaw` type as the validity of
1440    /// the pointer here is not easily verified and must be preserved by
1441    /// carefully calling the correct functions throughout the runtime.
1442    ///
1443    /// This value is always stored in a little-endian format.
1444    funcref: *mut c_void,
1445
1446    /// A WebAssembly `externref` value (or one of its subtypes).
1447    ///
1448    /// The payload here is a compressed pointer value which is
1449    /// runtime-defined. This is one of the main points of unsafety about the
1450    /// `ValRaw` type as the validity of the pointer here is not easily verified
1451    /// and must be preserved by carefully calling the correct functions
1452    /// throughout the runtime.
1453    ///
1454    /// This value is always stored in a little-endian format.
1455    externref: u32,
1456
1457    /// A WebAssembly `anyref` value (or one of its subtypes).
1458    ///
1459    /// The payload here is a compressed pointer value which is
1460    /// runtime-defined. This is one of the main points of unsafety about the
1461    /// `ValRaw` type as the validity of the pointer here is not easily verified
1462    /// and must be preserved by carefully calling the correct functions
1463    /// throughout the runtime.
1464    ///
1465    /// This value is always stored in a little-endian format.
1466    anyref: u32,
1467
1468    /// A WebAssembly `exnref` value (or one of its subtypes).
1469    ///
1470    /// The payload here is a compressed pointer value which is
1471    /// runtime-defined. This is one of the main points of unsafety about the
1472    /// `ValRaw` type as the validity of the pointer here is not easily verified
1473    /// and must be preserved by carefully calling the correct functions
1474    /// throughout the runtime.
1475    ///
1476    /// This value is always stored in a little-endian format.
1477    exnref: u32,
1478}
1479
1480// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API, so these
1481// are some simple assertions about the shape of the type, which are
1482// additionally checked on the C side.
1483const _: () = {
1484    assert!(mem::size_of::<ValRaw>() == 16);
1485    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
1486};
1487
1488// This type is just a bag-of-bits, so it's up to the caller to deal with
1489// threading concerns and to access the interior bits safely.
1490unsafe impl Send for ValRaw {}
1491unsafe impl Sync for ValRaw {}
1492
1493impl fmt::Debug for ValRaw {
1494    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1495        struct Hex<T>(T);
1496        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1497            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1498                let bytes = mem::size_of::<T>();
1499                let hex_digits_per_byte = 2;
1500                let hex_digits = bytes * hex_digits_per_byte;
1501                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1502            }
1503        }
1504
1505        unsafe {
1506            f.debug_struct("ValRaw")
1507                .field("i32", &Hex(self.i32))
1508                .field("i64", &Hex(self.i64))
1509                .field("f32", &Hex(self.f32))
1510                .field("f64", &Hex(self.f64))
1511                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1512                .field("funcref", &self.funcref)
1513                .field("externref", &Hex(self.externref))
1514                .field("anyref", &Hex(self.anyref))
1515                .field("exnref", &Hex(self.exnref))
1516                .finish()
1517        }
1518    }
1519}
1520
1521impl ValRaw {
1522    /// Create a null reference that is compatible with any of
1523    /// `{any,extern,func,exn}ref`.
1524    pub fn null() -> ValRaw {
1525        unsafe {
1526            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1527            debug_assert_eq!(raw.get_anyref(), 0);
1528            debug_assert_eq!(raw.get_exnref(), 0);
1529            debug_assert_eq!(raw.get_externref(), 0);
1530            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1531            raw
1532        }
1533    }
1534
1535    /// Creates a WebAssembly `i32` value
1536    #[inline]
1537    pub fn i32(i: i32) -> ValRaw {
1538        // Note that this is intentionally not setting the `i32` field, instead
1539        // setting the `i64` field with a zero-extended version of `i`. For more
1540        // information on this see the comments on `Lower for Result` in the
1541        // `wasmtime` crate. Beyond that, all `ValRaw` constructors are
1542        // constrained to guarantee that the initial 64 bits are always
1543        // initialized.
1544        ValRaw::u64(i.cast_unsigned().into())
1545    }
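
    // A minimal sketch of the zero-extension guarantee above (illustrative
    // only; `get_u64` reads back the full 64-bit payload):
    //
    //     assert_eq!(ValRaw::i32(-1).get_i32(), -1);
    //     assert_eq!(ValRaw::i32(-1).get_u64(), 0x0000_0000_ffff_ffff);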
1546
1547    /// Creates a WebAssembly `i64` value
1548    #[inline]
1549    pub fn i64(i: i64) -> ValRaw {
1550        ValRaw { i64: i.to_le() }
1551    }
1552
1553    /// Creates a WebAssembly `i32` value from an unsigned `u32`
1554    #[inline]
1555    pub fn u32(i: u32) -> ValRaw {
1556        // See comments in `ValRaw::i32` for why this sets the upper
1557        // 32 bits as well.
1558        ValRaw::u64(i.into())
1559    }
1560
1561    /// Creates a WebAssembly `i64` value from an unsigned `u64`
1562    #[inline]
1563    pub fn u64(i: u64) -> ValRaw {
1564        ValRaw::i64(i as i64)
1565    }
1566
1567    /// Creates a WebAssembly `f32` value from its raw bits (`f32::to_bits`)
1568    #[inline]
1569    pub fn f32(i: u32) -> ValRaw {
1570        // See comments in `ValRaw::i32` for why this sets the upper
1571        // 32 bits as well.
1572        ValRaw::u64(i.into())
1573    }
1574
1575    /// Creates a WebAssembly `f64` value from its raw bits (`f64::to_bits`)
1576    #[inline]
1577    pub fn f64(i: u64) -> ValRaw {
1578        ValRaw { f64: i.to_le() }
1579    }
1580
1581    /// Creates a WebAssembly `v128` value
1582    #[inline]
1583    pub fn v128(i: u128) -> ValRaw {
1584        ValRaw {
1585            v128: i.to_le_bytes(),
1586        }
1587    }
1588
1589    /// Creates a WebAssembly `funcref` value
1590    #[inline]
1591    pub fn funcref(i: *mut c_void) -> ValRaw {
1592        ValRaw {
1593            funcref: i.map_addr(|i| i.to_le()),
1594        }
1595    }
1596
1597    /// Creates a WebAssembly `externref` value
1598    #[inline]
1599    pub fn externref(e: u32) -> ValRaw {
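        // Non-null (non-zero) GC references are only representable when the
        // `gc` feature is compiled in; without it the only valid value is 0.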
1600        assert!(cfg!(feature = "gc") || e == 0);
1601        ValRaw {
1602            externref: e.to_le(),
1603        }
1604    }
1605
1606    /// Creates a WebAssembly `anyref` value
1607    #[inline]
1608    pub fn anyref(r: u32) -> ValRaw {
1609        assert!(cfg!(feature = "gc") || r == 0);
1610        ValRaw { anyref: r.to_le() }
1611    }
1612
1613    /// Creates a WebAssembly `exnref` value
1614    #[inline]
1615    pub fn exnref(r: u32) -> ValRaw {
1616        assert!(cfg!(feature = "gc") || r == 0);
1617        ValRaw { exnref: r.to_le() }
1618    }
1619
1620    /// Gets the WebAssembly `i32` value
1621    #[inline]
1622    pub fn get_i32(&self) -> i32 {
1623        unsafe { i32::from_le(self.i32) }
1624    }
1625
1626    /// Gets the WebAssembly `i64` value
1627    #[inline]
1628    pub fn get_i64(&self) -> i64 {
1629        unsafe { i64::from_le(self.i64) }
1630    }
1631
1632    /// Gets the WebAssembly `i32` value, viewed as unsigned
1633    #[inline]
1634    pub fn get_u32(&self) -> u32 {
1635        self.get_i32().cast_unsigned()
1636    }
1637
1638    /// Gets the WebAssembly `i64` value, viewed as unsigned
1639    #[inline]
1640    pub fn get_u64(&self) -> u64 {
1641        self.get_i64().cast_unsigned()
1642    }
1643
1644    /// Gets the WebAssembly `f32` value as its raw bits
1645    #[inline]
1646    pub fn get_f32(&self) -> u32 {
1647        unsafe { u32::from_le(self.f32) }
1648    }
1649
1650    /// Gets the WebAssembly `f64` value as its raw bits
1651    #[inline]
1652    pub fn get_f64(&self) -> u64 {
1653        unsafe { u64::from_le(self.f64) }
1654    }
1655
1656    /// Gets the WebAssembly `v128` value
1657    #[inline]
1658    pub fn get_v128(&self) -> u128 {
1659        unsafe { u128::from_le_bytes(self.v128) }
1660    }
1661
1662    /// Gets the WebAssembly `funcref` value
1663    #[inline]
1664    pub fn get_funcref(&self) -> *mut c_void {
1665        unsafe { self.funcref.map_addr(|i| usize::from_le(i)) }
1666    }
1667
1668    /// Gets the WebAssembly `externref` value
1669    #[inline]
1670    pub fn get_externref(&self) -> u32 {
1671        let externref = u32::from_le(unsafe { self.externref });
1672        assert!(cfg!(feature = "gc") || externref == 0);
1673        externref
1674    }
1675
1676    /// Gets the WebAssembly `anyref` value
1677    #[inline]
1678    pub fn get_anyref(&self) -> u32 {
1679        let anyref = u32::from_le(unsafe { self.anyref });
1680        assert!(cfg!(feature = "gc") || anyref == 0);
1681        anyref
1682    }
1683
1684    /// Gets the WebAssembly `exnref` value
1685    #[inline]
1686    pub fn get_exnref(&self) -> u32 {
1687        let exnref = u32::from_le(unsafe { self.exnref });
1688        assert!(cfg!(feature = "gc") || exnref == 0);
1689        exnref
1690    }
1691}
1692
1693/// An "opaque" version of `VMContext` which must be explicitly casted to a
1694/// target context.
1695///
1696/// This type expresses that the contexts stored in `VMFuncRef` can have any
1697/// type and carry no implicit structure. Neither Wasmtime nor
1698/// Cranelift-generated code can rely on the structure of an opaque context
1699/// in general; only the code which configured the context may rely on a
1700/// particular structure. The only guarantee is that the context pointer
1701/// configured for a `VMFuncRef` is passed as the first parameter to the
1702/// associated function.
1703///
1704/// Note that Wasmtime currently lays out all contexts that are cast to an
1705/// opaque context with a 32-bit "magic" value at the start, which debug
1706/// builds use to assert that the casts here are correct, giving at least a
1707/// little protection against incorrect casts.
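///
/// As a minimal sketch of the expected round-trip (illustrative only;
/// producing a valid `NonNull<VMContext>` in the first place is the
/// runtime's job):
///
/// ```ignore
/// let opaque = VMOpaqueContext::from_vmcontext(vmctx);
/// // In debug builds this re-checks the leading 32-bit magic value.
/// let roundtrip = unsafe { VMContext::from_opaque(opaque) };
/// assert_eq!(roundtrip, vmctx);
/// ```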
1708pub struct VMOpaqueContext {
1709    pub(crate) magic: u32,
1710    _marker: marker::PhantomPinned,
1711}
1712
1713impl VMOpaqueContext {
1714    /// Helper function to clearly indicate that casts are desired.
1715    #[inline]
1716    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1717        ptr.cast()
1718    }
1719
1720    /// Helper function to clearly indicate that casts are desired.
1721    #[inline]
1722    pub fn from_vm_array_call_host_func_context(
1723        ptr: NonNull<VMArrayCallHostFuncContext>,
1724    ) -> NonNull<VMOpaqueContext> {
1725        ptr.cast()
1726    }
1727}