wasmtime/runtime/vm/vmcontext.rs

//! This file declares `VMContext` and several related structs which contain
//! fields that compiled wasm code accesses directly.

mod vm_host_func_context;

pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
use crate::prelude::*;
use crate::runtime::vm::{InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
use crate::store::StoreOpaque;
use crate::vm::stack_switching::VMStackChain;
use core::cell::UnsafeCell;
use core::ffi::c_void;
use core::fmt;
use core::marker;
use core::mem::{self, MaybeUninit};
use core::ops::Range;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicUsize, Ordering};
use wasmtime_environ::{
    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
    DefinedTagIndex, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
};

/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;
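
// A hedged sketch of what a host implementation of `VMArrayCallNative` might
// look like; `example_add` is hypothetical and not part of the runtime. Two
// `i32` arguments arrive in the `ValRaw` buffer and the single `i32` result
// is written back in place over the first slot:
//
//     unsafe extern "C" fn example_add(
//         _callee: NonNull<VMOpaqueContext>,
//         _caller: NonNull<VMContext>,
//         args: NonNull<ValRaw>,
//         _capacity: usize, // always >= max(param count, result count); 2 here
//     ) -> bool {
//         unsafe {
//             let a = args.as_ptr().read().get_i32();
//             let b = args.as_ptr().add(1).read().get_i32();
//             args.as_ptr().write(ValRaw::i32(a.wrapping_add(b)));
//         }
//         true // success; `false` would mean a trap was recorded in TLS
//     }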

/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);

/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);

/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}

#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, wasm_call),
            usize::from(offsets.vmfunction_import_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, array_call),
            usize::from(offsets.vmfunction_import_array_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}

/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}

#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}

#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtable_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
        assert_eq!(
            offset_of!(VMTableImport, vmctx),
            usize::from(offsets.vmtable_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTableImport, index),
            usize::from(offsets.vmtable_import_index())
        );
    }

    #[test]
    fn ensure_sizes_match() {
        // Because we use `VMTableImport` for recording tables used by components, we
        // want to make sure that the size calculations between `VMOffsets` and
        // `VMComponentOffsets` stay the same.
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let vm_offsets = VMOffsets::new(HostPtr, &module);
        let component = Component::default();
        let vm_component_offsets = VMComponentOffsets::new(HostPtr, &component);
        assert_eq!(
            vm_offsets.size_of_vmtable_import(),
            vm_component_offsets.size_of_vmtable_import()
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}

#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, vmctx),
            usize::from(offsets.vmmemory_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, index),
            usize::from(offsets.vmmemory_import_index())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, the `vmctx` pointer
/// here is optional. Globals are never resized, so compiled code can reach an
/// imported global directly through `from`; `vmctx` merely records the owning
/// context and is `None` for host-defined globals (see `kind` below).
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}

/// The kinds of globals that Wasmtime has.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s.
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
}

// SAFETY: the above enum is repr(C) and stores nothing else
unsafe impl VmSafe for VMGlobalKind {}
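
// A hedged sketch of how the `kind`/`vmctx` pairing is interpreted
// (illustrative only; the real resolution logic lives elsewhere in the
// runtime):
//
//     match import.kind {
//         // `vmctx` is `None`; the global lives in the `StoreOpaque`.
//         VMGlobalKind::Host(_index) => { /* ... */ }
//         // `vmctx` is the owning instance's `VMContext`.
//         VMGlobalKind::Instance(_index) => { /* ... */ }
//         // `vmctx` is the owning component's `VMComponentContext`.
//         #[cfg(feature = "component-model")]
//         VMGlobalKind::ComponentFlags(_instance) => { /* ... */ }
//     }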

#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}

#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtag_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagImport>(),
            usize::from(offsets.size_of_vmtag_import())
        );
        assert_eq!(
            offset_of!(VMTagImport, from),
            usize::from(offsets.vmtag_import_from())
        );
        assert_eq!(
            offset_of!(VMTagImport, vmctx),
            usize::from(offsets.vmtag_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTagImport, index),
            usize::from(offsets.vmtag_import_index())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}

impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value: a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    #[inline]
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    #[inline]
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = unsafe { &*ptr };
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
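
// A hedged sketch of the bounds check compiled code performs against these
// two fields (illustrative only; real codegen comes from Cranelift/Winch and
// also folds in guard pages and static offsets):
//
//     unsafe fn checked_addr(
//         def: &VMMemoryDefinition,
//         index: usize,
//         access_size: usize,
//     ) -> Option<*mut u8> {
//         let len = def.current_length(); // relaxed load; never an over-estimate
//         if index.checked_add(access_size)? <= len {
//             Some(unsafe { def.base.as_non_null().as_ptr().add(index) })
//         } else {
//             None // out of bounds: real compiled code would trap here
//         }
//     }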

#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}
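
// A hedged sketch of how an element address is computed from these two fields
// (illustrative only; `elem_size` depends on the table's element type):
//
//     assert!(index < def.current_elements); // compiled code traps instead
//     let addr = unsafe { def.base.as_non_null().as_ptr().add(index * elem_size) };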

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}

#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}

/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}

#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f32; 4]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f64; 2]>());
    }

    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}

impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        unsafe {
            match wasm_ty {
                WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
                WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
                WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
                WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
                WasmValType::V128 => global.set_u128(raw.get_v128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => {
                        let r = VMGcRef::from_raw_u32(raw.get_externref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Any => {
                        let r = VMGcRef::from_raw_u32(raw.get_anyref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                    WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
                    WasmHeapTopType::Exn => {
                        let r = VMGcRef::from_raw_u32(raw.get_exnref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                },
            }
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        unsafe {
            Ok(match wasm_ty {
                WasmValType::I32 => ValRaw::i32(*self.as_i32()),
                WasmValType::I64 => ValRaw::i64(*self.as_i64()),
                WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
                WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
                WasmValType::V128 => ValRaw::v128(self.get_u128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                        Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }),
                    WasmHeapTopType::Any => ValRaw::anyref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Exn => ValRaw::exnref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                },
            })
        }
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i32>()) }
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>()) }
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as a u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i64>()) }
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>()) }
    }

    /// Return a reference to the value as a u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as a u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f32>()) }
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>()) }
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f64>()) }
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>()) }
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        unsafe { u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>())) }
    }

    /// Sets the 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        unsafe {
            *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
        }
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = unsafe { (*raw_ptr).as_ref() };
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe {
            &mut *(self
                .storage
                .as_mut()
                .as_mut_ptr()
                .cast::<MaybeUninit<Option<VMGcRef>>>())
        };

        store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
        store.write_gc_ref(dest, gc_ref)
    }

    /// Return the value as a raw `*mut VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        unsafe { *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>()) }
    }

    /// Return a mutable reference to the value as a `*mut VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>()) }
    }
}
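
// A minimal sketch, written as a test, of round-tripping values through the
// untyped 16-byte storage via the accessors above; this bit-level aliasing is
// exactly what `from_val_raw`/`to_val_raw` rely on.
#[cfg(test)]
mod test_vmglobal_accessors {
    use super::VMGlobalDefinition;

    #[test]
    fn global_storage_round_trip() {
        let mut g = VMGlobalDefinition::new();
        unsafe {
            *g.as_i32_mut() = -7;
            assert_eq!(*g.as_i32(), -7);
            // The same bytes, reinterpreted as unsigned.
            assert_eq!(*g.as_u32(), -7i32 as u32);
            // Vector values round-trip through the little-endian setter.
            g.set_u128(0x0102_0304_0506_0708_090a_0b0c_0d0e_0f10);
            assert_eq!(g.get_u128(), 0x0102_0304_0506_0708_090a_0b0c_0d0e_0f10);
        }
    }
}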

#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offsets.size_of_vmshared_type_index())
        );
    }
}

/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}

#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtag_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagDefinition>(),
            usize::from(offsets.ptr.size_of_vmtag_definition())
        );
    }

    #[test]
    fn check_vmtag_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_tags_begin() % 16, 0);
    }
}

/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
857    /// module and use that fill in `VMFunctionImport::wasm_call`. **However**
858    /// there is no guarantee that the Wasm module has a trampoline for this
859    /// function's signature. The Wasm module only has trampolines for its
860    /// types, and if this function isn't of one of those types, then the Wasm
861    /// module will not have a trampoline for it. This is actually okay, because
862    /// it means that the Wasm cannot actually call this function. But it does
863    /// mean that this field needs to be an `Option` even though it is non-null
864    /// the vast vast vast majority of the time.
865    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,
866
867    /// Function signature's type id.
868    pub type_index: VMSharedTypeIndex,
869
870    /// The VM state associated with this function.
871    ///
872    /// The actual definition of what this pointer points to depends on the
873    /// function being referenced: for core Wasm functions, this is a `*mut
874    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
875    /// component functions it is a `*mut VMComponentContext`.
876    pub vmctx: VmPtr<VMOpaqueContext>,
877    // If more elements are added here, remember to add offset_of tests below!
878}
879
880// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
881unsafe impl VmSafe for VMFuncRef {}
882
883impl VMFuncRef {
884    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
885    /// arguments.
886    ///
887    /// This will invoke the function pointer in the `array_call` field with:
888    ///
889    /// * the `callee` vmctx as `self.vmctx`
890    /// * the `caller` as `caller` specified here
891    /// * the args pointer as `args_and_results`
892    /// * the args length as `args_and_results`
893    ///
894    /// The `args_and_results` area must be large enough to both load all
895    /// arguments from and store all results to.
896    ///
897    /// Returns whether a trap was recorded in TLS for raising.
898    ///
899    /// # Unsafety
900    ///
901    /// This method is unsafe because it can be called with any pointers. They
902    /// must all be valid for this wasm function call to proceed. For example
903    /// the `caller` must be valid machine code if `pulley` is `None` or it must
904    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
905    /// must be large enough to handle all the arguments/results for this call.
906    ///
907    /// Note that the unsafety invariants to maintain here are not currently
908    /// exhaustively documented.
909    #[inline]
910    pub unsafe fn array_call(
911        me: NonNull<VMFuncRef>,
912        pulley: Option<InterpreterRef<'_>>,
913        caller: NonNull<VMContext>,
914        args_and_results: NonNull<[ValRaw]>,
915    ) -> bool {
916        match pulley {
917            Some(vm) => unsafe { Self::array_call_interpreted(me, vm, caller, args_and_results) },
918            None => unsafe { Self::array_call_native(me, caller, args_and_results) },
919        }
920    }
921
922    unsafe fn array_call_interpreted(
923        me: NonNull<VMFuncRef>,
924        vm: InterpreterRef<'_>,
925        caller: NonNull<VMContext>,
926        args_and_results: NonNull<[ValRaw]>,
927    ) -> bool {
928        // If `caller` is actually a `VMArrayCallHostFuncContext` then skip the
929        // interpreter, even though it's available, as `array_call` will be
930        // native code.
931        unsafe {
932            if me.as_ref().vmctx.as_non_null().as_ref().magic
933                == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
934            {
935                return Self::array_call_native(me, caller, args_and_results);
936            }
937            vm.call(
938                me.as_ref().array_call.as_non_null().cast(),
939                me.as_ref().vmctx.as_non_null(),
940                caller,
941                args_and_results,
942            )
943        }
944    }
945
946    #[inline]
947    unsafe fn array_call_native(
948        me: NonNull<VMFuncRef>,
949        caller: NonNull<VMContext>,
950        args_and_results: NonNull<[ValRaw]>,
951    ) -> bool {
952        unsafe {
953            union GetNativePointer {
954                native: VMArrayCallNative,
955                ptr: NonNull<VMArrayCallFunction>,
956            }
957            let native = GetNativePointer {
958                ptr: me.as_ref().array_call.as_non_null(),
959            }
960            .native;
961            native(
962                me.as_ref().vmctx.as_non_null(),
963                caller,
964                args_and_results.cast(),
965                args_and_results.len(),
966            )
967        }
968    }
969}
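
// A hedged sketch of driving `array_call` from the host side (illustrative
// only; `func_ref` and `caller` are assumed to be valid pointers obtained
// from the runtime, and the callee is assumed to take two `i32`s and return
// one):
//
//     let mut space = [ValRaw::i32(1), ValRaw::i32(2)];
//     let args_and_results = NonNull::from(&mut space[..]);
//     let ok = unsafe { VMFuncRef::array_call(func_ref, None, caller, args_and_results) };
//     if ok {
//         let _sum = space[0].get_i32(); // results overwrite the arguments
//     }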

#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFuncRef>(),
            usize::from(offsets.ptr.size_of_vm_func_ref())
        );
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(offsets.ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(offsets.ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(offsets.ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(offsets.ptr.vm_func_ref_vmctx())
        );
    }
}

macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self> {
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty size) => (usize);
    (@ty vmctx) => (NonNull<VMContext>);
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

wasmtime_environ::foreach_builtin_function!(define_builtin_array);
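
// For a hypothetical builtin declared to the macro as
// `example_builtin(vmctx: vmctx, n: u64) -> pointer;`, the generated field
// would look roughly like this (a sketch of the expansion, not real output):
//
//     example_builtin: unsafe extern "C" fn(NonNull<VMContext>, u64) -> *mut u8,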

const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
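
// Compiled code reaches a builtin through a single indirect load off of this
// array, in the spirit of (a sketch; real codegen lives in Cranelift/Winch):
//
//     let base = /* the vmctx field pointing at `VMBuiltinFunctionsArray` */;
//     let slot = unsafe { *base.cast::<usize>().add(builtin_index) };
//     /* indirect call through `slot` using the builtin's signature */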

/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register in the trampoline used
    /// to call from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just
    /// before calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames
    /// when walking the stack. Note that we record the FP of the
    /// *trampoline*'s frame, not the last Wasm frame, because we need
    /// to know the SP (bottom of frame) of the last Wasm frame as
    /// well in case we need to resume to an exception handler in that
    /// frame. The FP of the last Wasm frame can be recovered by
    /// loading the saved FP value at this FP address.
    pub last_wasm_exit_trampoline_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline. This member is `0` when Wasm
    /// is not running, and it's set to nonzero once a host-to-wasm trampoline
    /// is executed.
    ///
    /// When a host function is wrapped into a `wasmtime::Func` and is then
    /// called from the host, this member is not changed, meaning that the
    /// previous activation pointed to by `last_wasm_exit_trampoline_fp` is
    /// still the last set of Wasm frames on the stack.
    ///
    /// This field is saved/restored during fiber suspension/resumption as
    /// part of `CallThreadState::swap`.
    ///
    /// This field is used to find the end of a contiguous sequence of Wasm
    /// frames when walking the stack. Additionally it's used when a trap is
    /// raised as part of the set of parameters used to resume in the entry
    /// trampoline's "catch" block.
    pub last_wasm_entry_sp: UnsafeCell<usize>,

    /// Same as `last_wasm_entry_sp`, but for the `fp` of the trampoline.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// The last trap handler from a host-to-wasm entry trampoline on the stack.
    ///
    /// This field is configured when the host calls into wasm by the trampoline
    /// itself. It stores the `pc` of an exception handler suitable to handle
    /// all traps (or uncaught exceptions).
    pub last_wasm_entry_trap_handler: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,

    /// A pointer to the embedder's `T` inside a `Store<T>`, for use with the
    /// `store-data-address` unsafe intrinsic.
    pub store_data: VmPtr<()>,

    /// The range, in addresses, of the guard page that is currently in use.
    ///
    /// This field is used when signal handlers run, to determine, for example,
    /// whether a faulting address lies within the guard page of an async
    /// stack. If it does, the signal handler aborts with a stack overflow
    /// message, similar to what would happen had the stack overflow happened
    /// on the main thread. By default this field is a null..null range,
    /// indicating that no async guard is in use (i.e. no fiber); the field is
    /// still read in that situation, but it will never classify a fault as a
    /// guard-page fault.
    pub async_guard_range: Range<*mut u8>,
}
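
// A worked example of the `fuel_consumed` convention documented above: if the
// store is given 100 units of fuel, the runtime stores -100 here; after wasm
// consumes 40 units, compiled code has added 40, leaving -60. Once the value
// turns positive, the next fuel check traps (or yields, depending on the
// async-fuel configuration).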

impl VMStoreContext {
    /// From the current saved trampoline FP, get the FP of the last
    /// Wasm frame. If the current saved trampoline FP is null, return
    /// null.
    ///
    /// We store only the trampoline FP, because (i) we need the
    /// trampoline FP, so we know the size (bottom) of the last Wasm
    /// frame; and (ii) the last Wasm frame, just above the trampoline
    /// frame, can be recovered via the FP chain.
    ///
    /// # Safety
    ///
    /// This function requires that the `last_wasm_exit_trampoline_fp`
    /// field either points to an active trampoline frame or is a null
    /// pointer.
    pub(crate) unsafe fn last_wasm_exit_fp(&self) -> usize {
        // SAFETY: the unsafe cell is safe to load (no other threads
        // will be writing our store when we have control), and the
        // helper function's safety condition is the same as ours.
        unsafe {
            let trampoline_fp = *self.last_wasm_exit_trampoline_fp.get();
            Self::wasm_exit_fp_from_trampoline_fp(trampoline_fp)
        }
    }

    /// From any saved trampoline FP, get the FP of the last Wasm
    /// frame. If the given trampoline FP is null, return null.
    ///
    /// This differs from `last_wasm_exit_fp()` above in that it
    /// allows accessing activations further up the stack as well,
    /// e.g. via `CallThreadState::old_state`.
    ///
    /// # Safety
    ///
    /// This function requires that the provided FP value is valid,
    /// and points to an active trampoline frame, or is null.
    ///
    /// This function depends on the invariant that on all supported
    /// architectures, we store the previous FP value under the
    /// current FP. This is a property of our ABI that we control and
    /// ensure.
    pub(crate) unsafe fn wasm_exit_fp_from_trampoline_fp(trampoline_fp: usize) -> usize {
        if trampoline_fp != 0 {
            // SAFETY: We require that trampoline_fp points to a valid
            // frame, which will (by definition) contain an old FP value
            // that we can load.
            unsafe { *(trampoline_fp as *const usize) }
        } else {
            0
        }
    }
}
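
// The FP-chain invariant that `wasm_exit_fp_from_trampoline_fp` relies on,
// pictured for a downward-growing stack (a sketch; exact frame layout is
// ABI-dependent):
//
//     | ...                       |  higher addresses
//     | last Wasm frame           |
//     |   [saved FP]  <------.    |
//     | exit trampoline frame |   |
//     |   [saved FP]  -------'    |  <- last_wasm_exit_trampoline_fp
//     | host frames ...           |  lower addresses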

// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}

impl Default for VMStoreContext {
    fn default() -> VMStoreContext {
        VMStoreContext {
            fuel_consumed: UnsafeCell::new(0),
            epoch_deadline: UnsafeCell::new(0),
            stack_limit: UnsafeCell::new(usize::MAX),
            gc_heap: VMMemoryDefinition {
                base: NonNull::dangling().into(),
                current_length: AtomicUsize::new(0),
            },
            last_wasm_exit_trampoline_fp: UnsafeCell::new(0),
            last_wasm_exit_pc: UnsafeCell::new(0),
            last_wasm_entry_fp: UnsafeCell::new(0),
            last_wasm_entry_sp: UnsafeCell::new(0),
            last_wasm_entry_trap_handler: UnsafeCell::new(0),
            stack_chain: UnsafeCell::new(VMStackChain::Absent),
            async_guard_range: ptr::null_mut()..ptr::null_mut(),
            store_data: VmPtr::dangling(),
        }
    }
}

#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn field_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMStoreContext, stack_limit),
            usize::from(offsets.ptr.vmstore_context_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStoreContext, fuel_consumed),
            usize::from(offsets.ptr.vmstore_context_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMStoreContext, epoch_deadline),
            usize::from(offsets.ptr.vmstore_context_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap),
            usize::from(offsets.ptr.vmstore_context_gc_heap())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmstore_context_gc_heap_base())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmstore_context_gc_heap_current_length())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_trampoline_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_trampoline_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_pc),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_sp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_sp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_trap_handler),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_trap_handler())
        );
        assert_eq!(
            offset_of!(VMStoreContext, stack_chain),
            usize::from(offsets.ptr.vmstore_context_stack_chain())
        );
        assert_eq!(
            offset_of!(VMStoreContext, store_data),
            usize::from(offsets.ptr.vmstore_context_store_data())
        );
    }
}

/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    _magic: u32,
}

impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        unsafe {
            debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        }
        opaque.cast()
    }
}
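
// A hedged sketch of the expected call pattern (`opaque` is assumed to have
// originally been derived from a real `VMContext`):
//
//     let vmctx: NonNull<VMContext> = unsafe { VMContext::from_opaque(opaque) };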

/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust; prefer APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is
/// that fields are all stored in little-endian format, regardless of the
/// endianness of the host system.
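///
/// # Example
///
/// A minimal sketch of the round-trip guarantee (this assumes the public
/// crate-root re-export `wasmtime::ValRaw`, which is how embedders reach this
/// type):
///
/// ```
/// use wasmtime::ValRaw;
///
/// // Constructors store fields little-endian; accessors convert back, so
/// // values round-trip regardless of host endianness.
/// let raw = ValRaw::i64(-2);
/// assert_eq!(raw.get_i64(), -2);
/// assert_eq!(raw.get_u64(), u64::MAX - 1);
/// ```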
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either
    /// signed or unsigned. The Rust type `i32` is simply chosen for
    /// convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either
    /// signed or unsigned. The Rust type `i64` is simply chosen for
    /// convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing
    /// any representation of NaN into WebAssembly without risk of changing
    /// NaN payload bits as it gets passed around the system. Otherwise this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing
    /// any representation of NaN into WebAssembly without risk of changing
    /// NaN payload bits as it gets passed around the system. Otherwise this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of
    /// bits, but note that `v128` in WebAssembly is often considered a vector
    /// type such as `i32x4` or `f64x2`. This means that the actual
    /// interpretation of the underlying bits is left up to the instructions
    /// which consume this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily
    /// verified and must be preserved by carefully calling the correct
    /// functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily
    /// verified and must be preserved by carefully calling the correct
    /// functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,

    /// A WebAssembly `exnref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily
    /// verified and must be preserved by carefully calling the correct
    /// functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    exnref: u32,
}

// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API, so these
// are some simple assertions about the shape of the type which are
// additionally matched in C.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}

impl fmt::Debug for ValRaw {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct Hex<T>(T);
        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let bytes = mem::size_of::<T>();
                let hex_digits_per_byte = 2;
                let hex_digits = bytes * hex_digits_per_byte;
                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
            }
        }
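
        // For example (a sketch): `Hex(1u32)` renders as `0x00000001`, i.e.
        // two hex digits per byte of the wrapped type, zero-padded.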

        unsafe {
            f.debug_struct("ValRaw")
                .field("i32", &Hex(self.i32))
                .field("i64", &Hex(self.i64))
                .field("f32", &Hex(self.f32))
                .field("f64", &Hex(self.f64))
                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
                .field("funcref", &self.funcref)
                .field("externref", &Hex(self.externref))
                .field("anyref", &Hex(self.anyref))
                .field("exnref", &Hex(self.exnref))
                .finish()
        }
    }
}

impl ValRaw {
    /// Create a null reference that is compatible with any of
    /// `{any,extern,func,exn}ref`.
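    ///
    /// A sketch of the guarantee (all reference representations read back as
    /// null/zero; assumes the public re-export `wasmtime::ValRaw`):
    ///
    /// ```
    /// use wasmtime::ValRaw;
    ///
    /// let raw = ValRaw::null();
    /// assert_eq!(raw.get_externref(), 0);
    /// assert_eq!(raw.get_funcref(), core::ptr::null_mut());
    /// ```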
    pub fn null() -> ValRaw {
        unsafe {
            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
            debug_assert_eq!(raw.get_anyref(), 0);
            debug_assert_eq!(raw.get_exnref(), 0);
            debug_assert_eq!(raw.get_externref(), 0);
            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
            raw
        }
    }

    /// Creates a WebAssembly `i32` value
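    ///
    /// For example (a sketch): the upper 32 bits are always initialized by
    /// zero-extension, so `ValRaw::i32(-1).get_u64()` is `0xffff_ffff`.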
    #[inline]
    pub fn i32(i: i32) -> ValRaw {
        // Note that this is intentionally not setting the `i32` field, instead
        // setting the `i64` field with a zero-extended version of `i`. For more
        // information on this see the comments on `Lower for Result` in the
        // `wasmtime` crate. All other `ValRaw` constructors are similarly
        // constrained to guarantee that the initial 64 bits are always
        // initialized.
        ValRaw::u64(i.cast_unsigned().into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn i64(i: i64) -> ValRaw {
        ValRaw { i64: i.to_le() }
    }

    /// Creates a WebAssembly `i32` value
    #[inline]
    pub fn u32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn u64(i: u64) -> ValRaw {
        ValRaw::i64(i as i64)
    }

    /// Creates a WebAssembly `f32` value
    #[inline]
    pub fn f32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `f64` value
    #[inline]
    pub fn f64(i: u64) -> ValRaw {
        ValRaw { f64: i.to_le() }
    }

    /// Creates a WebAssembly `v128` value
    #[inline]
    pub fn v128(i: u128) -> ValRaw {
        ValRaw {
            v128: i.to_le_bytes(),
        }
    }

    /// Creates a WebAssembly `funcref` value
    #[inline]
    pub fn funcref(i: *mut c_void) -> ValRaw {
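        // Byte-swap only the pointer's address into little-endian form (a
        // no-op on little-endian hosts) while preserving its provenance.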
        ValRaw {
            funcref: i.map_addr(|i| i.to_le()),
        }
    }

    /// Creates a WebAssembly `externref` value
    #[inline]
    pub fn externref(e: u32) -> ValRaw {
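        // Without the `gc` feature the only valid `externref` representation
        // is null (0), so anything else is rejected eagerly here.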
        assert!(cfg!(feature = "gc") || e == 0);
        ValRaw {
            externref: e.to_le(),
        }
    }

    /// Creates a WebAssembly `anyref` value
    #[inline]
    pub fn anyref(r: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { anyref: r.to_le() }
    }

    /// Creates a WebAssembly `exnref` value
    #[inline]
    pub fn exnref(r: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { exnref: r.to_le() }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_i32(&self) -> i32 {
        unsafe { i32::from_le(self.i32) }
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_i64(&self) -> i64 {
        unsafe { i64::from_le(self.i64) }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_u32(&self) -> u32 {
        self.get_i32().cast_unsigned()
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_u64(&self) -> u64 {
        self.get_i64().cast_unsigned()
    }

    /// Gets the WebAssembly `f32` value
    #[inline]
    pub fn get_f32(&self) -> u32 {
        unsafe { u32::from_le(self.f32) }
    }

    /// Gets the WebAssembly `f64` value
    #[inline]
    pub fn get_f64(&self) -> u64 {
        unsafe { u64::from_le(self.f64) }
    }

    /// Gets the WebAssembly `v128` value
    #[inline]
    pub fn get_v128(&self) -> u128 {
        unsafe { u128::from_le_bytes(self.v128) }
    }

    /// Gets the WebAssembly `funcref` value
    #[inline]
    pub fn get_funcref(&self) -> *mut c_void {
        // Undo the little-endian byte swap on the address while preserving
        // the pointer's original provenance. Note that `map_addr` keeps the
        // provenance intact, unlike reconstructing a pointer from a bare
        // address, which the constructor's use of `map_addr` relies on.
        unsafe { self.funcref.map_addr(|a| usize::from_le(a)) }
    }

    /// Gets the WebAssembly `externref` value
    #[inline]
    pub fn get_externref(&self) -> u32 {
        let externref = u32::from_le(unsafe { self.externref });
        assert!(cfg!(feature = "gc") || externref == 0);
        externref
    }

    /// Gets the WebAssembly `anyref` value
    #[inline]
    pub fn get_anyref(&self) -> u32 {
        let anyref = u32::from_le(unsafe { self.anyref });
        assert!(cfg!(feature = "gc") || anyref == 0);
        anyref
    }

    /// Gets the WebAssembly `exnref` value
    #[inline]
    pub fn get_exnref(&self) -> u32 {
        let exnref = u32::from_le(unsafe { self.exnref });
        assert!(cfg!(feature = "gc") || exnref == 0);
        exnref
    }
}

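// A few sanity checks of the round-trip behavior documented above. This is a
// hedged, illustrative sketch; it only exercises APIs defined in this file.
#[cfg(test)]
mod valraw_roundtrip {
    use super::*;

    #[test]
    fn roundtrips_are_lossless() {
        // Integer constructors zero-extend into the full 64 bits.
        assert_eq!(ValRaw::i32(-1).get_i32(), -1);
        assert_eq!(ValRaw::i32(-1).get_u64(), 0xffff_ffff);
        // Floats are carried as raw bits, so NaN payloads are preserved.
        let bits = f32::to_bits(1.5);
        assert_eq!(ValRaw::f32(bits).get_f32(), bits);
        // `v128` round-trips through its little-endian byte representation.
        assert_eq!(ValRaw::v128(0x0102_0304).get_v128(), 0x0102_0304);
        // A null `funcref` reads back as null.
        let null = ptr::null_mut();
        assert_eq!(ValRaw::funcref(null).get_funcref(), null);
    }
}
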
/// An "opaque" version of `VMContext` which must be explicitly cast to a
/// target context.
///
/// This context is used to represent that contexts specified in `VMFuncRef`
/// can have any type and don't have an implicit structure. Neither wasmtime
/// nor cranelift-generated code can rely on the structure of an opaque
/// context in general; only the code which configured the context is able to
/// rely on a particular structure. This is because the context pointer
/// configured for `VMFuncRef` is guaranteed to be the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are cast
/// to an opaque context start with a 32-bit "magic", which can be used in
/// debug mode to assert that the casts here are correct, giving at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    pub(crate) magic: u32,
    _marker: marker::PhantomPinned,
}

impl VMOpaqueContext {
    /// Helper function to clearly indicate that casts are desired.
    #[inline]
    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
        ptr.cast()
    }

    /// Helper function to clearly indicate that casts are desired.
    #[inline]
    pub fn from_vm_array_call_host_func_context(
        ptr: NonNull<VMArrayCallHostFuncContext>,
    ) -> NonNull<VMOpaqueContext> {
        ptr.cast()
    }
}