wasmtime/runtime/vm/vmcontext.rs

1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
9use crate::store::StoreOpaque;
10use crate::vm::stack_switching::VMStackChain;
11use core::cell::UnsafeCell;
12use core::ffi::c_void;
13use core::fmt;
14use core::marker;
15use core::mem::{self, MaybeUninit};
16use core::ops::Range;
17use core::ptr::{self, NonNull};
18use core::sync::atomic::{AtomicUsize, Ordering};
19use wasmtime_environ::{
20    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
21    DefinedTagIndex, NUM_COMPONENT_CONTEXT_SLOTS, VMCONTEXT_MAGIC, VMSharedTypeIndex,
22    WasmHeapTopType, WasmValType,
23};
24
25/// A function pointer that exposes the array calling convention.
26///
27/// Regardless of the underlying Wasm function type, all functions using the
28/// array calling convention have the same Rust signature.
29///
30/// Arguments:
31///
32/// * Callee `vmctx` for the function itself.
33///
34/// * Caller's `vmctx` (so that host functions can access the linear memory of
35///   their Wasm callers).
36///
37/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
38///   this function, and where results are returned from this function.
39///
40/// * The capacity of the `ValRaw` buffer. Must always be at least
41///   `max(len(wasm_params), len(wasm_results))`.
42///
43/// Return value:
44///
45/// * `true` if this call succeeded.
46/// * `false` if this call failed and a trap was recorded in TLS.
47pub type VMArrayCallNative = unsafe extern "C" fn(
48    NonNull<VMOpaqueContext>,
49    NonNull<VMContext>,
50    NonNull<ValRaw>,
51    usize,
52) -> bool;
53
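// Illustrative sketch (not part of the runtime): a host function written
// directly against the array calling convention could look roughly like the
// following, where `add_i32` is a hypothetical example. Arguments are read out
// of the shared `ValRaw` buffer and the result is written back in place:
//
//     unsafe extern "C" fn add_i32(
//         _callee: NonNull<VMOpaqueContext>,
//         _caller: NonNull<VMContext>,
//         args: NonNull<ValRaw>,
//         _capacity: usize,
//     ) -> bool {
//         unsafe {
//             let a = (*args.as_ptr()).get_i32();
//             let b = (*args.as_ptr().add(1)).get_i32();
//             *args.as_ptr() = ValRaw::i32(a.wrapping_add(b));
//         }
//         true // no trap was recorded in TLS
//     }
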
54/// An opaque function pointer which might be a `VMArrayCallNative` or might be
55/// Pulley bytecode. Requires external knowledge to determine what kind of
56/// function pointer this is.
57#[repr(transparent)]
58pub struct VMArrayCallFunction(VMFunctionBody);
59
60/// A function pointer that exposes the Wasm calling convention.
61///
62/// In practice, different Wasm function types end up mapping to different Rust
63/// function types, so this isn't simply a type alias the way that
64/// `VMArrayCallFunction` is. However, the exact details of the calling
65/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
66/// code never does anything with these function pointers except shuffle them
67/// around and pass them back to Wasm.
68#[repr(transparent)]
69pub struct VMWasmCallFunction(VMFunctionBody);
70
71/// An imported function.
72///
73/// Basically the same as `VMFuncRef`, except that `wasm_call` is not optional.
74#[derive(Debug, Clone)]
75#[repr(C)]
76pub struct VMFunctionImport {
77    /// Same as `VMFuncRef::array_call`.
78    pub array_call: VmPtr<VMArrayCallFunction>,
79
80    /// Same as `VMFuncRef::wasm_call`, except always non-null. Must be filled
81    /// in by the time Wasm is importing this function!
82    pub wasm_call: VmPtr<VMWasmCallFunction>,
83
84    /// Function signature's _actual_ type id.
85    ///
86    /// This is the type that the function was defined with, not the type that
87    /// it was imported as. These two can be different in the face of subtyping
88    /// and we need the former to correctly implement dynamic downcasts.
89    pub type_index: VMSharedTypeIndex,
90
91    /// Same as `VMFuncRef::vmctx`.
92    pub vmctx: VmPtr<VMOpaqueContext>,
93    // If more elements are added here, remember to add offset_of tests below!
94}
95
96// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
97unsafe impl VmSafe for VMFunctionImport {}
98
99impl VMFunctionImport {
100    /// Convert `&VMFunctionImport` into `&VMFuncRef`.
101    pub fn as_func_ref(&self) -> &VMFuncRef {
102        // Safety: `VMFunctionImport` and `VMFuncRef` have the same
103        // representation.
104        unsafe { Self::as_non_null_func_ref(NonNull::from(self)).as_ref() }
105    }
106
107    /// Convert `NonNull<VMFunctionImport>` into `NonNull<VMFuncRef>`.
108    pub fn as_non_null_func_ref(p: NonNull<VMFunctionImport>) -> NonNull<VMFuncRef> {
109        p.cast()
110    }
111
112    /// Convert `*mut VMFunctionImport` into `*mut VMFuncRef`.
113    pub fn as_func_ref_ptr(p: *mut VMFunctionImport) -> *mut VMFuncRef {
114        p.cast()
115    }
116}
117
118#[cfg(test)]
119mod test_vmfunction_import {
120    use super::{VMFuncRef, VMFunctionImport};
121    use core::mem::offset_of;
122    use std::mem::size_of;
123    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};
124
125    #[test]
126    fn check_vmfunction_import_offsets() {
127        let module = Module::new(StaticModuleIndex::from_u32(0));
128        let offsets = VMOffsets::new(HostPtr, &module);
129        assert_eq!(
130            size_of::<VMFunctionImport>(),
131            usize::from(offsets.size_of_vmfunction_import())
132        );
133        assert_eq!(
134            offset_of!(VMFunctionImport, array_call),
135            usize::from(offsets.vmfunction_import_array_call())
136        );
137        assert_eq!(
138            offset_of!(VMFunctionImport, wasm_call),
139            usize::from(offsets.vmfunction_import_wasm_call())
140        );
141        assert_eq!(
142            offset_of!(VMFunctionImport, type_index),
143            usize::from(offsets.vmfunction_import_type_index())
144        );
145        assert_eq!(
146            offset_of!(VMFunctionImport, vmctx),
147            usize::from(offsets.vmfunction_import_vmctx())
148        );
149    }
150
151    #[test]
152    fn vmfunction_import_and_vmfunc_ref_have_same_layout() {
153        assert_eq!(size_of::<VMFunctionImport>(), size_of::<VMFuncRef>());
154        assert_eq!(
155            offset_of!(VMFunctionImport, array_call),
156            offset_of!(VMFuncRef, array_call),
157        );
158        assert_eq!(
159            offset_of!(VMFunctionImport, wasm_call),
160            offset_of!(VMFuncRef, wasm_call),
161        );
162        assert_eq!(
163            offset_of!(VMFunctionImport, type_index),
164            offset_of!(VMFuncRef, type_index),
165        );
166        assert_eq!(
167            offset_of!(VMFunctionImport, vmctx),
168            offset_of!(VMFuncRef, vmctx),
169        );
170    }
171}
172
173/// A placeholder byte-sized type which is just used to provide some amount of type
174/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
175/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
176/// around.
177#[repr(C)]
178pub struct VMFunctionBody(u8);
179
180// SAFETY: this structure is never read and is safe to pass to jit code.
181unsafe impl VmSafe for VMFunctionBody {}
182
183#[cfg(test)]
184mod test_vmfunction_body {
185    use super::VMFunctionBody;
186    use std::mem::size_of;
187
188    #[test]
189    fn check_vmfunction_body_offsets() {
190        assert_eq!(size_of::<VMFunctionBody>(), 1);
191    }
192}
193
194/// The fields compiled code needs to access to utilize a WebAssembly table
195/// imported from another instance.
196#[derive(Debug, Copy, Clone)]
197#[repr(C)]
198pub struct VMTableImport {
199    /// A pointer to the imported table description.
200    pub from: VmPtr<VMTableDefinition>,
201
202    /// A pointer to the `VMContext` that owns the table description.
203    pub vmctx: VmPtr<VMContext>,
204
205    /// The table index, within `vmctx`, this definition resides at.
206    pub index: DefinedTableIndex,
207}
208
209// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
210unsafe impl VmSafe for VMTableImport {}
211
212#[cfg(test)]
213mod test_vmtable {
214    use super::VMTableImport;
215    use core::mem::offset_of;
216    use std::mem::size_of;
217    use wasmtime_environ::component::{Component, VMComponentOffsets};
218    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};
219
220    #[test]
221    fn check_vmtable_offsets() {
222        let module = Module::new(StaticModuleIndex::from_u32(0));
223        let offsets = VMOffsets::new(HostPtr, &module);
224        assert_eq!(
225            size_of::<VMTableImport>(),
226            usize::from(offsets.size_of_vmtable_import())
227        );
228        assert_eq!(
229            offset_of!(VMTableImport, from),
230            usize::from(offsets.vmtable_import_from())
231        );
232        assert_eq!(
233            offset_of!(VMTableImport, vmctx),
234            usize::from(offsets.vmtable_import_vmctx())
235        );
236        assert_eq!(
237            offset_of!(VMTableImport, index),
238            usize::from(offsets.vmtable_import_index())
239        );
240    }
241
242    #[test]
243    fn ensure_sizes_match() {
244        // Because we use `VMTableImport` for recording tables used by components, we
245        // want to make sure that the size calculations between `VMOffsets` and
246        // `VMComponentOffsets` stay the same.
247        let module = Module::new(StaticModuleIndex::from_u32(0));
248        let vm_offsets = VMOffsets::new(HostPtr, &module);
249        let component = Component::default();
250        let vm_component_offsets = VMComponentOffsets::new(HostPtr, &component);
251        assert_eq!(
252            vm_offsets.size_of_vmtable_import(),
253            vm_component_offsets.size_of_vmtable_import()
254        );
255    }
256}
257
258/// The fields compiled code needs to access to utilize a WebAssembly linear
259/// memory imported from another instance.
260#[derive(Debug, Copy, Clone)]
261#[repr(C)]
262pub struct VMMemoryImport {
263    /// A pointer to the imported memory description.
264    pub from: VmPtr<VMMemoryDefinition>,
265
266    /// A pointer to the `VMContext` that owns the memory description.
267    pub vmctx: VmPtr<VMContext>,
268
269    /// The index of the memory in the containing `vmctx`.
270    pub index: DefinedMemoryIndex,
271}
272
273// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
274unsafe impl VmSafe for VMMemoryImport {}
275
276#[cfg(test)]
277mod test_vmmemory_import {
278    use super::VMMemoryImport;
279    use core::mem::offset_of;
280    use std::mem::size_of;
281    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};
282
283    #[test]
284    fn check_vmmemory_import_offsets() {
285        let module = Module::new(StaticModuleIndex::from_u32(0));
286        let offsets = VMOffsets::new(HostPtr, &module);
287        assert_eq!(
288            size_of::<VMMemoryImport>(),
289            usize::from(offsets.size_of_vmmemory_import())
290        );
291        assert_eq!(
292            offset_of!(VMMemoryImport, from),
293            usize::from(offsets.vmmemory_import_from())
294        );
295        assert_eq!(
296            offset_of!(VMMemoryImport, vmctx),
297            usize::from(offsets.vmmemory_import_vmctx())
298        );
299        assert_eq!(
300            offset_of!(VMMemoryImport, index),
301            usize::from(offsets.vmmemory_import_index())
302        );
303    }
304}
305
306/// The fields compiled code needs to access to utilize a WebAssembly global
307/// variable imported from another instance.
308///
309/// Note that unlike functions, tables, and memories, the `vmctx` pointer for
310/// an imported global is optional: host-defined globals have no owning
311/// instance, so `vmctx` below is `None` for `VMGlobalKind::Host`.
312#[derive(Debug, Copy, Clone)]
313#[repr(C)]
314pub struct VMGlobalImport {
315    /// A pointer to the imported global variable description.
316    pub from: VmPtr<VMGlobalDefinition>,
317
318    /// A pointer to the context that owns the global.
319    ///
320    /// Exactly what's stored here is dictated by `kind` below. This is `None`
321    /// for `VMGlobalKind::Host`, it's a `VMContext` for
322    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
323    /// `VMGlobalKind::ComponentFlags`.
324    pub vmctx: Option<VmPtr<VMOpaqueContext>>,
325
326    /// The kind of global, and extra location information in addition to
327    /// `vmctx` above.
328    pub kind: VMGlobalKind,
329}
330
331// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
332unsafe impl VmSafe for VMGlobalImport {}
333
334/// The kinds of globals that Wasmtime has.
335#[derive(Debug, Copy, Clone)]
336#[repr(C, u32)]
337pub enum VMGlobalKind {
338    /// Host globals, stored in a `StoreOpaque`.
339    Host(DefinedGlobalIndex),
340    /// Instance globals, stored in `VMContext`s.
341    Instance(DefinedGlobalIndex),
342    /// Flags for a component instance, stored in `VMComponentContext`.
343    #[cfg(feature = "component-model")]
344    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
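    /// Whether the current component-model task may block.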
345    #[cfg(feature = "component-model")]
346    TaskMayBlock,
347}
348
349// SAFETY: the above enum is repr(C) and stores nothing else
350unsafe impl VmSafe for VMGlobalKind {}
351
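// Illustrative sketch (hypothetical helper, not part of this module): the
// meaning of `VMGlobalImport::vmctx` is dictated by the global's kind, per the
// documentation above, roughly as follows:
//
//     fn vmctx_kind(kind: &VMGlobalKind) -> &'static str {
//         match kind {
//             VMGlobalKind::Host(_) => "no vmctx; the global lives in the StoreOpaque",
//             VMGlobalKind::Instance(_) => "vmctx is a VMContext",
//             #[cfg(feature = "component-model")]
//             VMGlobalKind::ComponentFlags(_) => "vmctx is a VMComponentContext",
//             _ => "other component-model state",
//         }
//     }
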
352#[cfg(test)]
353mod test_vmglobal_import {
354    use super::VMGlobalImport;
355    use core::mem::offset_of;
356    use std::mem::size_of;
357    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};
358
359    #[test]
360    fn check_vmglobal_import_offsets() {
361        let module = Module::new(StaticModuleIndex::from_u32(0));
362        let offsets = VMOffsets::new(HostPtr, &module);
363        assert_eq!(
364            size_of::<VMGlobalImport>(),
365            usize::from(offsets.size_of_vmglobal_import())
366        );
367        assert_eq!(
368            offset_of!(VMGlobalImport, from),
369            usize::from(offsets.vmglobal_import_from())
370        );
371    }
372}
373
374/// The fields compiled code needs to access to utilize a WebAssembly
375/// tag imported from another instance.
376#[derive(Debug, Copy, Clone)]
377#[repr(C)]
378pub struct VMTagImport {
379    /// A pointer to the imported tag description.
380    pub from: VmPtr<VMTagDefinition>,
381
382    /// The instance that owns this tag.
383    pub vmctx: VmPtr<VMContext>,
384
385    /// The index of the tag in the containing `vmctx`.
386    pub index: DefinedTagIndex,
387}
388
389// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
390unsafe impl VmSafe for VMTagImport {}
391
392#[cfg(test)]
393mod test_vmtag_import {
394    use super::VMTagImport;
395    use core::mem::{offset_of, size_of};
396    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};
397
398    #[test]
399    fn check_vmtag_import_offsets() {
400        let module = Module::new(StaticModuleIndex::from_u32(0));
401        let offsets = VMOffsets::new(HostPtr, &module);
402        assert_eq!(
403            size_of::<VMTagImport>(),
404            usize::from(offsets.size_of_vmtag_import())
405        );
406        assert_eq!(
407            offset_of!(VMTagImport, from),
408            usize::from(offsets.vmtag_import_from())
409        );
410        assert_eq!(
411            offset_of!(VMTagImport, vmctx),
412            usize::from(offsets.vmtag_import_vmctx())
413        );
414        assert_eq!(
415            offset_of!(VMTagImport, index),
416            usize::from(offsets.vmtag_import_index())
417        );
418    }
419}
420
421/// The fields compiled code needs to access to utilize a WebAssembly linear
422/// memory defined within the instance, namely the start address and the
423/// size in bytes.
424#[derive(Debug)]
425#[repr(C)]
426pub struct VMMemoryDefinition {
427    /// The start address.
428    pub base: VmPtr<u8>,
429
430    /// The current logical size of this linear memory in bytes.
431    ///
432    /// This is atomic because shared memories must be able to grow their length
433    /// atomically. For relaxed access, see
434    /// [`VMMemoryDefinition::current_length()`].
435    pub current_length: AtomicUsize,
436}
437
438// SAFETY: the above definition has `repr(C)` and each field individually
439// implements `VmSafe`, which satisfies the requirements of this trait.
440unsafe impl VmSafe for VMMemoryDefinition {}
441
442impl VMMemoryDefinition {
443    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
444    /// performing a relaxed load; do not use this function for situations in
445    /// which a precise length is needed. Owned memories (i.e., non-shared) will
446    /// always return a precise result (since no concurrent modification is
447    /// possible) but shared memories may see an imprecise value--a
448    /// `current_length` potentially smaller than what some other thread
449    /// observes. Since Wasm memory only grows, this under-estimation may be
450    /// acceptable in certain cases.
451    #[inline]
452    pub fn current_length(&self) -> usize {
453        self.current_length.load(Ordering::Relaxed)
454    }
455
456    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
457    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
458    #[inline]
459    pub unsafe fn load(ptr: *mut Self) -> Self {
460        let other = unsafe { &*ptr };
461        VMMemoryDefinition {
462            base: other.base,
463            current_length: other.current_length().into(),
464        }
465    }
466}
467
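// Illustrative sketch (hypothetical helper): a host-side bounds check against
// a memory definition might use the relaxed length like this, accepting that
// for shared memories the answer may be conservatively stale:
//
//     fn in_bounds(def: &VMMemoryDefinition, offset: usize, len: usize) -> bool {
//         offset
//             .checked_add(len)
//             .map(|end| end <= def.current_length())
//             .unwrap_or(false)
//     }
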
468#[cfg(test)]
469mod test_vmmemory_definition {
470    use super::VMMemoryDefinition;
471    use core::mem::offset_of;
472    use std::mem::size_of;
473    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};
474
475    #[test]
476    fn check_vmmemory_definition_offsets() {
477        let module = Module::new(StaticModuleIndex::from_u32(0));
478        let offsets = VMOffsets::new(HostPtr, &module);
479        assert_eq!(
480            size_of::<VMMemoryDefinition>(),
481            usize::from(offsets.ptr.size_of_vmmemory_definition())
482        );
483        assert_eq!(
484            offset_of!(VMMemoryDefinition, base),
485            usize::from(offsets.ptr.vmmemory_definition_base())
486        );
487        assert_eq!(
488            offset_of!(VMMemoryDefinition, current_length),
489            usize::from(offsets.ptr.vmmemory_definition_current_length())
490        );
491        /* TODO: Assert that the size of `current_length` matches.
492        assert_eq!(
493            size_of::<VMMemoryDefinition::current_length>(),
494            usize::from(offsets.size_of_vmmemory_definition_current_length())
495        );
496        */
497    }
498}
499
500/// The fields compiled code needs to access to utilize a WebAssembly table
501/// defined within the instance.
502#[derive(Debug, Copy, Clone)]
503#[repr(C)]
504pub struct VMTableDefinition {
505    /// Pointer to the table data.
506    pub base: VmPtr<u8>,
507
508    /// The current number of elements in the table.
509    pub current_elements: usize,
510}
511
512// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
513unsafe impl VmSafe for VMTableDefinition {}
514
515#[cfg(test)]
516mod test_vmtable_definition {
517    use super::VMTableDefinition;
518    use core::mem::offset_of;
519    use std::mem::size_of;
520    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};
521
522    #[test]
523    fn check_vmtable_definition_offsets() {
524        let module = Module::new(StaticModuleIndex::from_u32(0));
525        let offsets = VMOffsets::new(HostPtr, &module);
526        assert_eq!(
527            size_of::<VMTableDefinition>(),
528            usize::from(offsets.size_of_vmtable_definition())
529        );
530        assert_eq!(
531            offset_of!(VMTableDefinition, base),
532            usize::from(offsets.vmtable_definition_base())
533        );
534        assert_eq!(
535            offset_of!(VMTableDefinition, current_elements),
536            usize::from(offsets.vmtable_definition_current_elements())
537        );
538    }
539}
540
541/// The storage for a WebAssembly global defined within the instance.
542///
543/// TODO: Pack the globals more densely, rather than using the same size
544/// for every type.
545#[derive(Debug)]
546#[repr(C, align(16))]
547pub struct VMGlobalDefinition {
548    storage: [u8; 16],
549    // If more elements are added here, remember to add offset_of tests below!
550}
551
552// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
553unsafe impl VmSafe for VMGlobalDefinition {}
554
555#[cfg(test)]
556mod test_vmglobal_definition {
557    use super::VMGlobalDefinition;
558    use std::mem::{align_of, size_of};
559    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};
560
561    #[test]
562    fn check_vmglobal_definition_alignment() {
563        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
564        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
565        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
566        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
567        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
568        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f32; 4]>());
569        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f64; 2]>());
570    }
571
572    #[test]
573    fn check_vmglobal_definition_offsets() {
574        let module = Module::new(StaticModuleIndex::from_u32(0));
575        let offsets = VMOffsets::new(HostPtr, &module);
576        assert_eq!(
577            size_of::<VMGlobalDefinition>(),
578            usize::from(offsets.ptr.size_of_vmglobal_definition())
579        );
580    }
581
582    #[test]
583    fn check_vmglobal_begins_aligned() {
584        let module = Module::new(StaticModuleIndex::from_u32(0));
585        let offsets = VMOffsets::new(HostPtr, &module);
586        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
587    }
588
589    #[test]
590    #[cfg(feature = "gc")]
591    fn check_vmglobal_can_contain_gc_ref() {
592        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
593    }
594}
595
596impl VMGlobalDefinition {
597    /// Construct a `VMGlobalDefinition`.
598    pub fn new() -> Self {
599        Self { storage: [0; 16] }
600    }
601
602    /// Create a `VMGlobalDefinition` from a `ValRaw`.
603    ///
604    /// # Unsafety
605    ///
606    /// This raw value's type must match the given `WasmValType`.
607    pub unsafe fn from_val_raw(
608        store: &mut StoreOpaque,
609        wasm_ty: WasmValType,
610        raw: ValRaw,
611    ) -> Result<Self> {
612        let mut global = Self::new();
613        unsafe {
614            match wasm_ty {
615                WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
616                WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
617                WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
618                WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
619                WasmValType::V128 => global.set_u128(raw.get_v128()),
620                WasmValType::Ref(r) => match r.heap_type.top() {
621                    WasmHeapTopType::Extern => {
622                        let r = VMGcRef::from_raw_u32(raw.get_externref());
623                        global.init_gc_ref(store, r.as_ref())
624                    }
625                    WasmHeapTopType::Any => {
626                        let r = VMGcRef::from_raw_u32(raw.get_anyref());
627                        global.init_gc_ref(store, r.as_ref())
628                    }
629                    WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
630                    WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
631                    WasmHeapTopType::Exn => {
632                        let r = VMGcRef::from_raw_u32(raw.get_exnref());
633                        global.init_gc_ref(store, r.as_ref())
634                    }
635                },
636            }
637        }
638        Ok(global)
639    }
640
641    /// Get this global's value as a `ValRaw`.
642    ///
643    /// # Unsafety
644    ///
645    /// This global's value's type must match the given `WasmValType`.
646    pub unsafe fn to_val_raw(
647        &self,
648        store: &mut StoreOpaque,
649        wasm_ty: WasmValType,
650    ) -> Result<ValRaw> {
651        unsafe {
652            Ok(match wasm_ty {
653                WasmValType::I32 => ValRaw::i32(*self.as_i32()),
654                WasmValType::I64 => ValRaw::i64(*self.as_i64()),
655                WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
656                WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
657                WasmValType::V128 => ValRaw::v128(self.get_u128()),
658                WasmValType::Ref(r) => match r.heap_type.top() {
659                    WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
660                        Some(r) => store.clone_gc_ref(r).as_raw_u32(),
661                        None => 0,
662                    }),
663                    WasmHeapTopType::Any => ValRaw::anyref({
664                        match self.as_gc_ref() {
665                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
666                            None => 0,
667                        }
668                    }),
669                    WasmHeapTopType::Exn => ValRaw::exnref({
670                        match self.as_gc_ref() {
671                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
672                            None => 0,
673                        }
674                    }),
675                    WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
676                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
677                },
678            })
679        }
680    }
681
682    /// Return a reference to the value as an i32.
683    pub unsafe fn as_i32(&self) -> &i32 {
684        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i32>()) }
685    }
686
687    /// Return a mutable reference to the value as an i32.
688    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
689        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>()) }
690    }
691
692    /// Return a reference to the value as a u32.
693    pub unsafe fn as_u32(&self) -> &u32 {
694        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
695    }
696
697    /// Return a mutable reference to the value as a u32.
698    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
699        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
700    }
701
702    /// Return a reference to the value as an i64.
703    pub unsafe fn as_i64(&self) -> &i64 {
704        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i64>()) }
705    }
706
707    /// Return a mutable reference to the value as an i64.
708    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
709        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>()) }
710    }
711
712    /// Return a reference to the value as a u64.
713    pub unsafe fn as_u64(&self) -> &u64 {
714        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
715    }
716
717    /// Return a mutable reference to the value as a u64.
718    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
719        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
720    }
721
722    /// Return a reference to the value as an f32.
723    pub unsafe fn as_f32(&self) -> &f32 {
724        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f32>()) }
725    }
726
727    /// Return a mutable reference to the value as an f32.
728    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
729        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>()) }
730    }
731
732    /// Return a reference to the value as f32 bits.
733    pub unsafe fn as_f32_bits(&self) -> &u32 {
734        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
735    }
736
737    /// Return a mutable reference to the value as f32 bits.
738    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
739        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
740    }
741
742    /// Return a reference to the value as an f64.
743    pub unsafe fn as_f64(&self) -> &f64 {
744        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f64>()) }
745    }
746
747    /// Return a mutable reference to the value as an f64.
748    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
749        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>()) }
750    }
751
752    /// Return a reference to the value as f64 bits.
753    pub unsafe fn as_f64_bits(&self) -> &u64 {
754        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
755    }
756
757    /// Return a mutable reference to the value as f64 bits.
758    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
759        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
760    }
761
762    /// Gets the underlying 128-bit vector value.
763    //
764    // Note that vectors are stored in little-endian format while other types
765    // are stored in native-endian format.
766    pub unsafe fn get_u128(&self) -> u128 {
767        unsafe { u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>())) }
768    }
769
770    /// Sets the 128-bit vector value.
771    //
772    // Note that vectors are stored in little-endian format while other types
773    // are stored in native-endian format.
774    pub unsafe fn set_u128(&mut self, val: u128) {
775        unsafe {
776            *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
777        }
778    }
779
780    /// Return a reference to the value as u128 bits.
781    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
782        unsafe { &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>()) }
783    }
784
785    /// Return a mutable reference to the value as u128 bits.
786    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
787        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>()) }
788    }
789
790    /// Return a reference to the global value as a borrowed GC reference.
791    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
792        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
793        let ret = unsafe { (*raw_ptr).as_ref() };
794        assert!(cfg!(feature = "gc") || ret.is_none());
795        ret
796    }
797
798    /// Return a mutable reference to the global value as a borrowed GC reference.
799    pub unsafe fn as_gc_ref_mut(&mut self) -> Option<&mut VMGcRef> {
800        let raw_ptr = self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>();
801        let ret = unsafe { (*raw_ptr).as_mut() };
802        assert!(cfg!(feature = "gc") || ret.is_none());
803        ret
804    }
805
806    /// Initialize a global to the given GC reference.
807    pub unsafe fn init_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
808        let dest = unsafe {
809            &mut *(self
810                .storage
811                .as_mut()
812                .as_mut_ptr()
813                .cast::<MaybeUninit<Option<VMGcRef>>>())
814        };
815
816        store.init_gc_ref(dest, gc_ref)
817    }
818
819    /// Write a GC reference into this global value.
820    pub unsafe fn write_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
821        let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
822        store.write_gc_ref(dest, gc_ref)
823    }
824
825    /// Return the value as a raw pointer to a `VMFuncRef`.
826    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
827        unsafe { *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>()) }
828    }
829
830    /// Return a mutable reference to the value as a `VMFuncRef`.
831    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
832        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>()) }
833    }
834}
835
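// Illustrative sketch: round-tripping a scalar through a global definition
// (hypothetical usage; reference-typed globals additionally require a store
// for GC bookkeeping):
//
//     let mut global = VMGlobalDefinition::new();
//     unsafe {
//         *global.as_i32_mut() = 42;
//         assert_eq!(*global.as_i32(), 42);
//         assert_eq!(ValRaw::i32(*global.as_i32()).get_i32(), 42);
//     }
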
836#[cfg(test)]
837mod test_vmshared_type_index {
838    use super::VMSharedTypeIndex;
839    use std::mem::size_of;
840    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};
841
842    #[test]
843    fn check_vmshared_type_index() {
844        let module = Module::new(StaticModuleIndex::from_u32(0));
845        let offsets = VMOffsets::new(HostPtr, &module);
846        assert_eq!(
847            size_of::<VMSharedTypeIndex>(),
848            usize::from(offsets.size_of_vmshared_type_index())
849        );
850    }
851}
852
853/// A WebAssembly tag defined within the instance.
854///
855#[derive(Debug)]
856#[repr(C)]
857pub struct VMTagDefinition {
858    /// Function signature's type id.
859    pub type_index: VMSharedTypeIndex,
860}
861
862impl VMTagDefinition {
863    pub fn new(type_index: VMSharedTypeIndex) -> Self {
864        Self { type_index }
865    }
866}
867
868// SAFETY: the above structure is repr(C) and only contains VmSafe
869// fields.
870unsafe impl VmSafe for VMTagDefinition {}
871
872#[cfg(test)]
873mod test_vmtag_definition {
874    use super::VMTagDefinition;
875    use std::mem::size_of;
876    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};
877
878    #[test]
879    fn check_vmtag_definition_offsets() {
880        let module = Module::new(StaticModuleIndex::from_u32(0));
881        let offsets = VMOffsets::new(HostPtr, &module);
882        assert_eq!(
883            size_of::<VMTagDefinition>(),
884            usize::from(offsets.ptr.size_of_vmtag_definition())
885        );
886    }
887
888    #[test]
889    fn check_vmtag_begins_aligned() {
890        let module = Module::new(StaticModuleIndex::from_u32(0));
891        let offsets = VMOffsets::new(HostPtr, &module);
892        assert_eq!(offsets.vmctx_tags_begin() % 16, 0);
893    }
894}
895
896/// The VM caller-checked "funcref" record, for caller-side signature checking.
897///
898/// It consists of function pointer(s), a type id to be checked by the
899/// caller, and the vmctx closure associated with this function.
900#[derive(Debug, Clone)]
901#[repr(C)]
902pub struct VMFuncRef {
903    /// Function pointer for this funcref if being called via the "array"
904    /// calling convention that `Func::new` et al use.
905    pub array_call: VmPtr<VMArrayCallFunction>,
906
907    /// Function pointer for this funcref if being called via the calling
908    /// convention we use when compiling Wasm.
909    ///
910    /// Most functions come with a function pointer that we can use when they
911    /// are called from Wasm. The notable exception is when we `Func::wrap` a
912    /// host function, and we don't have a Wasm compiler on hand to compile a
913    /// Wasm-to-native trampoline for the function. In this case, we leave
914    /// `wasm_call` empty until the function is passed as an import to Wasm (or
915    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
916    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
917    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
918    /// there is no guarantee that the Wasm module has a trampoline for this
919    /// function's signature. The Wasm module only has trampolines for its
920    /// types, and if this function isn't of one of those types, then the Wasm
921    /// module will not have a trampoline for it. This is actually okay, because
922    /// it means that the Wasm cannot actually call this function. But it does
923    /// mean that this field needs to be an `Option` even though it is non-null
924    /// the vast vast vast majority of the time.
925    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,
926
927    /// Function signature's type id.
928    pub type_index: VMSharedTypeIndex,
929
930    /// The VM state associated with this function.
931    ///
932    /// The actual definition of what this pointer points to depends on the
933    /// function being referenced: for core Wasm functions, this is a `*mut
934    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
935    /// component functions it is a `*mut VMComponentContext`.
936    pub vmctx: VmPtr<VMOpaqueContext>,
937    // If more elements are added here, remember to add offset_of tests below!
938}
939
940// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
941unsafe impl VmSafe for VMFuncRef {}
942
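// Illustrative sketch: before a funcref may be handed to Wasm as an import its
// `wasm_call` pointer must be filled in; `as_vm_function_import` below encodes
// exactly that check (`func_ref: &VMFuncRef` is a hypothetical binding):
//
//     if let Some(import) = func_ref.as_vm_function_import() {
//         // `*import` can be copied into the importing instance's VMContext.
//     } else {
//         // A wasm-to-native trampoline still needs to be located for this
//         // function's type before Wasm can call it.
//     }
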
943impl VMFuncRef {
944    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
945    /// arguments.
946    ///
947    /// This will invoke the function pointer in the `array_call` field with:
948    ///
949    /// * the `callee` vmctx as `self.vmctx`
950    /// * the `caller` as `caller` specified here
951    /// * the data pointer of `args_and_results` as the `ValRaw` buffer
952    /// * the length of `args_and_results` as the buffer's capacity
953    ///
954    /// The `args_and_results` area must be large enough to both load all
955    /// arguments from and store all results to.
956    ///
957    /// Returns whether a trap was recorded in TLS for raising.
958    ///
959    /// # Unsafety
960    ///
961    /// This method is unsafe because it can be called with any pointers. They
962    /// must all be valid for this wasm function call to proceed. For example
963    /// the function referenced by `me` must be valid machine code if `pulley` is
964    /// `None` or valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
965    /// must be large enough to handle all the arguments/results for this call.
966    ///
967    /// Note that the unsafety invariants to maintain here are not currently
968    /// exhaustively documented.
969    #[inline]
970    pub unsafe fn array_call(
971        me: NonNull<VMFuncRef>,
972        pulley: Option<InterpreterRef<'_>>,
973        caller: NonNull<VMContext>,
974        args_and_results: NonNull<[ValRaw]>,
975    ) -> bool {
976        match pulley {
977            Some(vm) => unsafe { Self::array_call_interpreted(me, vm, caller, args_and_results) },
978            None => unsafe { Self::array_call_native(me, caller, args_and_results) },
979        }
980    }
981
982    unsafe fn array_call_interpreted(
983        me: NonNull<VMFuncRef>,
984        vm: InterpreterRef<'_>,
985        caller: NonNull<VMContext>,
986        args_and_results: NonNull<[ValRaw]>,
987    ) -> bool {
988        // If `caller` is actually a `VMArrayCallHostFuncContext` then skip the
989        // interpreter, even though it's available, as `array_call` will be
990        // native code.
991        unsafe {
992            if me.as_ref().vmctx.as_non_null().as_ref().magic
993                == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
994            {
995                return Self::array_call_native(me, caller, args_and_results);
996            }
997            vm.call(
998                me.as_ref().array_call.as_non_null().cast(),
999                me.as_ref().vmctx.as_non_null(),
1000                caller,
1001                args_and_results,
1002            )
1003        }
1004    }
1005
1006    #[inline]
1007    unsafe fn array_call_native(
1008        me: NonNull<VMFuncRef>,
1009        caller: NonNull<VMContext>,
1010        args_and_results: NonNull<[ValRaw]>,
1011    ) -> bool {
1012        unsafe {
1013            union GetNativePointer {
1014                native: VMArrayCallNative,
1015                ptr: NonNull<VMArrayCallFunction>,
1016            }
1017            let native = GetNativePointer {
1018                ptr: me.as_ref().array_call.as_non_null(),
1019            }
1020            .native;
1021            native(
1022                me.as_ref().vmctx.as_non_null(),
1023                caller,
1024                args_and_results.cast(),
1025                args_and_results.len(),
1026            )
1027        }
1028    }
1029
1030    pub(crate) fn as_vm_function_import(&self) -> Option<&VMFunctionImport> {
1031        if self.wasm_call.is_some() {
1032            // Safety: `VMFuncRef` and `VMFunctionImport` have the same layout
1033            // and `wasm_call` is non-null.
1034            Some(unsafe { NonNull::from(self).cast::<VMFunctionImport>().as_ref() })
1035        } else {
1036            None
1037        }
1038    }
1039}
1040
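// Illustrative sketch: invoking a funcref through the array calling convention
// from host code (hypothetical `func_ref` and `caller_vmctx` bindings; real
// callers also manage the store, GC roots, and trap state):
//
//     let mut space = [ValRaw::i32(1), ValRaw::i32(2)];
//     let succeeded = unsafe {
//         VMFuncRef::array_call(func_ref, None, caller_vmctx, NonNull::from(&mut space[..]))
//     };
//     if succeeded {
//         let sum = space[0].get_i32();
//     }
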
1041#[cfg(test)]
1042mod test_vm_func_ref {
1043    use super::VMFuncRef;
1044    use core::mem::offset_of;
1045    use std::mem::size_of;
1046    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};
1047
1048    #[test]
1049    fn check_vm_func_ref_offsets() {
1050        let module = Module::new(StaticModuleIndex::from_u32(0));
1051        let offsets = VMOffsets::new(HostPtr, &module);
1052        assert_eq!(
1053            size_of::<VMFuncRef>(),
1054            usize::from(offsets.ptr.size_of_vm_func_ref())
1055        );
1056        assert_eq!(
1057            offset_of!(VMFuncRef, array_call),
1058            usize::from(offsets.ptr.vm_func_ref_array_call())
1059        );
1060        assert_eq!(
1061            offset_of!(VMFuncRef, wasm_call),
1062            usize::from(offsets.ptr.vm_func_ref_wasm_call())
1063        );
1064        assert_eq!(
1065            offset_of!(VMFuncRef, type_index),
1066            usize::from(offsets.ptr.vm_func_ref_type_index())
1067        );
1068        assert_eq!(
1069            offset_of!(VMFuncRef, vmctx),
1070            usize::from(offsets.ptr.vm_func_ref_vmctx())
1071        );
1072    }
1073}
1074
1075macro_rules! define_builtin_array {
1076    (
1077        $(
1078            $( #[$attr:meta] )*
1079            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
1080        )*
1081    ) => {
1082        /// An array that stores addresses of builtin functions. We translate code
1083        /// to use indirect calls. This way, we don't have to patch the code.
1084        #[repr(C)]
1085        #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
1086        pub struct VMBuiltinFunctionsArray {
1087            $(
1088                $name: unsafe extern "C" fn(
1089                    $(define_builtin_array!(@ty $param)),*
1090                ) $( -> define_builtin_array!(@ty $result))?,
1091            )*
1092        }
1093
1094        impl VMBuiltinFunctionsArray {
1095            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
1096                $(
1097                    $name: crate::runtime::vm::libcalls::raw::$name,
1098                )*
1099            };
1100
1101            /// Helper to call `expose_provenance()` on all contained pointers.
1102            ///
1103            /// This is required to be called at least once before entering wasm
1104            /// to inform the compiler that these function pointers may all be
1105            /// loaded/stored and used on the "other end" to reacquire
1106            /// provenance in Pulley. Pulley models hostcalls with a host
1107            /// pointer as the first parameter that's a function pointer under
1108            /// the hood, and this call ensures that the use of the function
1109            /// pointer is considered valid.
1110            pub fn expose_provenance(&self) -> NonNull<Self>{
1111                $(
1112                    (self.$name as *mut u8).expose_provenance();
1113                )*
1114                NonNull::from(self)
1115            }
1116        }
1117    };
1118
1119    (@ty u32) => (u32);
1120    (@ty u64) => (u64);
1121    (@ty f32) => (f32);
1122    (@ty f64) => (f64);
1123    (@ty u8) => (u8);
1124    (@ty i8x16) => (i8x16);
1125    (@ty f32x4) => (f32x4);
1126    (@ty f64x2) => (f64x2);
1127    (@ty bool) => (bool);
1128    (@ty pointer) => (*mut u8);
1129    (@ty size) => (usize);
1130    (@ty vmctx) => (NonNull<VMContext>);
1131}
1132
1133// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
1134unsafe impl VmSafe for VMBuiltinFunctionsArray {}
1135
1136wasmtime_environ::foreach_builtin_function!(define_builtin_array);
1137
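// Illustrative sketch of the expansion: for a representative builtin declared
// as `memory32_grow(vmctx: vmctx, delta: u64, index: u32) -> pointer` (the
// exact builtin list lives in `wasmtime_environ`), the macro above produces a
// field roughly like:
//
//     memory32_grow: unsafe extern "C" fn(NonNull<VMContext>, u64, u32) -> *mut u8,
//
// and `INIT` points it at `crate::runtime::vm::libcalls::raw::memory32_grow`,
// so compiled code reaches builtins through indirect calls off this table.
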
1138const _: () = {
1139    assert!(
1140        mem::size_of::<VMBuiltinFunctionsArray>()
1141            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
1142    )
1143};
1144
1145/// Structure that holds all mutable context that is shared across all instances
1146/// in a store, for example data related to fuel or epochs.
1147///
1148/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
1149/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
1150/// that multiple `wasmtime::Instance`s may be associated with the same
1151/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
1152/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
1153#[derive(Debug)]
1154#[repr(C)]
1155pub struct VMStoreContext {
1156    // NB: 64-bit integer fields are located first with pointer-sized fields
1157    // trailing afterwards. That makes the offsets in this structure easier to
1158    // calculate on 32-bit platforms as we don't have to worry about the
1159    // alignment of 64-bit integers.
1160    //
1161    /// Indicator of how much fuel has been consumed and is remaining to
1162    /// WebAssembly.
1163    ///
1164    /// This field is typically negative and increments towards positive. Upon
1165    /// turning positive a wasm trap will be generated. This field is only
1166    /// modified if wasm is configured to consume fuel.
1167    pub fuel_consumed: UnsafeCell<i64>,
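    // Illustrative example of the representation (not normative): if the
    // embedder adds 100 units of fuel this field becomes -100; consuming 30
    // units of fuel moves it to -70; once it turns positive the next fuel
    // check in compiled code raises an out-of-fuel trap.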
1168
1169    /// Deadline epoch for interruption: if epoch-based interruption
1170    /// is enabled and the global (per engine) epoch counter is
1171    /// observed to reach or exceed this value, the guest code will
1172    /// yield if running asynchronously.
1173    pub epoch_deadline: UnsafeCell<u64>,
1174
1175    /// The "store version".
1176    ///
1177    /// This is used to test whether stack-frame handles referring to
1178    /// suspended stack frames remain valid.
1179    ///
1180    /// The invariant that this upward-counting number must satisfy
1181    /// is: the number must be incremented whenever execution starts
1182    /// or resumes in the `Store` or when any stack is
1183    /// dropped/freed. That way, if we take a reference to some
1184    /// suspended stack frame and track the "version" at the time we
1185    /// took that reference, if the version still matches, we can be
1186    /// sure that nothing could have unwound the referenced Wasm
1187    /// frame.
1188    ///
1189    /// This version number is incremented in exactly one place: the
1190    /// Wasm-to-host trampolines, after return from host code. Note
1191    /// that this captures both the normal "return into Wasm" case
1192    /// (where Wasm frames can subsequently return normally and thus
1193    /// invalidate frames), and the "trap/exception unwinds Wasm
1194    /// frames" case, which is done internally via the `raise` libcall
1195    /// invoked after the main hostcall returns an error, and after we
1196    /// increment this version number.
1197    ///
1198    /// Note that this also handles the fiber/future-drop case because we
1199    /// *always* return into the trampoline to clean up;
1200    /// that trampoline immediately raises an error and uses the
1201    /// longjmp-like unwind within Cranelift frames to skip over all
1202    /// the guest Wasm frames, but not before it increments the
1203    /// store's execution version number.
1204    ///
1205    /// This field is in use only if guest debugging is enabled.
1206    pub execution_version: u64,
1207
1208    /// Current stack limit of the wasm module.
1209    ///
1210    /// For more information see `crates/cranelift/src/lib.rs`.
1211    pub stack_limit: UnsafeCell<usize>,
1212
1213    /// The `VMMemoryDefinition` for this store's GC heap.
1214    pub gc_heap: VMMemoryDefinition,
1215
1216    /// The value of the frame pointer register in the trampoline used
1217    /// to call from Wasm to the host.
1218    ///
1219    /// Maintained by our Wasm-to-host trampoline, and cleared just
1220    /// before calling into Wasm in `catch_traps`.
1221    ///
1222    /// This member is `0` when Wasm is actively running and has not called out
1223    /// to the host.
1224    ///
1225    /// Used to find the start of a contiguous sequence of Wasm frames
1226    /// when walking the stack. Note that we record the FP of the
1227    /// *trampoline*'s frame, not the last Wasm frame, because we need
1228    /// to know the SP (bottom of frame) of the last Wasm frame as
1229    /// well in case we need to resume to an exception handler in that
1230    /// frame. The FP of the last Wasm frame can be recovered by
1231    /// loading the saved FP value at this FP address.
1232    pub last_wasm_exit_trampoline_fp: UnsafeCell<usize>,
1233
1234    /// The last Wasm program counter before we called from Wasm to the host.
1235    ///
1236    /// Maintained by our Wasm-to-host trampoline, and cleared just before
1237    /// calling into Wasm in `catch_traps`.
1238    ///
1239    /// This member is `0` when Wasm is actively running and has not called out
1240    /// to the host.
1241    ///
1242    /// Used when walking a contiguous sequence of Wasm frames.
1243    pub last_wasm_exit_pc: UnsafeCell<usize>,
1244
1245    /// The last host stack pointer before we called into Wasm from the host.
1246    ///
1247    /// Maintained by our host-to-Wasm trampoline. This member is `0` when Wasm
1248    /// is not running, and it's set to nonzero once a host-to-wasm trampoline
1249    /// is executed.
1250    ///
1251    /// When a host function is wrapped into a `wasmtime::Func` and is then
1252    /// called from the host, this member is not changed, meaning that the
1253    /// previous activation pointed to by `last_wasm_exit_trampoline_fp` is
1254    /// still the last set of Wasm frames on the stack.
1255    ///
1256    /// This field is saved/restored during fiber suspension and resumption
1257    /// as part of `CallThreadState::swap`.
1258    ///
1259    /// This field is used to find the end of a contiguous sequence of Wasm
1260    /// frames when walking the stack. Additionally it's used when a trap is
1261    /// raised as part of the set of parameters used to resume in the entry
1262    /// trampoline's "catch" block.
1263    pub last_wasm_entry_sp: UnsafeCell<usize>,
1264
1265    /// Same as `last_wasm_entry_sp`, but for the `fp` of the trampoline.
1266    pub last_wasm_entry_fp: UnsafeCell<usize>,
1267
1268    /// The last trap handler from a host-to-wasm entry trampoline on the stack.
1269    ///
1270    /// This field is configured when the host calls into wasm by the trampoline
1271    /// itself. It stores the `pc` of an exception handler suitable to handle
1272    /// all traps (or uncaught exceptions).
1273    pub last_wasm_entry_trap_handler: UnsafeCell<usize>,
1274
1275    /// Stack information used by stack switching instructions. See documentation
1276    /// on `VMStackChain` for details.
1277    pub stack_chain: UnsafeCell<VMStackChain>,
1278
1279    /// A pointer to the embedder's `T` inside a `Store<T>`, for use with the
1280    /// `store-data-address` unsafe intrinsic.
1281    pub store_data: VmPtr<()>,
1282
1283    /// The range, in addresses, of the guard page that is currently in use.
1284    ///
1285    /// This field is used when signal handlers run to determine whether a
1286    /// faulting address lies within the guard page of an async stack, for
1287    /// example. If it does, the signal handler aborts with a stack overflow
1288    /// message similar to what would happen had the stack overflow occurred
1289    /// on the main thread. This field is, by default, a null..null range,
1290    /// indicating that no async guard is in use (i.e. no fiber). In that
1291    /// situation the field is still read, but it will never classify a fault
1292    /// as a guard page fault.
1293    pub async_guard_range: Range<*mut u8>,
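    // Illustrative sketch (hypothetical helper): a signal handler could
    // classify a faulting address roughly as follows, where the default
    // null..null range never contains any address:
    //
    //     fn is_async_guard_fault(store_ctx: &VMStoreContext, addr: *mut u8) -> bool {
    //         store_ctx.async_guard_range.contains(&addr)
    //     }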
1294
1295    /// The `context.{get,set}` values for the current thread in the component
1296    /// model. This is only used for `component-model-async` and slot[1] is only
1297    /// used for `component-model-threading`. Despite its conditional use this
1298    /// field is unconditionally present, as that avoids the need to make logic
1299    /// in `VMOffsets` conditional.
1300    ///
1301    /// This is saved/restored when threads are swapped in the component model.
1302    pub component_context: [u32; NUM_COMPONENT_CONTEXT_SLOTS],
1303}
1304
1305impl VMStoreContext {
1306    /// From the current saved trampoline FP, get the FP of the last
1307    /// Wasm frame. If the current saved trampoline FP is null, return
1308    /// null.
1309    ///
1310    /// We store only the trampoline FP, because (i) we need the
1311    /// trampoline FP, so we know the size (bottom) of the last Wasm
1312    /// frame; and (ii) the last Wasm frame, just above the trampoline
1313    /// frame, can be recovered via the FP chain.
1314    ///
1315    /// # Safety
1316    ///
1317    /// This function requires that the `last_wasm_exit_trampoline_fp`
1318    /// field either points to an active trampoline frame or is a null
1319    /// pointer.
1320    pub(crate) unsafe fn last_wasm_exit_fp(&self) -> usize {
1321        // SAFETY: the unsafe cell is safe to load (no other threads
1322        // will be writing our store when we have control), and the
1323        // helper function's safety condition is the same as ours.
1324        unsafe {
1325            let trampoline_fp = *self.last_wasm_exit_trampoline_fp.get();
1326            Self::wasm_exit_fp_from_trampoline_fp(trampoline_fp)
1327        }
1328    }
1329
1330    /// From any saved trampoline FP, get the FP of the last Wasm
1331    /// frame. If the given trampoline FP is null, return null.
1332    ///
1333    /// This differs from `last_wasm_exit_fp()` above in that it
1334    /// allows accessing activations further up the stack as well,
1335    /// e.g. via `CallThreadState::old_state`.
1336    ///
1337    /// # Safety
1338    ///
1339    /// This function requires that the provided FP value is valid,
1340    /// and points to an active trampoline frame, or is null.
1341    ///
1342    /// This function depends on the invariant that on all supported
1343    /// architectures, we store the previous FP value under the
1344    /// current FP. This is a property of our ABI that we control and
1345    /// ensure.
1346    pub(crate) unsafe fn wasm_exit_fp_from_trampoline_fp(trampoline_fp: usize) -> usize {
1347        if trampoline_fp != 0 {
1348            // SAFETY: We require that trampoline_fp points to a valid
1349            // frame, which will (by definition) contain an old FP value
1350            // that we can load.
1351            unsafe { *(trampoline_fp as *const usize) }
1352        } else {
1353            0
1354        }
1355    }
1356}
1357
1358// The `VMStoreContext` type is a pod-type with no destructor, and we don't
1359// access any fields from other threads, so add in these trait impls which are
1360// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
1361// variables in `VMStoreContext`.
1362unsafe impl Send for VMStoreContext {}
1363unsafe impl Sync for VMStoreContext {}
1364
1365// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
1366unsafe impl VmSafe for VMStoreContext {}
1367
1368impl Default for VMStoreContext {
1369    fn default() -> VMStoreContext {
1370        VMStoreContext {
1371            fuel_consumed: UnsafeCell::new(0),
1372            epoch_deadline: UnsafeCell::new(0),
1373            execution_version: 0,
1374            stack_limit: UnsafeCell::new(usize::max_value()),
1375            gc_heap: VMMemoryDefinition {
1376                base: NonNull::dangling().into(),
1377                current_length: AtomicUsize::new(0),
1378            },
1379            last_wasm_exit_trampoline_fp: UnsafeCell::new(0),
1380            last_wasm_exit_pc: UnsafeCell::new(0),
1381            last_wasm_entry_fp: UnsafeCell::new(0),
1382            last_wasm_entry_sp: UnsafeCell::new(0),
1383            last_wasm_entry_trap_handler: UnsafeCell::new(0),
1384            stack_chain: UnsafeCell::new(VMStackChain::Absent),
1385            async_guard_range: ptr::null_mut()..ptr::null_mut(),
1386            store_data: VmPtr::dangling(),
1387            component_context: [0; NUM_COMPONENT_CONTEXT_SLOTS],
1388        }
1389    }
1390}
1391
1392#[cfg(test)]
1393mod test_vmstore_context {
1394    use super::{VMMemoryDefinition, VMStoreContext};
1395    use core::mem::offset_of;
1396    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};
1397
1398    #[test]
1399    fn field_offsets() {
1400        let module = Module::new(StaticModuleIndex::from_u32(0));
1401        let offsets = VMOffsets::new(HostPtr, &module);
1402        assert_eq!(
1403            offset_of!(VMStoreContext, stack_limit),
1404            usize::from(offsets.ptr.vmstore_context_stack_limit())
1405        );
1406        assert_eq!(
1407            offset_of!(VMStoreContext, fuel_consumed),
1408            usize::from(offsets.ptr.vmstore_context_fuel_consumed())
1409        );
1410        assert_eq!(
1411            offset_of!(VMStoreContext, epoch_deadline),
1412            usize::from(offsets.ptr.vmstore_context_epoch_deadline())
1413        );
1414        assert_eq!(
1415            offset_of!(VMStoreContext, execution_version),
1416            usize::from(offsets.ptr.vmstore_context_execution_version())
1417        );
1418        assert_eq!(
1419            offset_of!(VMStoreContext, gc_heap),
1420            usize::from(offsets.ptr.vmstore_context_gc_heap())
1421        );
1422        assert_eq!(
1423            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, base),
1424            usize::from(offsets.ptr.vmstore_context_gc_heap_base())
1425        );
1426        assert_eq!(
1427            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, current_length),
1428            usize::from(offsets.ptr.vmstore_context_gc_heap_current_length())
1429        );
1430        assert_eq!(
1431            offset_of!(VMStoreContext, last_wasm_exit_trampoline_fp),
1432            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_trampoline_fp())
1433        );
1434        assert_eq!(
1435            offset_of!(VMStoreContext, last_wasm_exit_pc),
1436            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc())
1437        );
1438        assert_eq!(
1439            offset_of!(VMStoreContext, last_wasm_entry_fp),
1440            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp())
1441        );
1442        assert_eq!(
1443            offset_of!(VMStoreContext, last_wasm_entry_sp),
1444            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_sp())
1445        );
1446        assert_eq!(
1447            offset_of!(VMStoreContext, last_wasm_entry_trap_handler),
1448            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_trap_handler())
1449        );
1450        assert_eq!(
1451            offset_of!(VMStoreContext, stack_chain),
1452            usize::from(offsets.ptr.vmstore_context_stack_chain())
1453        );
1454        assert_eq!(
1455            offset_of!(VMStoreContext, store_data),
1456            usize::from(offsets.ptr.vmstore_context_store_data())
1457        );
1458        assert_eq!(
1459            offset_of!(VMStoreContext, component_context),
1460            usize::from(offsets.ptr.vmstore_context_component_context_slot(0))
1461        );
1462
1463        // Make sure that the calculation for the size of a slot is also
1464        // accurate.
1465        let slot_width = offsets.ptr.vmstore_context_component_context_slot(1)
1466            - offsets.ptr.vmstore_context_component_context_slot(0);
1467        let default = VMStoreContext::default();
1468        assert_eq!(
1469            size_of_val(&default.component_context[0]),
1470            usize::from(slot_width)
1471        );
1472    }
1473}
1474
1475/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
1476/// This has information about globals, memories, tables, and other runtime
1477/// state associated with the current instance.
1478///
1479/// The struct here is empty, as the sizes of these fields are dynamic, and
1480/// we can't describe them in Rust's type system. Sufficient memory is
1481/// allocated at runtime.
1482#[derive(Debug)]
1483#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
1484pub struct VMContext {
1485    _magic: u32,
1486}
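
// A standalone sketch of the access pattern implied above: because the real
// fields are laid out dynamically, the runtime and compiled code reach them by
// adding per-module byte offsets (computed by `VMOffsets`) to the `vmctx` base
// pointer. The offsets and magic value below are made up purely for
// illustration.
#[cfg(test)]
mod vmctx_offset_sketch {
    #[test]
    fn access_fields_by_byte_offset() {
        // Hypothetical layout: a 32-bit magic at offset 0 (like the real
        // `_magic` field) and a single 8-byte "global" at offset 16.
        const MAGIC_OFFSET: usize = 0;
        const GLOBAL_OFFSET: usize = 16;
        const DEMO_MAGIC: u32 = 0xC0FF_EE00; // stand-in for `VMCONTEXT_MAGIC`

        let mut ctx = [0u8; 32];
        let base = ctx.as_mut_ptr();

        unsafe {
            // Unaligned accesses because this demo buffer is only
            // byte-aligned; the real vmctx allocation is 16-byte aligned.
            base.add(MAGIC_OFFSET).cast::<u32>().write_unaligned(DEMO_MAGIC);
            base.add(GLOBAL_OFFSET).cast::<u64>().write_unaligned(42);

            assert_eq!(base.add(MAGIC_OFFSET).cast::<u32>().read_unaligned(), DEMO_MAGIC);
            assert_eq!(base.add(GLOBAL_OFFSET).cast::<u64>().read_unaligned(), 42);
        }
    }
}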
1487
1488impl VMContext {
1489    /// Helper function to cast between context types using a debug assertion to
1490    /// protect against some mistakes.
1491    #[inline]
1492    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
1493        // Note that in general the offset of the "magic" field is stored in
1494        // `VMOffsets::vmctx_magic`. Given, though, that this is a sanity check
1495        // on converting this pointer to another type, we ideally don't want to
1496        // read the offset from potentially corrupt memory; it's better to catch
1497        // errors here as soon as possible.
1498        //
1499        // To accomplish this the `VMContext` structure is laid out with the
1500        // magic field at a statically known offset (here it's 0 for now). This
1501        // static offset is asserted in `VMOffsets::from` and needs to be kept
1502        // in sync with this line for this debug assertion to work.
1503        //
1504        // Also note that this magic is only ever invalid in the presence of
1505        // bugs, meaning we don't actually read the magic and act differently
1506        // at runtime depending on what it is, so this is a debug assertion as
1507        // opposed to a regular assertion.
1508        unsafe {
1509            debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
1510        }
1511        opaque.cast()
1512    }
1513}
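
// A standalone sketch, with made-up types, of the "magic at a statically known
// offset" pattern that `from_opaque` relies on: both the opaque header and the
// concrete context begin with a 32-bit magic, so a cast can be sanity-checked
// in debug builds without consulting any offset table.
#[cfg(test)]
mod magic_cast_sketch {
    use core::ptr::NonNull;

    const DEMO_MAGIC: u32 = 0x1234_5678; // stand-in for `VMCONTEXT_MAGIC`

    #[repr(C)]
    struct OpaqueHeader {
        magic: u32,
    }

    #[repr(C)]
    struct DemoContext {
        magic: u32,
        some_field: u64,
    }

    /// Analogue of `VMContext::from_opaque`: debug-check the magic, then cast.
    unsafe fn demo_from_opaque(opaque: NonNull<OpaqueHeader>) -> NonNull<DemoContext> {
        unsafe {
            debug_assert_eq!(opaque.as_ref().magic, DEMO_MAGIC);
        }
        opaque.cast()
    }

    #[test]
    fn round_trip_cast() {
        let mut ctx = DemoContext {
            magic: DEMO_MAGIC,
            some_field: 7,
        };
        // Analogue of `VMOpaqueContext::from_vmcontext`: erase the type...
        let opaque = NonNull::from(&mut ctx).cast::<OpaqueHeader>();
        // ...and recover it, with the magic checked in debug builds.
        let concrete = unsafe { demo_from_opaque(opaque) };
        assert_eq!(unsafe { concrete.as_ref().some_field }, 7);
    }
}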
1514
1515/// A "raw" and unsafe representation of a WebAssembly value.
1516///
1517/// This is provided for use with the `Func::new_unchecked` and
1518/// `Func::call_unchecked` APIs. In general you are unlikely to need this
1519/// from Rust; prefer APIs like `Func::wrap` and `TypedFunc::call` instead.
1520///
1521/// This is notably an "unsafe" counterpart to `Val`, and it's recommended to
1522/// use `Val` where possible. An important note about this union is that
1523/// fields are all stored in little-endian format, regardless of the endianness
1524/// of the host system.
1525#[repr(C)]
1526#[derive(Copy, Clone)]
1527pub union ValRaw {
1528    /// A WebAssembly `i32` value.
1529    ///
1530    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
1531    /// type does not assign an interpretation of the upper bit as either signed
1532    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
1533    ///
1534    /// This value is always stored in a little-endian format.
1535    i32: i32,
1536
1537    /// A WebAssembly `i64` value.
1538    ///
1539    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
1540    /// type does not assign an interpretation of the upper bit as either signed
1541    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
1542    ///
1543    /// This value is always stored in a little-endian format.
1544    i64: i64,
1545
1546    /// A WebAssembly `f32` value.
1547    ///
1548    /// Note that the payload here is a Rust `u32`. This is to allow passing any
1549    /// representation of NaN into WebAssembly without risk of changing NaN
1550    /// payload bits as it gets passed around the system. Otherwise though this
1551    /// `u32` value is the return value of `f32::to_bits` in Rust.
1552    ///
1553    /// This value is always stored in a little-endian format.
1554    f32: u32,
1555
1556    /// A WebAssembly `f64` value.
1557    ///
1558    /// Note that the payload here is a Rust `u64`. This is to allow passing any
1559    /// representation of NaN into WebAssembly without risk of changing NaN
1560    /// payload bits as it gets passed around the system. Otherwise though this
1561    /// `u64` value is the return value of `f64::to_bits` in Rust.
1562    ///
1563    /// This value is always stored in a little-endian format.
1564    f64: u64,
1565
1566    /// A WebAssembly `v128` value.
1567    ///
1568    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
1569    /// but note that `v128` in WebAssembly is often considered a vector type
1570    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
1571    /// of the underlying bits is left up to the instructions which consume
1572    /// this value.
1573    ///
1574    /// This value is always stored in a little-endian format.
1575    v128: [u8; 16],
1576
1577    /// A WebAssembly `funcref` value (or one of its subtypes).
1578    ///
1579    /// The payload here is a pointer which is runtime-defined. This is one of
1580    /// the main points of unsafety about the `ValRaw` type as the validity of
1581    /// the pointer here is not easily verified and must be preserved by
1582    /// carefully calling the correct functions throughout the runtime.
1583    ///
1584    /// This value is always stored in a little-endian format.
1585    funcref: *mut c_void,
1586
1587    /// A WebAssembly `externref` value (or one of its subtypes).
1588    ///
1589    /// The payload here is a compressed pointer value which is
1590    /// runtime-defined. This is one of the main points of unsafety about the
1591    /// `ValRaw` type as the validity of the pointer here is not easily verified
1592    /// and must be preserved by carefully calling the correct functions
1593    /// throughout the runtime.
1594    ///
1595    /// This value is always stored in a little-endian format.
1596    externref: u32,
1597
1598    /// A WebAssembly `anyref` value (or one of its subtypes).
1599    ///
1600    /// The payload here is a compressed pointer value which is
1601    /// runtime-defined. This is one of the main points of unsafety about the
1602    /// `ValRaw` type as the validity of the pointer here is not easily verified
1603    /// and must be preserved by carefully calling the correct functions
1604    /// throughout the runtime.
1605    ///
1606    /// This value is always stored in a little-endian format.
1607    anyref: u32,
1608
1609    /// A WebAssembly `exnref` value (or one of its subtypes).
1610    ///
1611    /// The payload here is a compressed pointer value which is
1612    /// runtime-defined. This is one of the main points of unsafety about the
1613    /// `ValRaw` type as the validity of the pointer here is not easily verified
1614    /// and must be preserved by carefully calling the correct functions
1615    /// throughout the runtime.
1616    ///
1617    /// This value is always stored in a little-endian format.
1618    exnref: u32,
1619}
1620
1621// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
1622// are some simple assertions about the shape of the type which are additionally
1623// matched in C.
1624const _: () = {
1625    assert!(mem::size_of::<ValRaw>() == 16);
1626    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
1627};
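
// A brief usage sketch of `ValRaw`'s constructors and accessors defined below.
// The little-endian storage noted above is an internal detail handled by the
// accessors, so callers observe ordinary host-endian values on any platform.
#[cfg(test)]
mod valraw_usage_sketch {
    use super::ValRaw;

    #[test]
    fn constructor_accessor_round_trips() {
        let a = ValRaw::i32(-7);
        let b = ValRaw::f64(3.5f64.to_bits());
        let c = ValRaw::v128(0x0102_0304_0506_0708_090a_0b0c_0d0e_0f10);

        assert_eq!(a.get_i32(), -7);
        assert_eq!(f64::from_bits(b.get_f64()), 3.5);
        assert_eq!(c.get_v128(), 0x0102_0304_0506_0708_090a_0b0c_0d0e_0f10);
    }
}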
1628
1629// This type is just a bag-of-bits so it's up to the caller to figure out how
1630// to safely deal with threading concerns and safely access interior bits.
1631unsafe impl Send for ValRaw {}
1632unsafe impl Sync for ValRaw {}
1633
1634impl fmt::Debug for ValRaw {
1635    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1636        struct Hex<T>(T);
1637        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1638            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1639                let bytes = mem::size_of::<T>();
1640                let hex_digits_per_byte = 2;
1641                let hex_digits = bytes * hex_digits_per_byte;
1642                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1643            }
1644        }
1645
1646        unsafe {
1647            f.debug_struct("ValRaw")
1648                .field("i32", &Hex(self.i32))
1649                .field("i64", &Hex(self.i64))
1650                .field("f32", &Hex(self.f32))
1651                .field("f64", &Hex(self.f64))
1652                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1653                .field("funcref", &self.funcref)
1654                .field("externref", &Hex(self.externref))
1655                .field("anyref", &Hex(self.anyref))
1656                .field("exnref", &Hex(self.exnref))
1657                .finish()
1658        }
1659    }
1660}
1661
1662impl ValRaw {
1663    /// Create a null reference that is compatible with any of
1664    /// `{any,extern,func,exn}ref`.
1665    pub fn null() -> ValRaw {
1666        unsafe {
1667            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1668            debug_assert_eq!(raw.get_anyref(), 0);
1669            debug_assert_eq!(raw.get_exnref(), 0);
1670            debug_assert_eq!(raw.get_externref(), 0);
1671            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1672            raw
1673        }
1674    }
1675
1676    /// Creates a WebAssembly `i32` value
1677    #[inline]
1678    pub fn i32(i: i32) -> ValRaw {
1679        // Note that this is intentionally not setting the `i32` field, instead
1680        // setting the `i64` field with a zero-extended version of `i`. For more
1681        // information on this see the comments on `Lower for Result` in the
1682        // `wasmtime` crate (and the illustrative sketch after this `impl`
1683        // block). All `ValRaw` constructors are constrained to guarantee that
1684        // the initial 64 bits are always initialized.
1685        ValRaw::u64(i.cast_unsigned().into())
1686    }
1687
1688    /// Creates a WebAssembly `i64` value
1689    #[inline]
1690    pub fn i64(i: i64) -> ValRaw {
1691        ValRaw { i64: i.to_le() }
1692    }
1693
1694    /// Creates a WebAssembly `i32` value
1695    #[inline]
1696    pub fn u32(i: u32) -> ValRaw {
1697        // See comments in `ValRaw::i32` for why this is setting the upper
1698        // 32-bits as well.
1699        ValRaw::u64(i.into())
1700    }
1701
1702    /// Creates a WebAssembly `i64` value
1703    #[inline]
1704    pub fn u64(i: u64) -> ValRaw {
1705        ValRaw::i64(i as i64)
1706    }
1707
1708    /// Creates a WebAssembly `f32` value
1709    #[inline]
1710    pub fn f32(i: u32) -> ValRaw {
1711        // See comments in `ValRaw::i32` for why this is setting the upper
1712        // 32-bits as well.
1713        ValRaw::u64(i.into())
1714    }
1715
1716    /// Creates a WebAssembly `f64` value
1717    #[inline]
1718    pub fn f64(i: u64) -> ValRaw {
1719        ValRaw { f64: i.to_le() }
1720    }
1721
1722    /// Creates a WebAssembly `v128` value
1723    #[inline]
1724    pub fn v128(i: u128) -> ValRaw {
1725        ValRaw {
1726            v128: i.to_le_bytes(),
1727        }
1728    }
1729
1730    /// Creates a WebAssembly `funcref` value
1731    #[inline]
1732    pub fn funcref(i: *mut c_void) -> ValRaw {
1733        ValRaw {
1734            funcref: i.map_addr(|i| i.to_le()),
1735        }
1736    }
1737
1738    /// Creates a WebAssembly `externref` value
1739    #[inline]
1740    pub fn externref(e: u32) -> ValRaw {
1741        assert!(cfg!(feature = "gc") || e == 0);
1742        ValRaw {
1743            externref: e.to_le(),
1744        }
1745    }
1746
1747    /// Creates a WebAssembly `anyref` value
1748    #[inline]
1749    pub fn anyref(r: u32) -> ValRaw {
1750        assert!(cfg!(feature = "gc") || r == 0);
1751        ValRaw { anyref: r.to_le() }
1752    }
1753
1754    /// Creates a WebAssembly `exnref` value
1755    #[inline]
1756    pub fn exnref(r: u32) -> ValRaw {
1757        assert!(cfg!(feature = "gc") || r == 0);
1758        ValRaw { exnref: r.to_le() }
1759    }
1760
1761    #[inline]
1762    pub(crate) fn vmgcref(r: Option<VMGcRef>) -> ValRaw {
1763        let raw = r.map_or(0, |r| r.as_raw_u32());
1764
1765        // NB: All `VMGcRef`-based `ValRaw`s are the same.
1766        debug_assert_eq!(raw, ValRaw::anyref(raw).get_exnref());
1767        debug_assert_eq!(raw, ValRaw::exnref(raw).get_externref());
1768        debug_assert_eq!(raw, ValRaw::externref(raw).get_anyref());
1769
1770        ValRaw::anyref(raw)
1771    }
1772
1773    /// Gets the WebAssembly `i32` value
1774    #[inline]
1775    pub fn get_i32(&self) -> i32 {
1776        unsafe { i32::from_le(self.i32) }
1777    }
1778
1779    /// Gets the WebAssembly `i64` value
1780    #[inline]
1781    pub fn get_i64(&self) -> i64 {
1782        unsafe { i64::from_le(self.i64) }
1783    }
1784
1785    /// Gets the WebAssembly `i32` value
1786    #[inline]
1787    pub fn get_u32(&self) -> u32 {
1788        self.get_i32().cast_unsigned()
1789    }
1790
1791    /// Gets the WebAssembly `i64` value
1792    #[inline]
1793    pub fn get_u64(&self) -> u64 {
1794        self.get_i64().cast_unsigned()
1795    }
1796
1797    /// Gets the WebAssembly `f32` value
1798    #[inline]
1799    pub fn get_f32(&self) -> u32 {
1800        unsafe { u32::from_le(self.f32) }
1801    }
1802
1803    /// Gets the WebAssembly `f64` value
1804    #[inline]
1805    pub fn get_f64(&self) -> u64 {
1806        unsafe { u64::from_le(self.f64) }
1807    }
1808
1809    /// Gets the WebAssembly `v128` value
1810    #[inline]
1811    pub fn get_v128(&self) -> u128 {
1812        unsafe { u128::from_le_bytes(self.v128) }
1813    }
1814
1815    /// Gets the WebAssembly `funcref` value
1816    #[inline]
1817    pub fn get_funcref(&self) -> *mut c_void {
1818        let addr = unsafe { usize::from_le(self.funcref.addr()) };
1819        core::ptr::with_exposed_provenance_mut(addr)
1820    }
1821
1822    /// Gets the WebAssembly `externref` value
1823    #[inline]
1824    pub fn get_externref(&self) -> u32 {
1825        let externref = u32::from_le(unsafe { self.externref });
1826        assert!(cfg!(feature = "gc") || externref == 0);
1827        externref
1828    }
1829
1830    /// Gets the WebAssembly `anyref` value
1831    #[inline]
1832    pub fn get_anyref(&self) -> u32 {
1833        let anyref = u32::from_le(unsafe { self.anyref });
1834        assert!(cfg!(feature = "gc") || anyref == 0);
1835        anyref
1836    }
1837
1838    /// Gets the WebAssembly `exnref` value
1839    #[inline]
1840    pub fn get_exnref(&self) -> u32 {
1841        let exnref = u32::from_le(unsafe { self.exnref });
1842        assert!(cfg!(feature = "gc") || exnref == 0);
1843        exnref
1844    }
1845
1846    /// Get the inner `VMGcRef`.
1847    pub(crate) fn get_vmgcref(&self) -> Option<crate::vm::VMGcRef> {
1848        debug_assert_eq!(self.get_anyref(), self.get_exnref());
1849        debug_assert_eq!(self.get_anyref(), self.get_externref());
1850        VMGcRef::from_raw_u32(self.get_anyref())
1851    }
1852}
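
// An illustration of the zero-extension guarantee documented in `ValRaw::i32`
// above: constructing an `i32` initializes the full low 64 bits, so reading
// the value back as a `u64` yields the zero-extended (not sign-extended) bits.
#[cfg(test)]
mod valraw_i32_zero_extension_sketch {
    use super::ValRaw;

    #[test]
    fn i32_is_zero_extended_to_64_bits() {
        let v = ValRaw::i32(-1);
        assert_eq!(v.get_i32(), -1);
        // The upper 32 bits are zero and, importantly, initialized.
        assert_eq!(v.get_u64(), 0xffff_ffff);
    }
}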
1853
1854/// An "opaque" version of `VMContext` which must be explicitly casted to a
1855/// target context.
1856///
1857/// This context is used to represent that the contexts stored in a
1858/// `VMFuncRef` can have any type and don't carry an implicit
1859/// structure. Neither wasmtime nor cranelift-generated code can rely on the
1860/// structure of an opaque context in general; only the code which configured
1861/// the context may rely on a particular structure, because the only guarantee
1862/// made is that the context pointer configured for `VMFuncRef` is passed as
1863/// the first parameter to the callee.
1864///
1865/// Note that Wasmtime currently has a layout where all contexts that are cast
1866/// to an opaque context start with a 32-bit "magic" which can be used in debug
1867/// mode to assert that the casts here are correct, providing at least a
1868/// little protection against incorrect casts.
1869pub struct VMOpaqueContext {
1870    pub(crate) magic: u32,
1871    _marker: marker::PhantomPinned,
1872}
1873
1874impl VMOpaqueContext {
1875    /// Helper function to clearly indicate that casts are desired.
1876    #[inline]
1877    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1878        ptr.cast()
1879    }
1880
1881    /// Helper function to clearly indicate that casts are desired.
1882    #[inline]
1883    pub fn from_vm_array_call_host_func_context(
1884        ptr: NonNull<VMArrayCallHostFuncContext>,
1885    ) -> NonNull<VMOpaqueContext> {
1886        ptr.cast()
1887    }
1888}