// wasmtime/runtime/vm/vmcontext.rs

1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{GcStore, InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
9use crate::store::StoreOpaque;
10use crate::vm::stack_switching::VMStackChain;
11use core::cell::UnsafeCell;
12use core::ffi::c_void;
13use core::fmt;
14use core::marker;
15use core::mem::{self, MaybeUninit};
16use core::ops::Range;
17use core::ptr::{self, NonNull};
18use core::sync::atomic::{AtomicUsize, Ordering};
19use wasmtime_environ::{
20    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
21    DefinedTagIndex, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
22};
23
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
///
/// Note that this is an `unsafe` function pointer type: invoking it requires
/// upholding the argument contract above (valid, non-null pointers and a
/// sufficiently large `ValRaw` buffer).
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;
52
/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
///
/// This is `repr(transparent)` over `VMFunctionBody`, so values of this type
/// are only ever used behind pointers as an opaque code-pointer target, never
/// called directly as a Rust function.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);
58
/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
///
/// Like `VMArrayCallFunction` this is `repr(transparent)` over
/// `VMFunctionBody` and is only ever manipulated behind pointers.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
69
/// An imported function.
///
/// This struct is `repr(C)` and read directly by compiled wasm code; its size
/// and field offsets are cross-checked against `wasmtime_environ::VMOffsets`
/// in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}
92
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Rust's layout of `VMFunctionImport` must agree with the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmfunction_import_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.size_of_vmfunction_import()),
            size_of::<VMFunctionImport>(),
        );
        assert_eq!(
            usize::from(offsets.vmfunction_import_wasm_call()),
            offset_of!(VMFunctionImport, wasm_call),
        );
        assert_eq!(
            usize::from(offsets.vmfunction_import_array_call()),
            offset_of!(VMFunctionImport, array_call),
        );
        assert_eq!(
            usize::from(offsets.vmfunction_import_vmctx()),
            offset_of!(VMFunctionImport, vmctx),
        );
    }
}
122
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
///
/// The test module below pins this type at exactly one byte in size.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}
132
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    /// `VMFunctionBody` is a placeholder for raw code bytes and must stay
    /// exactly one byte large.
    #[test]
    fn check_vmfunction_body_offsets() {
        let body_size = size_of::<VMFunctionBody>();
        assert_eq!(1, body_size);
    }
}
143
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
///
/// This struct is `repr(C)`; its layout is validated against
/// `wasmtime_environ::VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}
161
#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Rust's layout of `VMTableImport` must agree with the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmtable_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.size_of_vmtable_import()),
            size_of::<VMTableImport>(),
        );
        assert_eq!(
            usize::from(offsets.vmtable_import_from()),
            offset_of!(VMTableImport, from),
        );
        assert_eq!(
            usize::from(offsets.vmtable_import_vmctx()),
            offset_of!(VMTableImport, vmctx),
        );
        assert_eq!(
            usize::from(offsets.vmtable_import_index()),
            offset_of!(VMTableImport, index),
        );
    }

    /// `VMTableImport` is also used for recording tables used by components,
    /// so the size calculations of `VMOffsets` and `VMComponentOffsets` must
    /// stay in agreement.
    #[test]
    fn ensure_sizes_match() {
        let core_size = VMOffsets::new(HostPtr, &Module::new()).size_of_vmtable_import();
        let component_size =
            VMComponentOffsets::new(HostPtr, &Component::default()).size_of_vmtable_import();
        assert_eq!(core_size, component_size);
    }
}
207
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
///
/// This struct is `repr(C)`; its layout is validated against
/// `wasmtime_environ::VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}
225
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Rust's layout of `VMMemoryImport` must agree with the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmmemory_import_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.size_of_vmmemory_import()),
            size_of::<VMMemoryImport>(),
        );
        assert_eq!(
            usize::from(offsets.vmmemory_import_from()),
            offset_of!(VMMemoryImport, from),
        );
        assert_eq!(
            usize::from(offsets.vmmemory_import_vmctx()),
            offset_of!(VMMemoryImport, vmctx),
        );
        assert_eq!(
            usize::from(offsets.vmmemory_import_index()),
            offset_of!(VMMemoryImport, index),
        );
    }
}
255
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}
283
/// The kinds of globals that Wasmtime has.
///
/// This enum is `repr(C, u32)` so that its discriminant and payload have a
/// stable, C-compatible layout for use within `VMGlobalImport`.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
}

// SAFETY: the above enum is repr(C) and stores nothing else
unsafe impl VmSafe for VMGlobalKind {}
299
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Rust's layout of `VMGlobalImport` must agree with the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmglobal_import_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.size_of_vmglobal_import()),
            size_of::<VMGlobalImport>(),
        );
        assert_eq!(
            usize::from(offsets.vmglobal_import_from()),
            offset_of!(VMGlobalImport, from),
        );
    }
}
321
/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
///
/// This struct is `repr(C)`; its layout is validated against
/// `wasmtime_environ::VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}
339
#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Rust's layout of `VMTagImport` must agree with the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmtag_import_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.size_of_vmtag_import()),
            size_of::<VMTagImport>(),
        );
        assert_eq!(
            usize::from(offsets.vmtag_import_from()),
            offset_of!(VMTagImport, from),
        );
    }
}
360
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
///
/// Note that this type deliberately does not derive `Clone`/`Copy` since it
/// contains an `AtomicUsize`; see [`VMMemoryDefinition::load`] for producing
/// a snapshot copy.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}
381
impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value--a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    #[inline]
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null, properly aligned, and point to a live
    /// `VMMemoryDefinition` for the duration of this call.
    #[inline]
    pub unsafe fn load(ptr: *mut Self) -> Self {
        // SAFETY: validity of `ptr` is the caller's obligation per the
        // contract documented above.
        let other = unsafe { &*ptr };
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
407
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Rust's layout of `VMMemoryDefinition` must agree with the offsets
    /// that compiled code is generated against.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.ptr.size_of_vmmemory_definition()),
            size_of::<VMMemoryDefinition>(),
        );
        assert_eq!(
            usize::from(offsets.ptr.vmmemory_definition_base()),
            offset_of!(VMMemoryDefinition, base),
        );
        assert_eq!(
            usize::from(offsets.ptr.vmmemory_definition_current_length()),
            offset_of!(VMMemoryDefinition, current_length),
        );
        // TODO: also assert that the size of the `current_length` field
        // matches `offsets.size_of_vmmemory_definition_current_length()`.
    }
}
439
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
///
/// This struct is `repr(C)`; its layout is validated against
/// `wasmtime_environ::VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}
454
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Rust's layout of `VMTableDefinition` must agree with the offsets
    /// that compiled code is generated against.
    #[test]
    fn check_vmtable_definition_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.size_of_vmtable_definition()),
            size_of::<VMTableDefinition>(),
        );
        assert_eq!(
            usize::from(offsets.vmtable_definition_base()),
            offset_of!(VMTableDefinition, base),
        );
        assert_eq!(
            usize::from(offsets.vmtable_definition_current_elements()),
            offset_of!(VMTableDefinition, current_elements),
        );
    }
}
480
/// The storage for a WebAssembly global defined within the instance.
///
/// The 16-byte, 16-byte-aligned `storage` buffer is large enough for any
/// global value, including `v128` (see the alignment/size tests below).
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // Raw little-endian-for-vectors / native-endian-for-scalars bytes of the
    // value; reinterpreted by the typed accessors in the `impl` below.
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}
494
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The global storage must be aligned at least as strictly as every
    /// value type a global can hold.
    #[test]
    fn check_vmglobal_definition_alignment() {
        let actual = align_of::<VMGlobalDefinition>();
        let required = [
            align_of::<i32>(),
            align_of::<i64>(),
            align_of::<f32>(),
            align_of::<f64>(),
            align_of::<[u8; 16]>(),
            align_of::<[f32; 4]>(),
            align_of::<[f64; 2]>(),
        ];
        for needed in required {
            assert!(actual >= needed);
        }
    }

    /// Rust's size of `VMGlobalDefinition` must agree with the offset
    /// calculations compiled code is generated against.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.ptr.size_of_vmglobal_definition()),
            size_of::<VMGlobalDefinition>(),
        );
    }

    /// The globals area within a `VMContext` must begin 16-byte aligned.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(0, offsets.vmctx_globals_begin() % 16);
    }

    /// With GC enabled, a GC reference must fit within a global's storage.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
535
impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
    ///
    /// The storage is zero-initialized, which for every supported type is the
    /// all-zeros value (0, 0.0, null reference).
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        unsafe {
            match wasm_ty {
                WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
                WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
                WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
                WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
                WasmValType::V128 => global.set_u128(raw.get_v128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => {
                        let r = VMGcRef::from_raw_u32(raw.get_externref());
                        global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                    }
                    WasmHeapTopType::Any => {
                        let r = VMGcRef::from_raw_u32(raw.get_anyref());
                        global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                    }
                    WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                    WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
                    WasmHeapTopType::Exn => {
                        let r = VMGcRef::from_raw_u32(raw.get_exnref());
                        global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                    }
                },
            }
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        unsafe {
            Ok(match wasm_ty {
                WasmValType::I32 => ValRaw::i32(*self.as_i32()),
                WasmValType::I64 => ValRaw::i64(*self.as_i64()),
                WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
                WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
                WasmValType::V128 => ValRaw::v128(self.get_u128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    // GC references are cloned out of the global so the
                    // returned `ValRaw` owns its own reference.
                    WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                        Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }),
                    WasmHeapTopType::Any => ValRaw::anyref({
                        match self.as_gc_ref() {
                            Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Exn => ValRaw::exnref({
                        match self.as_gc_ref() {
                            Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                },
            })
        }
    }

    // The typed accessors below reinterpret the raw `storage` bytes. Each is
    // `unsafe`: the caller must ensure the global currently holds a value of
    // the accessed type (see `from_val_raw`/`to_val_raw` above).

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i32>()) }
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>()) }
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as an u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i64>()) }
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>()) }
    }

    /// Return a reference to the value as an u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as an u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f32>()) }
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>()) }
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f64>()) }
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>()) }
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        unsafe { u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>())) }
    }

    /// Sets the 128-bit vector values.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        unsafe {
            *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
        }
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = unsafe { (*raw_ptr).as_ref() };
        // Without the `gc` feature only null GC references can ever be stored.
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        // Without the `gc` feature only null GC references can ever be stored.
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        // The destination is treated as uninitialized: unlike `write_gc_ref`
        // below, no previous value is read or dropped here.
        let dest = unsafe {
            &mut *(self
                .storage
                .as_mut()
                .as_mut_ptr()
                .cast::<MaybeUninit<Option<VMGcRef>>>())
        };

        gc_store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        // Without the `gc` feature only null GC references can ever be stored.
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
        assert!(cfg!(feature = "gc") || dest.is_none());

        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        unsafe { *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>()) }
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>()) }
    }
}
773
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The Rust size of a shared type index must agree with the offset
    /// calculations compiled code is generated against.
    #[test]
    fn check_vmshared_type_index() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.size_of_vmshared_type_index()),
            size_of::<VMSharedTypeIndex>(),
        );
    }
}
790
/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Construct a tag definition whose payload signature is identified by
    /// `type_index`.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}
809
#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Rust's size of `VMTagDefinition` must agree with the offset
    /// calculations compiled code is generated against.
    #[test]
    fn check_vmtag_definition_offsets() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(
            usize::from(offsets.ptr.size_of_vmtag_definition()),
            size_of::<VMTagDefinition>(),
        );
    }

    /// The tags area within a `VMContext` must begin 16-byte aligned.
    #[test]
    fn check_vmtag_begins_aligned() {
        let offsets = VMOffsets::new(HostPtr, &Module::new());
        assert_eq!(0, offsets.vmctx_tags_begin() % 16);
    }
}
833
/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}
880
impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the length of `args_and_results` as the capacity of the buffer
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns whether a trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        me: NonNull<VMFuncRef>,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Dispatch on how the callee executes: via the Pulley interpreter when
        // one is supplied, otherwise as native code.
        match pulley {
            Some(vm) => unsafe { Self::array_call_interpreted(me, vm, caller, args_and_results) },
            None => unsafe { Self::array_call_native(me, caller, args_and_results) },
        }
    }

    /// Invokes this funcref's `array_call` through the Pulley interpreter
    /// `vm`.
    ///
    /// See `array_call` for the meaning of the arguments; the same safety
    /// contract applies here.
    unsafe fn array_call_interpreted(
        me: NonNull<VMFuncRef>,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If `caller` is actually a `VMArrayCallHostFuncContext` then skip the
        // interpreter, even though it's available, as `array_call` will be
        // native code.
        unsafe {
            if me.as_ref().vmctx.as_non_null().as_ref().magic
                == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
            {
                return Self::array_call_native(me, caller, args_and_results);
            }
            vm.call(
                me.as_ref().array_call.as_non_null().cast(),
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results,
            )
        }
    }

    /// Invokes this funcref's `array_call` as a native function pointer.
    ///
    /// See `array_call` for the meaning of the arguments; the same safety
    /// contract applies here.
    #[inline]
    unsafe fn array_call_native(
        me: NonNull<VMFuncRef>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        unsafe {
            // Reinterpret the type-erased `array_call` pointer as a function
            // pointer with the `VMArrayCallNative` signature; a union is used
            // here to perform that conversion.
            union GetNativePointer {
                native: VMArrayCallNative,
                ptr: NonNull<VMArrayCallFunction>,
            }
            let native = GetNativePointer {
                ptr: me.as_ref().array_call.as_non_null(),
            }
            .native;
            native(
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results.cast(),
                args_and_results.len(),
            )
        }
    }
}
968
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The Rust layout of `VMFuncRef` must match the layout that generated
    /// code computes via `VMOffsets`.
    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        let p = &offsets.ptr;

        assert_eq!(size_of::<VMFuncRef>(), usize::from(p.size_of_vm_func_ref()));

        // Pair up each Rust field offset with the offset the runtime expects.
        let checks = [
            (offset_of!(VMFuncRef, array_call), p.vm_func_ref_array_call()),
            (offset_of!(VMFuncRef, wasm_call), p.vm_func_ref_wasm_call()),
            (offset_of!(VMFuncRef, type_index), p.vm_func_ref_type_index()),
            (offset_of!(VMFuncRef, vmctx), p.vm_func_ref_vmctx()),
        ];
        for (rust_offset, env_offset) in checks {
            assert_eq!(rust_offset, usize::from(env_offset));
        }
    }
}
1002
// Macro invoked via `wasmtime_environ::foreach_builtin_function!` below to
// generate `VMBuiltinFunctionsArray` with one function-pointer field per
// builtin libcall.
macro_rules! define_builtin_array {
    // Main arm: receives the full list of builtin signatures, each of the
    // shape `name(param: ty, ...) -> ret;` with optional attributes.
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self>{
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    // Helper arms: map the symbolic type names used in builtin declarations
    // to their Rust representations.
    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (NonNull<VMContext>);
}
1059
// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

// Instantiate `VMBuiltinFunctionsArray` with one entry per builtin libcall.
wasmtime_environ::foreach_builtin_function!(define_builtin_array);

// Sanity-check that the array is a dense table of pointer-sized function
// pointers, one per builtin, with no padding.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
1071
/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,

    /// The range, in addresses, of the guard page that is currently in use.
    ///
    /// This field is used when signal handlers are run to determine whether a
    /// faulting address lies within the guard page of an async stack for
    /// example. If this happens then the signal handler aborts with a stack
    /// overflow message similar to what would happen had the stack overflow
    /// happened on the main thread. This field is, by default, a null..null
    /// range indicating that no async guard is in use (aka no fiber). In such a
    /// situation while this field is read it'll never classify a fault as a
    /// guard page fault.
    pub async_guard_range: Range<*mut u8>,
}

// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}
1178
1179impl Default for VMStoreContext {
1180    fn default() -> VMStoreContext {
1181        VMStoreContext {
1182            fuel_consumed: UnsafeCell::new(0),
1183            epoch_deadline: UnsafeCell::new(0),
1184            stack_limit: UnsafeCell::new(usize::max_value()),
1185            gc_heap: VMMemoryDefinition {
1186                base: NonNull::dangling().into(),
1187                current_length: AtomicUsize::new(0),
1188            },
1189            last_wasm_exit_fp: UnsafeCell::new(0),
1190            last_wasm_exit_pc: UnsafeCell::new(0),
1191            last_wasm_entry_fp: UnsafeCell::new(0),
1192            stack_chain: UnsafeCell::new(VMStackChain::Absent),
1193            async_guard_range: ptr::null_mut()..ptr::null_mut(),
1194        }
1195    }
1196}
1197
#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The Rust layout of `VMStoreContext` must match the field offsets that
    /// generated code computes via `VMOffsets`.
    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        let p = &offsets.ptr;

        // The GC heap is an embedded `VMMemoryDefinition`, so its interior
        // fields are checked relative to the start of `gc_heap`.
        let gc_heap = offset_of!(VMStoreContext, gc_heap);

        let checks = [
            (
                offset_of!(VMStoreContext, stack_limit),
                p.vmstore_context_stack_limit(),
            ),
            (
                offset_of!(VMStoreContext, fuel_consumed),
                p.vmstore_context_fuel_consumed(),
            ),
            (
                offset_of!(VMStoreContext, epoch_deadline),
                p.vmstore_context_epoch_deadline(),
            ),
            (gc_heap, p.vmstore_context_gc_heap()),
            (
                gc_heap + offset_of!(VMMemoryDefinition, base),
                p.vmstore_context_gc_heap_base(),
            ),
            (
                gc_heap + offset_of!(VMMemoryDefinition, current_length),
                p.vmstore_context_gc_heap_current_length(),
            ),
            (
                offset_of!(VMStoreContext, last_wasm_exit_fp),
                p.vmstore_context_last_wasm_exit_fp(),
            ),
            (
                offset_of!(VMStoreContext, last_wasm_exit_pc),
                p.vmstore_context_last_wasm_exit_pc(),
            ),
            (
                offset_of!(VMStoreContext, last_wasm_entry_fp),
                p.vmstore_context_last_wasm_entry_fp(),
            ),
            (
                offset_of!(VMStoreContext, stack_chain),
                p.vmstore_context_stack_chain(),
            ),
        ];
        for (rust_offset, env_offset) in checks {
            assert_eq!(rust_offset, usize::from(env_offset));
        }
    }
}
1250
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// Holds `VMCONTEXT_MAGIC` at offset 0 so that `from_opaque` can
    /// debug-assert that an opaque context really is a `VMContext`.
    _magic: u32,
}
1263
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// `opaque` must be a valid pointer to a context whose concrete type is
    /// `VMContext`; in debug builds the leading magic field is read through
    /// the pointer to check this.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        unsafe {
            debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        }
        opaque.cast()
    }
}
1290
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,

    /// A WebAssembly `exnref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    exnref: u32,
}
1396
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1409
1410impl fmt::Debug for ValRaw {
1411    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1412        struct Hex<T>(T);
1413        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1414            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1415                let bytes = mem::size_of::<T>();
1416                let hex_digits_per_byte = 2;
1417                let hex_digits = bytes * hex_digits_per_byte;
1418                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1419            }
1420        }
1421
1422        unsafe {
1423            f.debug_struct("ValRaw")
1424                .field("i32", &Hex(self.i32))
1425                .field("i64", &Hex(self.i64))
1426                .field("f32", &Hex(self.f32))
1427                .field("f64", &Hex(self.f64))
1428                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1429                .field("funcref", &self.funcref)
1430                .field("externref", &Hex(self.externref))
1431                .field("anyref", &Hex(self.anyref))
1432                .field("exnref", &Hex(self.exnref))
1433                .finish()
1434        }
1435    }
1436}
1437
impl ValRaw {
    /// Create a null reference that is compatible with any of
    /// `{any,extern,func,exn}ref`.
    pub fn null() -> ValRaw {
        unsafe {
            // All-zero bytes are a valid encoding of every reference field, so
            // a zeroed union is a universal null; the debug asserts below
            // double-check that each reference view reads back as null.
            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
            debug_assert_eq!(raw.get_anyref(), 0);
            debug_assert_eq!(raw.get_exnref(), 0);
            debug_assert_eq!(raw.get_externref(), 0);
            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
            raw
        }
    }

    /// Creates a WebAssembly `i32` value
    #[inline]
    pub fn i32(i: i32) -> ValRaw {
        // Note that this is intentionally not setting the `i32` field, instead
        // setting the `i64` field with a zero-extended version of `i`. For more
        // information on this see the comments on `Lower for Result` in the
        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
        // otherwise constrained to guarantee that the initial 64-bits are
        // always initialized.
        ValRaw::u64(i.cast_unsigned().into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn i64(i: i64) -> ValRaw {
        // Stored little-endian regardless of host endianness; see type docs.
        ValRaw { i64: i.to_le() }
    }

    /// Creates a WebAssembly `i32` value from an unsigned Rust `u32`
    #[inline]
    pub fn u32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `i64` value from an unsigned Rust `u64`
    #[inline]
    pub fn u64(i: u64) -> ValRaw {
        ValRaw::i64(i as i64)
    }

    /// Creates a WebAssembly `f32` value from its raw bits
    #[inline]
    pub fn f32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `f64` value from its raw bits
    #[inline]
    pub fn f64(i: u64) -> ValRaw {
        ValRaw { f64: i.to_le() }
    }

    /// Creates a WebAssembly `v128` value
    #[inline]
    pub fn v128(i: u128) -> ValRaw {
        ValRaw {
            v128: i.to_le_bytes(),
        }
    }

    /// Creates a WebAssembly `funcref` value
    #[inline]
    pub fn funcref(i: *mut c_void) -> ValRaw {
        // Only the address is byte-swapped to little-endian; provenance of the
        // pointer is retained via `map_addr`.
        ValRaw {
            funcref: i.map_addr(|i| i.to_le()),
        }
    }

    /// Creates a WebAssembly `externref` value
    #[inline]
    pub fn externref(e: u32) -> ValRaw {
        // Non-null GC references are only meaningful with the `gc` feature.
        assert!(cfg!(feature = "gc") || e == 0);
        ValRaw {
            externref: e.to_le(),
        }
    }

    /// Creates a WebAssembly `anyref` value
    #[inline]
    pub fn anyref(r: u32) -> ValRaw {
        // Non-null GC references are only meaningful with the `gc` feature.
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { anyref: r.to_le() }
    }

    /// Creates a WebAssembly `exnref` value
    #[inline]
    pub fn exnref(r: u32) -> ValRaw {
        // Non-null GC references are only meaningful with the `gc` feature.
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { exnref: r.to_le() }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_i32(&self) -> i32 {
        unsafe { i32::from_le(self.i32) }
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_i64(&self) -> i64 {
        unsafe { i64::from_le(self.i64) }
    }

    /// Gets the WebAssembly `i32` value, reinterpreted as unsigned
    #[inline]
    pub fn get_u32(&self) -> u32 {
        self.get_i32().cast_unsigned()
    }

    /// Gets the WebAssembly `i64` value, reinterpreted as unsigned
    #[inline]
    pub fn get_u64(&self) -> u64 {
        self.get_i64().cast_unsigned()
    }

    /// Gets the WebAssembly `f32` value's raw bits
    #[inline]
    pub fn get_f32(&self) -> u32 {
        unsafe { u32::from_le(self.f32) }
    }

    /// Gets the WebAssembly `f64` value's raw bits
    #[inline]
    pub fn get_f64(&self) -> u64 {
        unsafe { u64::from_le(self.f64) }
    }

    /// Gets the WebAssembly `v128` value
    #[inline]
    pub fn get_v128(&self) -> u128 {
        unsafe { u128::from_le_bytes(self.v128) }
    }

    /// Gets the WebAssembly `funcref` value
    #[inline]
    pub fn get_funcref(&self) -> *mut c_void {
        unsafe { self.funcref.map_addr(|i| usize::from_le(i)) }
    }

    /// Gets the WebAssembly `externref` value
    #[inline]
    pub fn get_externref(&self) -> u32 {
        let externref = u32::from_le(unsafe { self.externref });
        assert!(cfg!(feature = "gc") || externref == 0);
        externref
    }

    /// Gets the WebAssembly `anyref` value
    #[inline]
    pub fn get_anyref(&self) -> u32 {
        let anyref = u32::from_le(unsafe { self.anyref });
        assert!(cfg!(feature = "gc") || anyref == 0);
        anyref
    }

    /// Gets the WebAssembly `exnref` value
    #[inline]
    pub fn get_exnref(&self) -> u32 {
        let exnref = u32::from_le(unsafe { self.exnref });
        assert!(cfg!(feature = "gc") || exnref == 0);
        exnref
    }
}
1609
/// An "opaque" version of `VMContext` which must be explicitly casted to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are casted
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    /// The 32-bit "magic" identifying which concrete context type this
    /// pointer actually refers to (e.g. `VMCONTEXT_MAGIC`).
    pub(crate) magic: u32,
    /// Marker making this type `!Unpin`.
    _marker: marker::PhantomPinned,
}
1629
1630impl VMOpaqueContext {
1631    /// Helper function to clearly indicate that casts are desired.
1632    #[inline]
1633    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1634        ptr.cast()
1635    }
1636
1637    /// Helper function to clearly indicate that casts are desired.
1638    #[inline]
1639    pub fn from_vm_array_call_host_func_context(
1640        ptr: NonNull<VMArrayCallHostFuncContext>,
1641    ) -> NonNull<VMOpaqueContext> {
1642        ptr.cast()
1643    }
1644}