wasmtime/runtime/vm/vmcontext.rs

//! This file declares `VMContext` and several related structs which contain
//! fields that compiled wasm code accesses directly.

mod vm_host_func_context;

pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
use crate::prelude::*;
use crate::runtime::vm::{f32x4, f64x2, i8x16, GcStore, InterpreterRef, VMGcRef, VmPtr, VmSafe};
use crate::store::StoreOpaque;
use core::cell::UnsafeCell;
use core::ffi::c_void;
use core::fmt;
use core::marker;
use core::mem::{self, MaybeUninit};
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicUsize, Ordering};
use sptr::Strict;
use wasmtime_environ::{
    BuiltinFunctionIndex, DefinedMemoryIndex, Unsigned, VMSharedTypeIndex, WasmHeapTopType,
    WasmValType, VMCONTEXT_MAGIC,
};

/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMOpaqueContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;
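
// As a rough illustration (a sketch, not part of the runtime): a host
// function implementing this convention reads its parameters out of the
// `ValRaw` buffer and writes its results back into the same buffer. The
// function name and parameter names below are hypothetical:
//
//     unsafe extern "C" fn host_add(
//         _callee: NonNull<VMOpaqueContext>,
//         _caller: NonNull<VMOpaqueContext>,
//         args: NonNull<ValRaw>,
//         _capacity: usize,
//     ) -> bool {
//         // Read the two `i32` parameters from slots 0 and 1 ...
//         let a = args.as_ptr().add(0).read().get_i32();
//         let b = args.as_ptr().add(1).read().get_i32();
//         // ... and overwrite slot 0 with the single `i32` result.
//         args.as_ptr().add(0).write(ValRaw::i32(a.wrapping_add(b)));
//         true // no trap was recorded
//     }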

/// An opaque function pointer which might be a `VMArrayCallNative` or it
/// might be Pulley bytecode. Requires external knowledge to determine what
/// kind of function pointer this is.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);

/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallNative` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);

/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}

#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, wasm_call),
            usize::from(offsets.vmfunction_import_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, array_call),
            usize::from(offsets.vmfunction_import_array_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}

/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}

#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTable {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTable {}

#[cfg(test)]
mod test_vmtable {
    use super::VMTable;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    #[test]
    fn check_vmtable_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(size_of::<VMTable>(), usize::from(offsets.size_of_vmtable()));
        assert_eq!(
            offset_of!(VMTable, from),
            usize::from(offsets.vmtable_from())
        );
        assert_eq!(
            offset_of!(VMTable, vmctx),
            usize::from(offsets.vmtable_vmctx())
        );
    }

    #[test]
    fn ensure_sizes_match() {
        // Because we use `VMTable` for recording tables used by components, we
        // want to make sure that the size calculations between `VMOffsets` and
        // `VMComponentOffsets` stay the same.
        let module = Module::new();
        let vm_offsets = VMOffsets::new(HostPtr, &module);
        let component = Component::default();
        let vm_component_offsets = VMComponentOffsets::new(HostPtr, &component);
        assert_eq!(
            vm_offsets.size_of_vmtable(),
            vm_component_offsets.size_of_vmtable()
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}

#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, vmctx),
            usize::from(offsets.vmmemory_import_vmctx())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}

#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}

#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    #[test]
    fn check_vmtag_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagImport>(),
            usize::from(offsets.size_of_vmtag_import())
        );
        assert_eq!(
            offset_of!(VMTagImport, from),
            usize::from(offsets.vmtag_import_from())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}

impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value--a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = &*ptr;
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
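
// A sketch of why the relaxed load is tolerable (`in_bounds` is a
// hypothetical helper, not part of this module): a bounds check only needs a
// length that is never an *over*-estimate. Because shared memories only grow,
// a stale (smaller) `current_length` can at worst reject an access that a
// fully synchronized load would have allowed; it can never admit an
// out-of-bounds access.
//
//     fn in_bounds(def: &VMMemoryDefinition, offset: usize, len: usize) -> bool {
//         match offset.checked_add(len) {
//             // Conservative: `current_length()` may under-estimate.
//             Some(end) => end <= def.current_length(),
//             None => false,
//         }
//     }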

#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}

#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}

/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}

#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f32; 4]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f64; 2]>());
    }

    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}

impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        match wasm_ty {
            WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
            WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
            WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
            WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
            WasmValType::V128 => global.set_u128(raw.get_v128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => {
                    let r = VMGcRef::from_raw_u32(raw.get_externref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Any => {
                    let r = VMGcRef::from_raw_u32(raw.get_anyref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        Ok(match wasm_ty {
            WasmValType::I32 => ValRaw::i32(*self.as_i32()),
            WasmValType::I64 => ValRaw::i64(*self.as_i64()),
            WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
            WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
            WasmValType::V128 => ValRaw::v128(self.get_u128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                    Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                    None => 0,
                }),
                WasmHeapTopType::Any => ValRaw::anyref({
                    match self.as_gc_ref() {
                        Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }
                }),
                WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        })
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as a u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as a u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as a u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>()))
    }

    /// Sets the 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = (*raw_ptr).as_ref();
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self
            .storage
            .as_mut()
            .as_mut_ptr()
            .cast::<MaybeUninit<Option<VMGcRef>>>());

        gc_store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>());
        assert!(cfg!(feature = "gc") || dest.is_none());

        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return the value as a raw `VMFuncRef` pointer.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>())
    }

    /// Return a mutable reference to the value as a raw `VMFuncRef` pointer.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>())
    }
}
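
// As an illustrative sketch (hypothetical values; `store` is assumed to be a
// `&mut StoreOpaque`): storing an `i32` global and reading it back goes
// through the same 16-byte `storage` buffer, so the `wasm_ty` passed to
// `to_val_raw` must match the type used with `from_val_raw`.
//
//     let raw = ValRaw::i32(-7);
//     let global = unsafe {
//         VMGlobalDefinition::from_val_raw(store, WasmValType::I32, raw)?
//     };
//     let back = unsafe { global.to_val_raw(store, WasmValType::I32)? };
//     assert_eq!(back.get_i32(), -7);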

#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offsets.size_of_vmshared_type_index())
        );
    }
}

/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Construct a new tag definition with the given type id.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe`
// fields.
unsafe impl VmSafe for VMTagDefinition {}

#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    #[test]
    fn check_vmtag_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagDefinition>(),
            usize::from(offsets.ptr.size_of_vmtag_definition())
        );
    }

    #[test]
    fn check_vmtag_begins_aligned() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_tags_begin() % 16, 0);
    }
}

/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast, vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}

impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the args length as the length of `args_and_results`
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns `true` if the call succeeded, or `false` if it failed and a
    /// trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the function being called must be valid machine code if `pulley` is
    /// `None` or it must be valid bytecode if `pulley` is `Some`. Additionally
    /// `args_and_results` must be large enough to handle all the
    /// arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        &self,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        match pulley {
            Some(vm) => self.array_call_interpreted(vm, caller, args_and_results),
            None => self.array_call_native(caller, args_and_results),
        }
    }

    unsafe fn array_call_interpreted(
        &self,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If this function's `vmctx` is actually a
        // `VMArrayCallHostFuncContext` then skip the interpreter, even though
        // it's available, as `array_call` will be native code.
        if self.vmctx.as_non_null().as_ref().magic
            == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
        {
            return self.array_call_native(caller, args_and_results);
        }
        vm.call(
            self.array_call.as_non_null().cast(),
            self.vmctx.as_non_null(),
            caller,
            args_and_results,
        )
    }

    #[inline]
    unsafe fn array_call_native(
        &self,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Reinterpret the opaque `array_call` pointer as a callable
        // `VMArrayCallNative` function pointer via a union.
        union GetNativePointer {
            native: VMArrayCallNative,
            ptr: NonNull<VMArrayCallFunction>,
        }
        let native = GetNativePointer {
            ptr: self.array_call.as_non_null(),
        }
        .native;
        native(
            self.vmctx.as_non_null(),
            caller,
            args_and_results.cast(),
            args_and_results.len(),
        )
    }
}
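
// A sketch of how a caller drives `array_call` (hypothetical setup; `funcref`
// is assumed to be a valid `NonNull<VMFuncRef>` and `caller_vmctx` a valid
// `NonNull<VMOpaqueContext>`): arguments are written into a `ValRaw` buffer
// sized to `max(params, results)`, the call is made, and results are read
// back out of the same buffer.
//
//     let mut space = [ValRaw::i32(1), ValRaw::i32(2)];
//     let args = NonNull::from(&mut space[..]);
//     let succeeded = unsafe {
//         funcref.as_ref().array_call(None, caller_vmctx, args)
//     };
//     if succeeded {
//         let sum = space[0].get_i32();
//     }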

#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFuncRef>(),
            usize::from(offsets.ptr.size_of_vm_func_ref())
        );
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(offsets.ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(offsets.ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(offsets.ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(offsets.ptr.vm_func_ref_vmctx())
        );
    }
}

macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        ///
        /// Ignore improper ctypes to permit `__m128i` on x86_64.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                #[allow(improper_ctypes_definitions)]
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            #[allow(unused_doc_comments)]
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self> {
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (NonNull<VMContext>);
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

wasmtime_environ::foreach_builtin_function!(define_builtin_array);

const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
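
// For orientation (an illustrative sketch, not actual expansion output): for
// a hypothetical builtin declared as
// `memory32_grow(vmctx: vmctx, delta: u64, index: u32) -> pointer;` the macro
// above would produce a field along the lines of:
//
//     memory32_grow: unsafe extern "C" fn(NonNull<VMContext>, u64, u32) -> *mut u8,
//
// Compiled code indexes into this array by `BuiltinFunctionIndex` and makes
// an indirect call, which is why the const assertion above pins the array's
// size to exactly one pointer per builtin.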

/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_fp: UnsafeCell<usize>,
}

// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}

impl Default for VMStoreContext {
    fn default() -> VMStoreContext {
        VMStoreContext {
            fuel_consumed: UnsafeCell::new(0),
            epoch_deadline: UnsafeCell::new(0),
            stack_limit: UnsafeCell::new(usize::max_value()),
            gc_heap: VMMemoryDefinition {
                base: NonNull::dangling().into(),
                current_length: AtomicUsize::new(0),
            },
            last_wasm_exit_fp: UnsafeCell::new(0),
            last_wasm_exit_pc: UnsafeCell::new(0),
            last_wasm_entry_fp: UnsafeCell::new(0),
        }
    }
}
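
// To make the fuel encoding concrete (a sketch; `add_fuel` is a hypothetical
// helper, not an API of this module): injecting `n` units of fuel subtracts
// `n` from `fuel_consumed`, compiled code then adds the cost of each block it
// executes, and per the field docs above a trap is raised once the value
// turns positive.
//
//     fn add_fuel(ctx: &VMStoreContext, n: i64) {
//         unsafe {
//             // A value of `-n` means "n units remaining"; execution counts
//             // the field back up toward zero.
//             *ctx.fuel_consumed.get() -= n;
//         }
//     }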

#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMStoreContext, stack_limit),
            usize::from(offsets.ptr.vmstore_context_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStoreContext, fuel_consumed),
            usize::from(offsets.ptr.vmstore_context_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMStoreContext, epoch_deadline),
            usize::from(offsets.ptr.vmstore_context_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap),
            usize::from(offsets.ptr.vmstore_context_gc_heap())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmstore_context_gc_heap_base())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmstore_context_gc_heap_current_length())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_pc),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp())
        );
    }
}

/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// There's some more discussion about this within `wasmtime/src/lib.rs` but
    /// the idea is that we want to tell the compiler that this contains
    /// pointers which transitively refer back to the `VMContext` itself, to
    /// suppress some optimizations that might otherwise assume this doesn't
    /// exist.
    ///
    /// The self-referential pointer we care about is the `*mut Store` pointer
    /// early on in this context, which if you follow through enough levels of
    /// nesting, eventually can refer back to this `VMContext`.
    pub _marker: marker::PhantomPinned,
}

impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending on what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }
}
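
// A sketch of the intended cast discipline (hypothetical `vmctx` value of
// type `NonNull<VMContext>`): an opaque context handed to the runtime is only
// downcast back to a concrete `VMContext` through `from_opaque`, so the magic
// check can catch a context of the wrong kind in debug builds.
//
//     let opaque: NonNull<VMOpaqueContext> = VMOpaqueContext::from_vmcontext(vmctx);
//     // ... later, once we know this opaque context is a core Wasm one:
//     let vmctx: NonNull<VMContext> = unsafe { VMContext::from_opaque(opaque) };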

/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,
}

// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}

impl fmt::Debug for ValRaw {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        struct Hex<T>(T);
        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                let bytes = mem::size_of::<T>();
                let hex_digits_per_byte = 2;
                let hex_digits = bytes * hex_digits_per_byte;
                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
            }
        }

        unsafe {
            f.debug_struct("ValRaw")
                .field("i32", &Hex(self.i32))
                .field("i64", &Hex(self.i64))
                .field("f32", &Hex(self.f32))
                .field("f64", &Hex(self.f64))
                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
                .field("funcref", &self.funcref)
                .field("externref", &Hex(self.externref))
                .field("anyref", &Hex(self.anyref))
                .finish()
        }
    }
}

impl ValRaw {
    /// Create a null reference that is compatible with any of
    /// `{any,extern,func}ref`.
    pub fn null() -> ValRaw {
        unsafe {
            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
            debug_assert_eq!(raw.get_anyref(), 0);
            debug_assert_eq!(raw.get_externref(), 0);
            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
            raw
        }
    }

    /// Creates a WebAssembly `i32` value
    #[inline]
    pub fn i32(i: i32) -> ValRaw {
        // Note that this is intentionally not setting the `i32` field, instead
        // setting the `i64` field with a zero-extended version of `i`. For more
        // information on this see the comments on `Lower for Result` in the
        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
        // constrained to guarantee that the initial 64 bits are always
        // initialized.
        ValRaw::u64(i.unsigned().into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn i64(i: i64) -> ValRaw {
        ValRaw { i64: i.to_le() }
    }

    /// Creates a WebAssembly `i32` value from a `u32`
    #[inline]
    pub fn u32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `i64` value from a `u64`
    #[inline]
    pub fn u64(i: u64) -> ValRaw {
        ValRaw::i64(i as i64)
    }

    /// Creates a WebAssembly `f32` value
    #[inline]
    pub fn f32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `f64` value
    #[inline]
    pub fn f64(i: u64) -> ValRaw {
        ValRaw { f64: i.to_le() }
    }

    /// Creates a WebAssembly `v128` value
    #[inline]
    pub fn v128(i: u128) -> ValRaw {
        ValRaw {
            v128: i.to_le_bytes(),
        }
    }

    /// Creates a WebAssembly `funcref` value
    #[inline]
    pub fn funcref(i: *mut c_void) -> ValRaw {
        ValRaw {
            funcref: Strict::map_addr(i, |i| i.to_le()),
        }
    }

    /// Creates a WebAssembly `externref` value
    #[inline]
    pub fn externref(e: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || e == 0);
        ValRaw {
            externref: e.to_le(),
        }
    }

    /// Creates a WebAssembly `anyref` value
    #[inline]
    pub fn anyref(r: u32) -> ValRaw {
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { anyref: r.to_le() }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_i32(&self) -> i32 {
        unsafe { i32::from_le(self.i32) }
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_i64(&self) -> i64 {
        unsafe { i64::from_le(self.i64) }
    }

    /// Gets the WebAssembly `i32` value as a `u32`
    #[inline]
    pub fn get_u32(&self) -> u32 {
        self.get_i32().unsigned()
    }

    /// Gets the WebAssembly `i64` value as a `u64`
    #[inline]
    pub fn get_u64(&self) -> u64 {
        self.get_i64().unsigned()
    }

    /// Gets the WebAssembly `f32` value
    #[inline]
    pub fn get_f32(&self) -> u32 {
        unsafe { u32::from_le(self.f32) }
    }

    /// Gets the WebAssembly `f64` value
    #[inline]
    pub fn get_f64(&self) -> u64 {
        unsafe { u64::from_le(self.f64) }
    }

    /// Gets the WebAssembly `v128` value
    #[inline]
    pub fn get_v128(&self) -> u128 {
        unsafe { u128::from_le_bytes(self.v128) }
    }

    /// Gets the WebAssembly `funcref` value
    #[inline]
    pub fn get_funcref(&self) -> *mut c_void {
        unsafe { Strict::map_addr(self.funcref, |i| usize::from_le(i)) }
    }

    /// Gets the WebAssembly `externref` value
    #[inline]
    pub fn get_externref(&self) -> u32 {
        let externref = u32::from_le(unsafe { self.externref });
        assert!(cfg!(feature = "gc") || externref == 0);
        externref
    }

    /// Gets the WebAssembly `anyref` value
    #[inline]
    pub fn get_anyref(&self) -> u32 {
        let anyref = u32::from_le(unsafe { self.anyref });
        assert!(cfg!(feature = "gc") || anyref == 0);
        anyref
    }
}
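
// An illustrative round-trip (a sketch; the numbers are arbitrary): because
// every constructor stores little-endian bytes and every getter converts
// back, the same value is observed on big- and little-endian hosts alike,
// and integer constructors initialize the full first 64 bits.
//
//     let v = ValRaw::i32(-1);
//     assert_eq!(v.get_i32(), -1);
//     // `i32` is stored zero-extended into the 64-bit slot:
//     assert_eq!(v.get_u64(), 0xffff_ffff);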

/// An "opaque" version of `VMContext` which must be explicitly cast to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are cast
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    pub(crate) magic: u32,
    _marker: marker::PhantomPinned,
}

impl VMOpaqueContext {
    /// Helper function to clearly indicate that casts are desired.
    #[inline]
    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
        ptr.cast()
    }

    /// Helper function to clearly indicate that casts are desired.
    #[inline]
    pub fn from_vm_array_call_host_func_context(
        ptr: NonNull<VMArrayCallHostFuncContext>,
    ) -> NonNull<VMOpaqueContext> {
        ptr.cast()
    }
}