wasmtime/runtime/vm/vmcontext.rs

1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{GcStore, InterpreterRef, VMGcRef, VmPtr, VmSafe};
9use crate::store::StoreOpaque;
10use core::cell::UnsafeCell;
11use core::ffi::c_void;
12use core::fmt;
13use core::marker;
14use core::mem::{self, MaybeUninit};
15use core::ptr::{self, NonNull};
16use core::sync::atomic::{AtomicUsize, Ordering};
17use sptr::Strict;
18use wasmtime_environ::{
19    BuiltinFunctionIndex, DefinedMemoryIndex, Unsigned, VMSharedTypeIndex, WasmHeapTopType,
20    WasmValType, VMCONTEXT_MAGIC,
21};
22
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
///
/// Note that this is an `unsafe extern "C"` function pointer: callers must
/// guarantee that every pointer argument is valid for the duration of the
/// call.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMOpaqueContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;
51
/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
///
/// This is a `repr(transparent)` wrapper over the byte-sized, non-`Copy`
/// `VMFunctionBody` marker, so it is only ever used behind a pointer.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);
57
/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
///
/// Like `VMArrayCallFunction`, this is a `repr(transparent)` wrapper over the
/// opaque byte-sized `VMFunctionBody` and is only ever used behind a pointer.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
68
/// An imported function.
///
/// This layout is read directly by compiled wasm code; the field offsets are
/// checked against `VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}
91
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    // Consistency: use `core::mem` for both helpers, matching the other test
    // modules in this file, instead of mixing `core::mem` and `std::mem`.
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Assert that the host layout of `VMFunctionImport` agrees with the
    /// offsets that compiled wasm code is generated against.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, wasm_call),
            usize::from(offsets.vmfunction_import_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, array_call),
            usize::from(offsets.vmfunction_import_array_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}
121
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
///
/// This type is intended to be used only behind a raw pointer, making
/// `*mut VMFunctionBody` a distinct, self-documenting alternative to `*mut u8`.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}
131
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    // Consistency: this file's convention is `core::mem`, not `std::mem`.
    use core::mem::size_of;

    /// `VMFunctionBody` must stay byte-sized so that pointer arithmetic over
    /// function bodies behaves like `*mut u8`.
    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}
142
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
///
/// This layout is read directly by compiled wasm code; the field offsets are
/// checked against `VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}
157
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    // Consistency: use `core::mem` for both helpers, matching the other test
    // modules in this file.
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Assert that the host layout of `VMTableImport` agrees with the offsets
    /// that compiled wasm code is generated against.
    #[test]
    fn check_vmtable_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
        assert_eq!(
            offset_of!(VMTableImport, vmctx),
            usize::from(offsets.vmtable_import_vmctx())
        );
    }
}
183
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
///
/// This layout is read directly by compiled wasm code; the field offsets are
/// checked against `VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}
201
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    // Consistency: use `core::mem` for both helpers, matching the other test
    // modules in this file.
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Assert that the host layout of `VMMemoryImport` agrees with the offsets
    /// that compiled wasm code is generated against.
    ///
    /// NOTE(review): the offset of the `index` field is not asserted here —
    /// confirm whether `VMOffsets` exposes an accessor for it.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, vmctx),
            usize::from(offsets.vmmemory_import_vmctx())
        );
    }
}
227
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
///
/// This layout is read directly by compiled wasm code; the field offset is
/// checked against `VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}
243
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    // Consistency: use `core::mem` for both helpers, matching the other test
    // modules in this file.
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Assert that the host layout of `VMGlobalImport` agrees with the offsets
    /// that compiled wasm code is generated against.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}
265
/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
///
/// This layout is read directly by compiled wasm code; the field offset is
/// checked against `VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}
277
#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Check that the host layout of `VMTagImport` matches the layout that
    /// compiled wasm code expects via `VMOffsets`.
    #[test]
    fn check_vmtag_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);

        let expected_size = usize::from(offsets.size_of_vmtag_import());
        assert_eq!(size_of::<VMTagImport>(), expected_size);

        let expected_from_offset = usize::from(offsets.vmtag_import_from());
        assert_eq!(offset_of!(VMTagImport, from), expected_from_offset);
    }
}
298
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
///
/// This layout is read directly by compiled wasm code; the field offsets are
/// checked against `VMOffsets` in the test module below.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}
319
impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value--a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null, properly aligned, and point to a live
    /// `VMMemoryDefinition` for the duration of this call.
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = &*ptr;
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
343
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    // Consistency: use `core::mem` for both helpers, matching the other test
    // modules in this file.
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Assert that the host layout of `VMMemoryDefinition` agrees with the
    /// offsets that compiled wasm code is generated against.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}
375
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
///
/// This layout is read directly by compiled wasm code; the field offsets are
/// checked against `VMOffsets` in the test module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}
390
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    // Consistency: use `core::mem` for both helpers, matching the other test
    // modules in this file.
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Assert that the host layout of `VMTableDefinition` agrees with the
    /// offsets that compiled wasm code is generated against.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}
416
/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // 16 bytes of raw storage, reinterpreted by the typed accessors in the
    // `impl` block below. The 16-byte size and alignment accommodate the
    // largest value stored here (a `u128`/`v128`).
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}
430
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    // Consistency: this file's convention is `core::mem`, not `std::mem`.
    use core::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// A global's storage must be at least as aligned as every value type
    /// that can be stored in it.
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
    }

    /// Assert that the host size of `VMGlobalDefinition` agrees with the size
    /// that compiled wasm code is generated against.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    /// The globals area inside a `VMContext` must be 16-byte aligned so that
    /// each (16-byte-aligned) global lands on a valid boundary.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    /// A GC reference must fit inside a global's 16-byte storage.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
469
impl VMGlobalDefinition {
    // All of the `unsafe` accessors below reinterpret the raw `storage` bytes
    // as a particular value type. They are unsafe because the caller must
    // know, out-of-band, which type this global actually holds; reading
    // through the wrong accessor yields garbage bits (though never an
    // out-of-bounds access, since every supported type fits in the 16-byte,
    // 16-byte-aligned storage).

    /// Construct a `VMGlobalDefinition`.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        match wasm_ty {
            WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
            WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
            WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
            WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
            WasmValType::V128 => global.set_u128(raw.get_v128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => {
                    let r = VMGcRef::from_raw_u32(raw.get_externref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Any => {
                    let r = VMGcRef::from_raw_u32(raw.get_anyref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        Ok(match wasm_ty {
            WasmValType::I32 => ValRaw::i32(*self.as_i32()),
            WasmValType::I64 => ValRaw::i64(*self.as_i64()),
            WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
            WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
            WasmValType::V128 => ValRaw::v128(self.get_u128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                // GC references are cloned out of the store so the `ValRaw`
                // owns its own reference.
                WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                    Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                    None => 0,
                }),
                WasmHeapTopType::Any => ValRaw::anyref({
                    match self.as_gc_ref() {
                        Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }
                }),
                WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        })
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as an u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as an u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as an u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>()))
    }

    /// Sets the 128-bit vector values.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = (*raw_ptr).as_ref();
        // Without the `gc` feature no GC reference should ever be stored here.
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        // The destination is viewed as `MaybeUninit` here (unlike
        // `write_gc_ref` below) because initialization must not treat any
        // previous bytes as a live `Option<VMGcRef>`.
        let dest = &mut *(self
            .storage
            .as_mut()
            .as_mut_ptr()
            .cast::<MaybeUninit<Option<VMGcRef>>>());

        gc_store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>());
        assert!(cfg!(feature = "gc") || dest.is_none());

        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>())
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>())
    }
}
689
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    // Consistency: this file's convention is `core::mem`, not `std::mem`.
    use core::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// Assert that the host size of `VMSharedTypeIndex` agrees with the size
    /// that compiled wasm code is generated against.
    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offsets.size_of_vmshared_type_index())
        );
    }
}
706
/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Construct a tag definition for the given function signature type id.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}
725
#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    // Consistency: this file's convention is `core::mem`, not `std::mem`.
    use core::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Assert that the host size of `VMTagDefinition` agrees with the size
    /// that compiled wasm code is generated against.
    #[test]
    fn check_vmtag_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagDefinition>(),
            usize::from(offsets.ptr.size_of_vmtag_definition())
        );
    }

    /// The tags area inside a `VMContext` must be 16-byte aligned.
    #[test]
    fn check_vmtag_begins_aligned() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_tags_begin() % 16, 0);
    }
}
749
/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}
796
impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the length of `args_and_results` as the buffer's capacity
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns whether a trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    pub unsafe fn array_call(
        &self,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Dispatch on how `array_call` should be invoked: via the pulley
        // interpreter when one is provided, otherwise as native code.
        match pulley {
            Some(vm) => self.array_call_interpreted(vm, caller, args_and_results),
            None => self.array_call_native(caller, args_and_results),
        }
    }

    unsafe fn array_call_interpreted(
        &self,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If the callee's `self.vmctx` is actually a
        // `VMArrayCallHostFuncContext` then skip the interpreter, even though
        // it's available, as `array_call` will be native code.
        if self.vmctx.as_non_null().as_ref().magic
            == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
        {
            return self.array_call_native(caller, args_and_results);
        }
        vm.call(
            self.array_call.as_non_null().cast(),
            self.vmctx.as_non_null(),
            caller,
            args_and_results,
        )
    }

    unsafe fn array_call_native(
        &self,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Reinterpret the opaque `VMArrayCallFunction` pointer as a callable
        // `VMArrayCallNative` function pointer; a union is used to perform
        // the pointer-representation conversion.
        union GetNativePointer {
            native: VMArrayCallNative,
            ptr: NonNull<VMArrayCallFunction>,
        }
        let native = GetNativePointer {
            ptr: self.array_call.as_non_null(),
        }
        .native;
        native(
            self.vmctx.as_non_null(),
            caller,
            args_and_results.cast(),
            args_and_results.len(),
        )
    }
}
878
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The layout of `VMFuncRef` is mirrored by offset calculations in
    /// `wasmtime_environ` which compiled code relies upon; assert that the
    /// two stay in agreement.
    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        let ptr = &offsets.ptr;

        // Overall size of the structure must match.
        assert_eq!(size_of::<VMFuncRef>(), usize::from(ptr.size_of_vm_func_ref()));

        // Each field must live at its statically-computed offset.
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(ptr.vm_func_ref_vmctx())
        );
    }
}
912
/// Generates `VMBuiltinFunctionsArray` from the complete list of builtin
/// libcalls supplied by `wasmtime_environ::foreach_builtin_function!`
/// (invoked below this definition).
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            // Each entry is initialized from the matching raw libcall shim.
            #[allow(unused_doc_comments)]
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self>{
                $(
                    #[cfg(has_provenance_apis)]
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    // Map the abstract parameter-type names used in builtin signatures onto
    // concrete Rust types.
    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty u8) => (u8);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (NonNull<VMContext>);
}
965
// SAFETY: `VMBuiltinFunctionsArray` (generated by the macro invocation just
// below) is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

// Expand `define_builtin_array!` over the full builtin list, producing the
// `VMBuiltinFunctionsArray` struct and its `INIT` constant used above.
wasmtime_environ::foreach_builtin_function!(define_builtin_array);
970
// Compile-time check: the builtin array must consist of exactly one
// pointer-sized entry per builtin so indexing by `BuiltinFunctionIndex`
// lines up with the struct layout.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
977
/// Structure used to control interrupting wasm code.
#[derive(Debug)]
#[repr(C)]
pub struct VMRuntimeLimits {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_fp: UnsafeCell<usize>,
}
1048
// The `VMRuntimeLimits` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `UnsafeCell`s around the `fuel_consumed`
// and `epoch_deadline` variables in `VMRuntimeLimits` (an `UnsafeCell` field
// suppresses the automatic `Sync` impl).
unsafe impl Send for VMRuntimeLimits {}
unsafe impl Sync for VMRuntimeLimits {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMRuntimeLimits {}
1058
1059impl Default for VMRuntimeLimits {
1060    fn default() -> VMRuntimeLimits {
1061        VMRuntimeLimits {
1062            stack_limit: UnsafeCell::new(usize::max_value()),
1063            fuel_consumed: UnsafeCell::new(0),
1064            epoch_deadline: UnsafeCell::new(0),
1065            last_wasm_exit_fp: UnsafeCell::new(0),
1066            last_wasm_exit_pc: UnsafeCell::new(0),
1067            last_wasm_entry_fp: UnsafeCell::new(0),
1068        }
1069    }
1070}
1071
#[cfg(test)]
mod test_vmruntime_limits {
    use super::VMRuntimeLimits;
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Assert that the Rust layout of `VMRuntimeLimits` matches the field
    /// offsets that `wasmtime_environ` computes for compiled code.
    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        let ptr = &offsets.ptr;

        // Pairs of (actual Rust offset, expected computed offset).
        let pairs = [
            (
                offset_of!(VMRuntimeLimits, stack_limit),
                usize::from(ptr.vmruntime_limits_stack_limit()),
            ),
            (
                offset_of!(VMRuntimeLimits, fuel_consumed),
                usize::from(ptr.vmruntime_limits_fuel_consumed()),
            ),
            (
                offset_of!(VMRuntimeLimits, epoch_deadline),
                usize::from(ptr.vmruntime_limits_epoch_deadline()),
            ),
            (
                offset_of!(VMRuntimeLimits, last_wasm_exit_fp),
                usize::from(ptr.vmruntime_limits_last_wasm_exit_fp()),
            ),
            (
                offset_of!(VMRuntimeLimits, last_wasm_exit_pc),
                usize::from(ptr.vmruntime_limits_last_wasm_exit_pc()),
            ),
            (
                offset_of!(VMRuntimeLimits, last_wasm_entry_fp),
                usize::from(ptr.vmruntime_limits_last_wasm_entry_fp()),
            ),
        ];
        for (actual, expected) in pairs {
            assert_eq!(actual, expected);
        }
    }
}
1108
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// There's some more discussion about this within `wasmtime/src/lib.rs` but
    /// the idea is that we want to tell the compiler that this contains
    /// pointers which transitively refer to itself, to suppress some
    /// optimizations that might otherwise assume this doesn't exist.
    ///
    /// The self-referential pointer we care about is the `*mut Store` pointer
    /// early on in this context, which if you follow through enough levels of
    /// nesting, eventually can refer back to this `VMContext`.
    pub _marker: marker::PhantomPinned,
}
1129
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// `opaque` must be valid to read the leading `magic` field from, and —
    /// in the absence of bugs — must actually point at a `VMContext`.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }
}
1154
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,
}
1250
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple compile-time assertions about the shape of the type which
// are additionally matched in C. A layout change fails the build here.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1263
1264impl fmt::Debug for ValRaw {
1265    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1266        struct Hex<T>(T);
1267        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1268            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1269                let bytes = mem::size_of::<T>();
1270                let hex_digits_per_byte = 2;
1271                let hex_digits = bytes * hex_digits_per_byte;
1272                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1273            }
1274        }
1275
1276        unsafe {
1277            f.debug_struct("ValRaw")
1278                .field("i32", &Hex(self.i32))
1279                .field("i64", &Hex(self.i64))
1280                .field("f32", &Hex(self.f32))
1281                .field("f64", &Hex(self.f64))
1282                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1283                .field("funcref", &self.funcref)
1284                .field("externref", &Hex(self.externref))
1285                .field("anyref", &Hex(self.anyref))
1286                .finish()
1287        }
1288    }
1289}
1290
impl ValRaw {
    /// Create a null reference that is compatible with any of
    /// `{any,extern,func}ref`.
    ///
    /// All-zero bits encode null for each of the three reference
    /// representations, which the debug assertions below confirm.
    pub fn null() -> ValRaw {
        unsafe {
            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
            debug_assert_eq!(raw.get_anyref(), 0);
            debug_assert_eq!(raw.get_externref(), 0);
            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
            raw
        }
    }

    /// Creates a WebAssembly `i32` value
    #[inline]
    pub fn i32(i: i32) -> ValRaw {
        // Note that this is intentionally not setting the `i32` field, instead
        // setting the `i64` field with a zero-extended version of `i`. For more
        // information on this see the comments on `Lower for Result` in the
        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
        // otherwise constrained to guarantee that the initial 64-bits are
        // always initialized.
        ValRaw::u64(i.unsigned().into())
    }

    /// Creates a WebAssembly `i64` value
    #[inline]
    pub fn i64(i: i64) -> ValRaw {
        // `to_le` byte-swaps on big-endian hosts so the stored representation
        // is always little-endian.
        ValRaw { i64: i.to_le() }
    }

    /// Creates a WebAssembly `i32` value from its unsigned representation
    #[inline]
    pub fn u32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `i64` value from its unsigned representation
    #[inline]
    pub fn u64(i: u64) -> ValRaw {
        ValRaw::i64(i as i64)
    }

    /// Creates a WebAssembly `f32` value from its bit pattern
    #[inline]
    pub fn f32(i: u32) -> ValRaw {
        // See comments in `ValRaw::i32` for why this is setting the upper
        // 32-bits as well.
        ValRaw::u64(i.into())
    }

    /// Creates a WebAssembly `f64` value from its bit pattern
    #[inline]
    pub fn f64(i: u64) -> ValRaw {
        ValRaw { f64: i.to_le() }
    }

    /// Creates a WebAssembly `v128` value
    #[inline]
    pub fn v128(i: u128) -> ValRaw {
        ValRaw {
            v128: i.to_le_bytes(),
        }
    }

    /// Creates a WebAssembly `funcref` value
    #[inline]
    pub fn funcref(i: *mut c_void) -> ValRaw {
        // `map_addr` converts only the pointer's address to little-endian
        // while preserving its provenance.
        ValRaw {
            funcref: Strict::map_addr(i, |i| i.to_le()),
        }
    }

    /// Creates a WebAssembly `externref` value
    #[inline]
    pub fn externref(e: u32) -> ValRaw {
        // Without the `gc` feature only the null (0) reference is valid.
        assert!(cfg!(feature = "gc") || e == 0);
        ValRaw {
            externref: e.to_le(),
        }
    }

    /// Creates a WebAssembly `anyref` value
    #[inline]
    pub fn anyref(r: u32) -> ValRaw {
        // Without the `gc` feature only the null (0) reference is valid.
        assert!(cfg!(feature = "gc") || r == 0);
        ValRaw { anyref: r.to_le() }
    }

    /// Gets the WebAssembly `i32` value
    #[inline]
    pub fn get_i32(&self) -> i32 {
        unsafe { i32::from_le(self.i32) }
    }

    /// Gets the WebAssembly `i64` value
    #[inline]
    pub fn get_i64(&self) -> i64 {
        unsafe { i64::from_le(self.i64) }
    }

    /// Gets the WebAssembly `i32` value, interpreted as unsigned
    #[inline]
    pub fn get_u32(&self) -> u32 {
        self.get_i32().unsigned()
    }

    /// Gets the WebAssembly `i64` value, interpreted as unsigned
    #[inline]
    pub fn get_u64(&self) -> u64 {
        self.get_i64().unsigned()
    }

    /// Gets the WebAssembly `f32` value's bit pattern
    #[inline]
    pub fn get_f32(&self) -> u32 {
        unsafe { u32::from_le(self.f32) }
    }

    /// Gets the WebAssembly `f64` value's bit pattern
    #[inline]
    pub fn get_f64(&self) -> u64 {
        unsafe { u64::from_le(self.f64) }
    }

    /// Gets the WebAssembly `v128` value
    #[inline]
    pub fn get_v128(&self) -> u128 {
        unsafe { u128::from_le_bytes(self.v128) }
    }

    /// Gets the WebAssembly `funcref` value
    #[inline]
    pub fn get_funcref(&self) -> *mut c_void {
        unsafe { Strict::map_addr(self.funcref, |i| usize::from_le(i)) }
    }

    /// Gets the WebAssembly `externref` value
    #[inline]
    pub fn get_externref(&self) -> u32 {
        let externref = u32::from_le(unsafe { self.externref });
        // Without the `gc` feature only the null (0) reference is valid.
        assert!(cfg!(feature = "gc") || externref == 0);
        externref
    }

    /// Gets the WebAssembly `anyref` value
    #[inline]
    pub fn get_anyref(&self) -> u32 {
        let anyref = u32::from_le(unsafe { self.anyref });
        // Without the `gc` feature only the null (0) reference is valid.
        assert!(cfg!(feature = "gc") || anyref == 0);
        anyref
    }
}
1446
/// An "opaque" version of `VMContext` which must be explicitly cast to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are cast
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    // Leading 32-bit magic value identifying the concrete context type.
    pub(crate) magic: u32,
    // Suppresses optimizations around self-referential pointers; see the
    // equivalent marker on `VMContext`.
    _marker: marker::PhantomPinned,
}
1466
1467impl VMOpaqueContext {
1468    /// Helper function to clearly indicate that casts are desired.
1469    #[inline]
1470    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1471        ptr.cast()
1472    }
1473
1474    /// Helper function to clearly indicate that casts are desired.
1475    #[inline]
1476    pub fn from_vm_array_call_host_func_context(
1477        ptr: NonNull<VMArrayCallHostFuncContext>,
1478    ) -> NonNull<VMOpaqueContext> {
1479        ptr.cast()
1480    }
1481}