// wasmtime/runtime/vm/vmcontext.rs

1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{GcStore, InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
9use crate::store::StoreOpaque;
10use crate::vm::stack_switching::VMStackChain;
11use core::cell::UnsafeCell;
12use core::ffi::c_void;
13use core::fmt;
14use core::marker;
15use core::mem::{self, MaybeUninit};
16use core::ptr::{self, NonNull};
17use core::sync::atomic::{AtomicUsize, Ordering};
18use wasmtime_environ::{
19    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
20    DefinedTagIndex, Unsigned, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
21};
22
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>, // callee vmctx
    NonNull<VMOpaqueContext>, // caller vmctx
    NonNull<ValRaw>,          // args-and-results buffer
    usize,                    // capacity of that buffer, in `ValRaw`s
) -> bool;
51
/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
///
/// `repr(transparent)` over the byte-sized `VMFunctionBody`, so this type is
/// only ever meaningful behind a pointer (e.g. `VmPtr<VMArrayCallFunction>`).
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);
57
/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
///
/// Like `VMArrayCallFunction`, this is a `repr(transparent)` byte-sized marker
/// that is only meaningful behind a pointer.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
68
/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // NOTE: field order is part of the ABI — compiled code accesses these
    // fields at the offsets computed by `wasmtime_environ::VMOffsets`, which
    // the unit tests in this file verify. Don't reorder or add fields casually.
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}
91
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The Rust layout of `VMFunctionImport` must agree with the offsets that
    /// `wasmtime_environ` computes for compiled code.
    #[test]
    fn check_vmfunction_import_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.size_of_vmfunction_import()),
            size_of::<VMFunctionImport>()
        );
        assert_eq!(
            usize::from(o.vmfunction_import_wasm_call()),
            offset_of!(VMFunctionImport, wasm_call)
        );
        assert_eq!(
            usize::from(o.vmfunction_import_array_call()),
            offset_of!(VMFunctionImport, array_call)
        );
        assert_eq!(
            usize::from(o.vmfunction_import_vmctx()),
            offset_of!(VMFunctionImport, vmctx)
        );
    }
}
121
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8); // one byte of storage; the unit test below asserts size == 1

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}
131
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    /// `VMFunctionBody` is a byte-sized placeholder for JIT code, so its size
    /// must be exactly one byte.
    #[test]
    fn check_vmfunction_body_offsets() {
        let expected = 1;
        assert_eq!(size_of::<VMFunctionBody>(), expected);
    }
}
142
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
    // NOTE: layout is ABI — verified against `VMOffsets` by the tests below.
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}
160
#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The Rust layout of `VMTableImport` must agree with the offsets that
    /// `wasmtime_environ` computes for compiled code.
    #[test]
    fn check_vmtable_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.size_of_vmtable_import()),
            size_of::<VMTableImport>()
        );
        assert_eq!(
            usize::from(o.vmtable_import_from()),
            offset_of!(VMTableImport, from)
        );
    }

    /// `VMTableImport` is also used to record tables used by components, so
    /// the size computed by `VMOffsets` must stay in sync with the one
    /// computed by `VMComponentOffsets`.
    #[test]
    fn ensure_sizes_match() {
        let m = Module::new();
        let core_size = VMOffsets::new(HostPtr, &m).size_of_vmtable_import();
        let c = Component::default();
        let component_size = VMComponentOffsets::new(HostPtr, &c).size_of_vmtable_import();
        assert_eq!(core_size, component_size);
    }
}
198
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
    // NOTE: layout is ABI — verified against `VMOffsets` by the tests below.
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}
216
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The Rust layout of `VMMemoryImport` must agree with the offsets that
    /// `wasmtime_environ` computes for compiled code.
    #[test]
    fn check_vmmemory_import_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.size_of_vmmemory_import()),
            size_of::<VMMemoryImport>()
        );
        assert_eq!(
            usize::from(o.vmmemory_import_from()),
            offset_of!(VMMemoryImport, from)
        );
    }
}
238
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
    // NOTE: layout is ABI — verified against `VMOffsets` by the tests below.
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}
266
/// The kinds of globals that Wasmtime has.
///
/// `repr(C, u32)` pins the discriminant to a stable 32-bit value with a
/// C-compatible layout for each variant's payload.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
}

// SAFETY: the above enum is repr(C) and stores nothing else
unsafe impl VmSafe for VMGlobalKind {}
282
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The Rust layout of `VMGlobalImport` must agree with the offsets that
    /// `wasmtime_environ` computes for compiled code.
    #[test]
    fn check_vmglobal_import_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.size_of_vmglobal_import()),
            size_of::<VMGlobalImport>()
        );
        assert_eq!(
            usize::from(o.vmglobal_import_from()),
            offset_of!(VMGlobalImport, from)
        );
    }
}
304
/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
    // NOTE: layout is ABI — verified against `VMOffsets` by the tests below.
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}
322
#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The Rust layout of `VMTagImport` must agree with the offsets that
    /// `wasmtime_environ` computes for compiled code.
    #[test]
    fn check_vmtag_import_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.size_of_vmtag_import()),
            size_of::<VMTagImport>()
        );
        assert_eq!(
            usize::from(o.vmtag_import_from()),
            offset_of!(VMTagImport, from)
        );
    }
}
343
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
///
/// Note that this type is deliberately not `Copy`/`Clone`: `current_length`
/// is an `AtomicUsize`, so copies are made explicitly via
/// [`VMMemoryDefinition::load`].
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}
364
impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value--a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null, properly aligned, and valid to read as a
    /// `VMMemoryDefinition` for the duration of this call.
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = &*ptr;
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
388
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The Rust layout of `VMMemoryDefinition` must agree with the offsets
    /// that `wasmtime_environ` computes for compiled code.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.ptr.size_of_vmmemory_definition()),
            size_of::<VMMemoryDefinition>()
        );
        assert_eq!(
            usize::from(o.ptr.vmmemory_definition_base()),
            offset_of!(VMMemoryDefinition, base)
        );
        assert_eq!(
            usize::from(o.ptr.vmmemory_definition_current_length()),
            offset_of!(VMMemoryDefinition, current_length)
        );
        // TODO: also assert that the size of the `current_length` field
        // matches `offsets.size_of_vmmemory_definition_current_length()`.
    }
}
420
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
    // NOTE: layout is ABI — verified against `VMOffsets` by the tests below.
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}
435
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The Rust layout of `VMTableDefinition` must agree with the offsets
    /// that `wasmtime_environ` computes for compiled code.
    #[test]
    fn check_vmtable_definition_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.size_of_vmtable_definition()),
            size_of::<VMTableDefinition>()
        );
        assert_eq!(
            usize::from(o.vmtable_definition_base()),
            offset_of!(VMTableDefinition, base)
        );
        assert_eq!(
            usize::from(o.vmtable_definition_current_elements()),
            offset_of!(VMTableDefinition, current_elements)
        );
    }
}
461
/// The storage for a WebAssembly global defined within the instance.
///
/// The 16-byte, 16-byte-aligned buffer is large enough for any global value
/// up to and including a `v128` (and, with the `gc` feature, a `VMGcRef` —
/// checked by the tests below); typed access goes through the `unsafe`
/// accessors on the `impl` below.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}
475
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// A `VMGlobalDefinition` must be aligned strictly enough to store any
    /// scalar or vector global value in place.
    #[test]
    fn check_vmglobal_definition_alignment() {
        let actual = align_of::<VMGlobalDefinition>();
        assert!(actual >= align_of::<i32>());
        assert!(actual >= align_of::<i64>());
        assert!(actual >= align_of::<f32>());
        assert!(actual >= align_of::<f64>());
        assert!(actual >= align_of::<[u8; 16]>());
        assert!(actual >= align_of::<[f32; 4]>());
        assert!(actual >= align_of::<[f64; 2]>());
    }

    /// The size must agree with what `wasmtime_environ` computes for
    /// compiled code.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.ptr.size_of_vmglobal_definition()),
            size_of::<VMGlobalDefinition>()
        );
    }

    /// The global storage area inside a `VMContext` must start 16-byte
    /// aligned so each 16-byte-aligned definition lands correctly.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(o.vmctx_globals_begin() % 16, 0);
    }

    /// GC references must fit within a global's inline storage.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
516
impl VMGlobalDefinition {
    // Safety note for the `as_*`/`get_*`/`set_*` accessors below: `storage`
    // is an untyped 16-byte buffer, and each accessor simply reinterprets the
    // bytes at the start of that buffer via a pointer cast. The caller must
    // ensure the accessor used matches the type this global actually stores
    // (the type contract established by `from_val_raw`/`to_val_raw`).

    /// Construct a `VMGlobalDefinition`.
    ///
    /// The storage is zero-initialized, which is a valid bit pattern for
    /// every supported global type (including a null GC/func reference).
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        match wasm_ty {
            WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
            WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
            // Floats are moved as raw bits; no float arithmetic happens here.
            WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
            WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
            WasmValType::V128 => global.set_u128(raw.get_v128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => {
                    let r = VMGcRef::from_raw_u32(raw.get_externref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Any => {
                    let r = VMGcRef::from_raw_u32(raw.get_anyref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
            },
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        Ok(match wasm_ty {
            WasmValType::I32 => ValRaw::i32(*self.as_i32()),
            WasmValType::I64 => ValRaw::i64(*self.as_i64()),
            WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
            WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
            WasmValType::V128 => ValRaw::v128(self.get_u128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                // GC references are cloned out of the global (null maps to 0).
                WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                    Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                    None => 0,
                }),
                WasmHeapTopType::Any => ValRaw::anyref({
                    match self.as_gc_ref() {
                        Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }
                }),
                WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        })
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as an u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as an u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as an u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>()))
    }

    /// Sets the 128-bit vector values.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = (*raw_ptr).as_ref();
        // Without the `gc` feature no non-null GC ref should ever be stored.
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    ///
    /// Unlike `write_gc_ref`, the destination slot is treated as
    /// possibly-uninitialized (note the `MaybeUninit` cast), so no previous
    /// reference is read from it.
    pub unsafe fn init_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self
            .storage
            .as_mut()
            .as_mut_ptr()
            .cast::<MaybeUninit<Option<VMGcRef>>>());

        gc_store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    ///
    /// The destination must already hold a valid (possibly-`None`) GC
    /// reference, which `gc_store.write_gc_ref` may need to release.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>());
        assert!(cfg!(feature = "gc") || dest.is_none());

        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>())
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>())
    }
}
736
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// `VMSharedTypeIndex`'s size must match the offset calculations used by
    /// compiled code.
    #[test]
    fn check_vmshared_type_index() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.size_of_vmshared_type_index()),
            size_of::<VMSharedTypeIndex>()
        );
    }
}
753
/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Construct a tag definition for the given function-signature type id.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}
772
#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The size of `VMTagDefinition` must agree with what `wasmtime_environ`
    /// computes for compiled code.
    #[test]
    fn check_vmtag_definition_offsets() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            usize::from(o.ptr.size_of_vmtag_definition()),
            size_of::<VMTagDefinition>()
        );
    }

    /// The tag storage area inside a `VMContext` must start 16-byte aligned.
    #[test]
    fn check_vmtag_begins_aligned() {
        let m = Module::new();
        let o = VMOffsets::new(HostPtr, &m);
        assert_eq!(o.vmctx_tags_begin() % 16, 0);
    }
}
796
797/// The VM caller-checked "funcref" record, for caller-side signature checking.
798///
799/// It consists of function pointer(s), a type id to be checked by the
800/// caller, and the vmctx closure associated with this function.
801#[derive(Debug, Clone)]
802#[repr(C)]
803pub struct VMFuncRef {
804    /// Function pointer for this funcref if being called via the "array"
805    /// calling convention that `Func::new` et al use.
806    pub array_call: VmPtr<VMArrayCallFunction>,
807
808    /// Function pointer for this funcref if being called via the calling
809    /// convention we use when compiling Wasm.
810    ///
811    /// Most functions come with a function pointer that we can use when they
812    /// are called from Wasm. The notable exception is when we `Func::wrap` a
813    /// host function, and we don't have a Wasm compiler on hand to compile a
814    /// Wasm-to-native trampoline for the function. In this case, we leave
815    /// `wasm_call` empty until the function is passed as an import to Wasm (or
816    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
817    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
818    /// module and use that fill in `VMFunctionImport::wasm_call`. **However**
819    /// there is no guarantee that the Wasm module has a trampoline for this
820    /// function's signature. The Wasm module only has trampolines for its
821    /// types, and if this function isn't of one of those types, then the Wasm
822    /// module will not have a trampoline for it. This is actually okay, because
823    /// it means that the Wasm cannot actually call this function. But it does
824    /// mean that this field needs to be an `Option` even though it is non-null
825    /// the vast vast vast majority of the time.
826    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,
827
828    /// Function signature's type id.
829    pub type_index: VMSharedTypeIndex,
830
831    /// The VM state associated with this function.
832    ///
833    /// The actual definition of what this pointer points to depends on the
834    /// function being referenced: for core Wasm functions, this is a `*mut
835    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
836    /// component functions it is a `*mut VMComponentContext`.
837    pub vmctx: VmPtr<VMOpaqueContext>,
838    // If more elements are added here, remember to add offset_of tests below!
839}
840
// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields:
// `VmPtr`, `Option<VmPtr>` (same layout thanks to the null niche), and
// `VMSharedTypeIndex`.
unsafe impl VmSafe for VMFuncRef {}
843
impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the base pointer of `args_and_results` as the args/results pointer
    /// * the length of `args_and_results` as the buffer's capacity
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns `true` if the call succeeded, or `false` if it failed, in
    /// which case a trap was recorded in TLS for raising (see the docs on
    /// `VMArrayCallNative`).
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        &self,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Dispatch on how this funcref's code is executed: through the Pulley
        // interpreter when one is provided, otherwise as native code.
        match pulley {
            Some(vm) => self.array_call_interpreted(vm, caller, args_and_results),
            None => self.array_call_native(caller, args_and_results),
        }
    }

    /// Interpreted-path implementation of `array_call`: runs the `array_call`
    /// pointer as Pulley bytecode through `vm`, unless the callee turns out to
    /// be a native host function.
    unsafe fn array_call_interpreted(
        &self,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If this funcref's own `vmctx` is actually a
        // `VMArrayCallHostFuncContext` then skip the interpreter, even though
        // it's available, as `array_call` will be native code.
        if self.vmctx.as_non_null().as_ref().magic
            == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
        {
            return self.array_call_native(caller, args_and_results);
        }
        vm.call(
            self.array_call.as_non_null().cast(),
            self.vmctx.as_non_null(),
            caller,
            args_and_results,
        )
    }

    /// Native-path implementation of `array_call`: reinterprets the stored
    /// code pointer as a `VMArrayCallNative` and invokes it directly.
    #[inline]
    unsafe fn array_call_native(
        &self,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Union used to reinterpret the opaque `VMArrayCallFunction` pointer
        // as a callable native function pointer. This is only sound when the
        // pointer actually refers to native code, which the dispatch in
        // `array_call` above is responsible for ensuring.
        union GetNativePointer {
            native: VMArrayCallNative,
            ptr: NonNull<VMArrayCallFunction>,
        }
        let native = GetNativePointer {
            ptr: self.array_call.as_non_null(),
        }
        .native;
        native(
            self.vmctx.as_non_null(),
            caller,
            args_and_results.cast(),
            args_and_results.len(),
        )
    }
}
927
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The native Rust layout of `VMFuncRef` must agree with the offsets that
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        let ptr = &offsets.ptr;

        assert_eq!(size_of::<VMFuncRef>(), usize::from(ptr.size_of_vm_func_ref()));
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(ptr.vm_func_ref_vmctx())
        );
    }
}
961
/// Macro invoked with `wasmtime_environ::foreach_builtin_function!` below to
/// generate `VMBuiltinFunctionsArray` from the canonical list of builtin
/// function signatures.
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        ///
        /// Ignore improper ctypes to permit `__m128i` on x86_64.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                #[allow(improper_ctypes_definitions)]
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            // The canonical table of builtin implementations, with each slot
            // pointing at the corresponding `libcalls::raw` entry.
            #[allow(unused_doc_comments)]
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self>{
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    // Translate the type identifiers used in builtin signatures into the
    // concrete Rust types used in the generated function pointers.
    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (NonNull<VMContext>);
}
1021
// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

// Instantiate `VMBuiltinFunctionsArray` (and its `INIT` table) from the
// canonical list of builtin functions maintained in `wasmtime_environ`.
wasmtime_environ::foreach_builtin_function!(define_builtin_array);

// Sanity-check that the array is exactly one pointer-sized slot per builtin
// function, since compiled code indexes into it with pointer-sized loads.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
1033
/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    // Fields are wrapped in `UnsafeCell` because compiled wasm code reads and
    // writes them directly, outside of Rust's aliasing rules.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,
}
1118
// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `UnsafeCell` wrappers (e.g. around
// `fuel_consumed` and `epoch_deadline`) in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}
1128
1129impl Default for VMStoreContext {
1130    fn default() -> VMStoreContext {
1131        VMStoreContext {
1132            fuel_consumed: UnsafeCell::new(0),
1133            epoch_deadline: UnsafeCell::new(0),
1134            stack_limit: UnsafeCell::new(usize::max_value()),
1135            gc_heap: VMMemoryDefinition {
1136                base: NonNull::dangling().into(),
1137                current_length: AtomicUsize::new(0),
1138            },
1139            last_wasm_exit_fp: UnsafeCell::new(0),
1140            last_wasm_exit_pc: UnsafeCell::new(0),
1141            last_wasm_entry_fp: UnsafeCell::new(0),
1142            stack_chain: UnsafeCell::new(VMStackChain::Absent),
1143        }
1144    }
1145}
1146
#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Verify that the native Rust layout of `VMStoreContext` (including the
    /// embedded `VMMemoryDefinition` for the GC heap) matches the offsets
    /// that `VMOffsets` computes for compiled code.
    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMStoreContext, stack_limit),
            usize::from(offsets.ptr.vmstore_context_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStoreContext, fuel_consumed),
            usize::from(offsets.ptr.vmstore_context_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMStoreContext, epoch_deadline),
            usize::from(offsets.ptr.vmstore_context_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap),
            usize::from(offsets.ptr.vmstore_context_gc_heap())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmstore_context_gc_heap_base())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmstore_context_gc_heap_current_length())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_pc),
            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_fp),
            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, stack_chain),
            usize::from(offsets.ptr.vmstore_context_stack_chain())
        )
    }
}
1199
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// There's some more discussion about this within `wasmtime/src/lib.rs` but
    /// the idea is that we want to tell the compiler that this contains
    /// pointers which transitively refer to itself, to suppress some
    /// optimizations that might otherwise assume this doesn't exist.
    ///
    /// The self-referential pointer we care about is the `*mut Store` pointer
    /// early on in this context, which if you follow through enough levels of
    /// nesting, eventually can refer back to this `VMContext`.
    pub _marker: marker::PhantomPinned,
}
1220
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// The `opaque` pointer must be valid to read from and must actually point
    /// at a `VMContext` rather than some other kind of context hiding behind
    /// `VMOpaqueContext`. The magic check below only fires in debug builds and
    /// is not a substitute for upholding this contract.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }
}
1245
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,
}
1341
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
// (The raw `funcref` pointer field otherwise suppresses the automatic
// `Send`/`Sync` impls.)
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1354
1355impl fmt::Debug for ValRaw {
1356    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1357        struct Hex<T>(T);
1358        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1359            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1360                let bytes = mem::size_of::<T>();
1361                let hex_digits_per_byte = 2;
1362                let hex_digits = bytes * hex_digits_per_byte;
1363                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1364            }
1365        }
1366
1367        unsafe {
1368            f.debug_struct("ValRaw")
1369                .field("i32", &Hex(self.i32))
1370                .field("i64", &Hex(self.i64))
1371                .field("f32", &Hex(self.f32))
1372                .field("f64", &Hex(self.f64))
1373                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1374                .field("funcref", &self.funcref)
1375                .field("externref", &Hex(self.externref))
1376                .field("anyref", &Hex(self.anyref))
1377                .finish()
1378        }
1379    }
1380}
1381
1382impl ValRaw {
1383    /// Create a null reference that is compatible with any of
1384    /// `{any,extern,func}ref`.
1385    pub fn null() -> ValRaw {
1386        unsafe {
1387            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1388            debug_assert_eq!(raw.get_anyref(), 0);
1389            debug_assert_eq!(raw.get_externref(), 0);
1390            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1391            raw
1392        }
1393    }
1394
1395    /// Creates a WebAssembly `i32` value
1396    #[inline]
1397    pub fn i32(i: i32) -> ValRaw {
1398        // Note that this is intentionally not setting the `i32` field, instead
1399        // setting the `i64` field with a zero-extended version of `i`. For more
1400        // information on this see the comments on `Lower for Result` in the
1401        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
1402        // otherwise constrained to guarantee that the initial 64-bits are
1403        // always initialized.
1404        ValRaw::u64(i.unsigned().into())
1405    }
1406
1407    /// Creates a WebAssembly `i64` value
1408    #[inline]
1409    pub fn i64(i: i64) -> ValRaw {
1410        ValRaw { i64: i.to_le() }
1411    }
1412
1413    /// Creates a WebAssembly `i32` value
1414    #[inline]
1415    pub fn u32(i: u32) -> ValRaw {
1416        // See comments in `ValRaw::i32` for why this is setting the upper
1417        // 32-bits as well.
1418        ValRaw::u64(i.into())
1419    }
1420
1421    /// Creates a WebAssembly `i64` value
1422    #[inline]
1423    pub fn u64(i: u64) -> ValRaw {
1424        ValRaw::i64(i as i64)
1425    }
1426
1427    /// Creates a WebAssembly `f32` value
1428    #[inline]
1429    pub fn f32(i: u32) -> ValRaw {
1430        // See comments in `ValRaw::i32` for why this is setting the upper
1431        // 32-bits as well.
1432        ValRaw::u64(i.into())
1433    }
1434
1435    /// Creates a WebAssembly `f64` value
1436    #[inline]
1437    pub fn f64(i: u64) -> ValRaw {
1438        ValRaw { f64: i.to_le() }
1439    }
1440
1441    /// Creates a WebAssembly `v128` value
1442    #[inline]
1443    pub fn v128(i: u128) -> ValRaw {
1444        ValRaw {
1445            v128: i.to_le_bytes(),
1446        }
1447    }
1448
1449    /// Creates a WebAssembly `funcref` value
1450    #[inline]
1451    pub fn funcref(i: *mut c_void) -> ValRaw {
1452        ValRaw {
1453            funcref: i.map_addr(|i| i.to_le()),
1454        }
1455    }
1456
1457    /// Creates a WebAssembly `externref` value
1458    #[inline]
1459    pub fn externref(e: u32) -> ValRaw {
1460        assert!(cfg!(feature = "gc") || e == 0);
1461        ValRaw {
1462            externref: e.to_le(),
1463        }
1464    }
1465
1466    /// Creates a WebAssembly `anyref` value
1467    #[inline]
1468    pub fn anyref(r: u32) -> ValRaw {
1469        assert!(cfg!(feature = "gc") || r == 0);
1470        ValRaw { anyref: r.to_le() }
1471    }
1472
1473    /// Gets the WebAssembly `i32` value
1474    #[inline]
1475    pub fn get_i32(&self) -> i32 {
1476        unsafe { i32::from_le(self.i32) }
1477    }
1478
1479    /// Gets the WebAssembly `i64` value
1480    #[inline]
1481    pub fn get_i64(&self) -> i64 {
1482        unsafe { i64::from_le(self.i64) }
1483    }
1484
1485    /// Gets the WebAssembly `i32` value
1486    #[inline]
1487    pub fn get_u32(&self) -> u32 {
1488        self.get_i32().unsigned()
1489    }
1490
1491    /// Gets the WebAssembly `i64` value
1492    #[inline]
1493    pub fn get_u64(&self) -> u64 {
1494        self.get_i64().unsigned()
1495    }
1496
1497    /// Gets the WebAssembly `f32` value
1498    #[inline]
1499    pub fn get_f32(&self) -> u32 {
1500        unsafe { u32::from_le(self.f32) }
1501    }
1502
1503    /// Gets the WebAssembly `f64` value
1504    #[inline]
1505    pub fn get_f64(&self) -> u64 {
1506        unsafe { u64::from_le(self.f64) }
1507    }
1508
1509    /// Gets the WebAssembly `v128` value
1510    #[inline]
1511    pub fn get_v128(&self) -> u128 {
1512        unsafe { u128::from_le_bytes(self.v128) }
1513    }
1514
1515    /// Gets the WebAssembly `funcref` value
1516    #[inline]
1517    pub fn get_funcref(&self) -> *mut c_void {
1518        unsafe { self.funcref.map_addr(|i| usize::from_le(i)) }
1519    }
1520
1521    /// Gets the WebAssembly `externref` value
1522    #[inline]
1523    pub fn get_externref(&self) -> u32 {
1524        let externref = u32::from_le(unsafe { self.externref });
1525        assert!(cfg!(feature = "gc") || externref == 0);
1526        externref
1527    }
1528
1529    /// Gets the WebAssembly `anyref` value
1530    #[inline]
1531    pub fn get_anyref(&self) -> u32 {
1532        let anyref = u32::from_le(unsafe { self.anyref });
1533        assert!(cfg!(feature = "gc") || anyref == 0);
1534        anyref
1535    }
1536}
1537
/// An "opaque" version of `VMContext` which must be explicitly casted to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are casted
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    /// 32-bit discriminator identifying which concrete context type this
    /// really is; consulted by debug assertions when narrowing casts are
    /// performed (e.g. `VMContext::from_opaque`).
    pub(crate) magic: u32,
    /// Marks the type `!Unpin`; like `VMContext::_marker` this reflects that
    /// these contexts are referenced in place and may be self-referential.
    _marker: marker::PhantomPinned,
}
1557
impl VMOpaqueContext {
    /// Helper function to clearly indicate that casts are desired.
    ///
    /// Widening a concrete `VMContext` pointer into the opaque type; the
    /// inverse, narrowing direction is `VMContext::from_opaque`.
    #[inline]
    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
        ptr.cast()
    }

    /// Helper function to clearly indicate that casts are desired.
    ///
    /// Widening a `VMArrayCallHostFuncContext` pointer into the opaque type.
    #[inline]
    pub fn from_vm_array_call_host_func_context(
        ptr: NonNull<VMArrayCallHostFuncContext>,
    ) -> NonNull<VMOpaqueContext> {
        ptr.cast()
    }
}