// wasmtime/runtime/vm/vmcontext.rs

//! This file declares `VMContext` and several related structs which contain
//! fields that compiled wasm code accesses directly.

mod vm_host_func_context;

pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
use crate::prelude::*;
use crate::runtime::vm::{InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
use crate::store::StoreOpaque;
use crate::vm::stack_switching::VMStackChain;
use core::cell::UnsafeCell;
use core::ffi::c_void;
use core::fmt;
use core::marker;
use core::mem::{self, MaybeUninit};
use core::ops::Range;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicUsize, Ordering};
use wasmtime_environ::{
    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
    DefinedTagIndex, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
};

/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;

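// What follows is an illustrative sketch, not part of Wasmtime proper: a
// hypothetical host function written against `VMArrayCallNative`, showing how
// arguments and results share the same `ValRaw` buffer. All names in this
// module are made up for exposition.
#[cfg(test)]
mod example_array_call_signature {
    use super::*;

    /// Reads one `i32` argument from the shared buffer and writes one `i32`
    /// result back in place, returning `true` for success.
    unsafe extern "C" fn add_one(
        _callee: NonNull<VMOpaqueContext>,
        _caller: NonNull<VMContext>,
        args_and_results: NonNull<ValRaw>,
        _capacity: usize,
    ) -> bool {
        unsafe {
            let arg = args_and_results.as_ref().get_i32();
            args_and_results.as_ptr().write(ValRaw::i32(arg + 1));
        }
        true
    }

    #[test]
    fn signature_matches() {
        // Type-check that the sketch above satisfies the alias.
        let _f: VMArrayCallNative = add_one;
    }
}
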
/// An opaque function pointer which might be a `VMArrayCallNative` or might be
/// Pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);

/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);

/// An imported function.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}

#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, wasm_call),
            usize::from(offsets.vmfunction_import_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, array_call),
            usize::from(offsets.vmfunction_import_array_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}

/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}

#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}

#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtable_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
        assert_eq!(
            offset_of!(VMTableImport, vmctx),
            usize::from(offsets.vmtable_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTableImport, index),
            usize::from(offsets.vmtable_import_index())
        );
    }

    #[test]
    fn ensure_sizes_match() {
        // Because we use `VMTableImport` for recording tables used by components, we
        // want to make sure that the size calculations between `VMOffsets` and
        // `VMComponentOffsets` stay the same.
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let vm_offsets = VMOffsets::new(HostPtr, &module);
        let component = Component::default();
        let vm_component_offsets = VMComponentOffsets::new(HostPtr, &component);
        assert_eq!(
            vm_offsets.size_of_vmtable_import(),
            vm_component_offsets.size_of_vmtable_import()
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}

#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, vmctx),
            usize::from(offsets.vmmemory_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, index),
            usize::from(offsets.vmmemory_import_index())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike functions, tables, and memories, globals are never
/// resized, so compiled code accesses the value directly through `from`. The
/// optional `vmctx` below only records which context, if any, owns the global.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}

/// The kinds of globals that Wasmtime has.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s.
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
    #[cfg(feature = "component-model")]
    TaskMayBlock,
}
// SAFETY: the above enum is repr(C, u32) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalKind {}

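// The pairing between `vmctx` and `kind` documented above can be shown with a
// small illustrative sketch; this module is for exposition only and is not
// part of Wasmtime proper. The index value used is arbitrary.
#[cfg(test)]
mod example_vmglobal_import_kinds {
    use super::{VMGlobalDefinition, VMGlobalImport, VMGlobalKind};
    use core::ptr::NonNull;
    use wasmtime_environ::DefinedGlobalIndex;

    #[test]
    fn host_global_carries_no_vmctx() {
        // A host-defined global lives in the `StoreOpaque`, so no owning
        // context pointer accompanies it. The dangling `from` pointer is
        // never dereferenced here.
        let import = VMGlobalImport {
            from: NonNull::<VMGlobalDefinition>::dangling().into(),
            vmctx: None,
            kind: VMGlobalKind::Host(DefinedGlobalIndex::from_u32(0)),
        };
        assert!(matches!(import.kind, VMGlobalKind::Host(_)));
    }
}
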
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}

#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtag_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagImport>(),
            usize::from(offsets.size_of_vmtag_import())
        );
        assert_eq!(
            offset_of!(VMTagImport, from),
            usize::from(offsets.vmtag_import_from())
        );
        assert_eq!(
            offset_of!(VMTagImport, vmctx),
            usize::from(offsets.vmtag_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTagImport, index),
            usize::from(offsets.vmtag_import_index())
        );
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}

impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared)
    /// will always return a precise result (since no concurrent modification
    /// is possible) but shared memories may see an imprecise value: a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    #[inline]
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    #[inline]
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = unsafe { &*ptr };
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}

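// An illustrative sketch, not part of Wasmtime proper: constructing a
// definition by hand and reading its length back through the relaxed
// accessor. For an owned (non-shared) memory this load is always precise;
// only shared memories may observe a stale, smaller value.
#[cfg(test)]
mod example_vmmemory_definition_length {
    use super::VMMemoryDefinition;
    use core::ptr::NonNull;
    use core::sync::atomic::AtomicUsize;

    #[test]
    fn relaxed_length_roundtrip() {
        // A dangling base is fine here; the pointer is never dereferenced.
        let def = VMMemoryDefinition {
            base: NonNull::<u8>::dangling().into(),
            current_length: AtomicUsize::new(65536),
        };
        assert_eq!(def.current_length(), 65536);
    }
}
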
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}

/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}

#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}

/// The storage for a WebAssembly global defined within the instance.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}

#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f32; 4]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f64; 2]>());
    }

    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}

impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        unsafe {
            match wasm_ty {
                WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
                WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
                WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
                WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
                WasmValType::V128 => global.set_u128(raw.get_v128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => {
                        let r = VMGcRef::from_raw_u32(raw.get_externref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Any => {
                        let r = VMGcRef::from_raw_u32(raw.get_anyref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                    WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
                    WasmHeapTopType::Exn => {
                        let r = VMGcRef::from_raw_u32(raw.get_exnref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                },
            }
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        unsafe {
            Ok(match wasm_ty {
                WasmValType::I32 => ValRaw::i32(*self.as_i32()),
                WasmValType::I64 => ValRaw::i64(*self.as_i64()),
                WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
                WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
                WasmValType::V128 => ValRaw::v128(self.get_u128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                        Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }),
                    WasmHeapTopType::Any => ValRaw::anyref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Exn => ValRaw::exnref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                },
            })
        }
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i32>()) }
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>()) }
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as a u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i64>()) }
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>()) }
    }

    /// Return a reference to the value as a u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as a u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f32>()) }
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>()) }
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f64>()) }
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>()) }
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        unsafe { u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>())) }
    }

    /// Sets the 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        unsafe {
            *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
        }
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = unsafe { (*raw_ptr).as_ref() };
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    pub unsafe fn init_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe {
            &mut *(self
                .storage
                .as_mut()
                .as_mut_ptr()
                .cast::<MaybeUninit<Option<VMGcRef>>>())
        };

        store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    pub unsafe fn write_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
        store.write_gc_ref(dest, gc_ref)
    }

    /// Return the value as a raw pointer to `VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        unsafe { *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>()) }
    }

    /// Return a mutable reference to the value as a `*mut VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>()) }
    }
}

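// An illustrative sketch, not part of Wasmtime proper: the 16-byte `storage`
// is reinterpreted according to the global's wasm type, so writing through a
// typed accessor and reading back with the same type is an identity.
#[cfg(test)]
mod example_vmglobal_definition_storage {
    use super::VMGlobalDefinition;

    #[test]
    fn i64_roundtrip() {
        let mut global = VMGlobalDefinition::new();
        // SAFETY: the accessors only reinterpret the inline 16-byte storage,
        // and we read back with the same type we wrote.
        unsafe {
            *global.as_i64_mut() = -42;
            assert_eq!(*global.as_i64(), -42);
        }
    }
}
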
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offsets.size_of_vmshared_type_index())
        );
    }
}

/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}

#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vmtag_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagDefinition>(),
            usize::from(offsets.ptr.size_of_vmtag_definition())
        );
    }

    #[test]
    fn check_vmtag_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_tags_begin() % 16, 0);
    }
}

/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}

impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the buffer capacity as `args_and_results.len()`
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns whether a trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        me: NonNull<VMFuncRef>,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        match pulley {
            Some(vm) => unsafe { Self::array_call_interpreted(me, vm, caller, args_and_results) },
            None => unsafe { Self::array_call_native(me, caller, args_and_results) },
        }
    }

    unsafe fn array_call_interpreted(
        me: NonNull<VMFuncRef>,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If `caller` is actually a `VMArrayCallHostFuncContext` then skip the
        // interpreter, even though it's available, as `array_call` will be
        // native code.
        unsafe {
            if me.as_ref().vmctx.as_non_null().as_ref().magic
                == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
            {
                return Self::array_call_native(me, caller, args_and_results);
            }
            vm.call(
                me.as_ref().array_call.as_non_null().cast(),
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results,
            )
        }
    }

    #[inline]
    unsafe fn array_call_native(
        me: NonNull<VMFuncRef>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        unsafe {
            union GetNativePointer {
                native: VMArrayCallNative,
                ptr: NonNull<VMArrayCallFunction>,
            }
            let native = GetNativePointer {
                ptr: me.as_ref().array_call.as_non_null(),
            }
            .native;
            native(
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results.cast(),
                args_and_results.len(),
            )
        }
    }
}

#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMFuncRef>(),
            usize::from(offsets.ptr.size_of_vm_func_ref())
        );
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(offsets.ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(offsets.ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(offsets.ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(offsets.ptr.vm_func_ref_vmctx())
        );
    }
}

macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self> {
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty size) => (usize);
    (@ty vmctx) => (NonNull<VMContext>);
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

wasmtime_environ::foreach_builtin_function!(define_builtin_array);

const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};

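// An illustrative sketch, not part of Wasmtime proper: the builtin table is
// created from `INIT` and its pointers' provenance is exposed once before
// entering wasm, which the runtime normally does on our behalf.
#[cfg(test)]
mod example_builtin_array {
    use super::VMBuiltinFunctionsArray;

    #[test]
    fn init_and_expose() {
        let array = VMBuiltinFunctionsArray::INIT;
        // Exposing provenance is idempotent; discarding the returned pointer
        // is fine for this demonstration.
        let _ptr = array.expose_provenance();
    }
}
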
/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// The "store version".
    ///
    /// This is used to test whether stack-frame handles referring to
    /// suspended stack frames remain valid.
    ///
    /// The invariant that this upward-counting number must satisfy
    /// is: the number must be incremented whenever execution starts
    /// or resumes in the `Store` or when any stack is
    /// dropped/freed. That way, if we take a reference to some
    /// suspended stack frame and track the "version" at the time we
    /// took that reference, if the version still matches, we can be
    /// sure that nothing could have unwound the referenced Wasm
    /// frame.
    ///
    /// This version number is incremented in exactly one place: the
    /// Wasm-to-host trampolines, after return from host code. Note
    /// that this captures both the normal "return into Wasm" case
    /// (where Wasm frames can subsequently return normally and thus
    /// invalidate frames), and the "trap/exception unwinds Wasm
    /// frames" case, which is done internally via the `raise` libcall
    /// invoked after the main hostcall returns an error, and after we
    /// increment this version number.
    ///
    /// Note that this also handles the fiber/future-drop case because
    /// we *always* return into the trampoline to clean up;
    /// that trampoline immediately raises an error and uses the
    /// longjmp-like unwind within Cranelift frames to skip over all
    /// the guest Wasm frames, but not before it increments the
    /// store's execution version number.
    ///
    /// This field is in use only if guest debugging is enabled.
    pub execution_version: u64,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register in the trampoline used
    /// to call from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just
    /// before calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames
    /// when walking the stack. Note that we record the FP of the
    /// *trampoline*'s frame, not the last Wasm frame, because we need
    /// to know the SP (bottom of frame) of the last Wasm frame as
    /// well in case we need to resume to an exception handler in that
    /// frame. The FP of the last Wasm frame can be recovered by
    /// loading the saved FP value at this FP address.
    pub last_wasm_exit_trampoline_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline. This member is `0` when Wasm
    /// is not running, and it's set to nonzero once a host-to-wasm trampoline
    /// is executed.
    ///
    /// When a host function is wrapped into a `wasmtime::Func` and is then
    /// called from the host, this member is not changed, meaning that the
    /// previous activation pointed to by `last_wasm_exit_trampoline_fp` is
    /// still the last set of Wasm frames on the stack.
    ///
    /// This field is saved/restored during fiber suspension and resumption
    /// as part of `CallThreadState::swap`.
    ///
    /// This field is used to find the end of a contiguous sequence of Wasm
    /// frames when walking the stack. Additionally it's used when a trap is
    /// raised as part of the set of parameters used to resume in the entry
    /// trampoline's "catch" block.
    pub last_wasm_entry_sp: UnsafeCell<usize>,

    /// Same as `last_wasm_entry_sp`, but for the `fp` of the trampoline.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// The last trap handler from a host-to-wasm entry trampoline on the stack.
    ///
    /// This field is configured by the entry trampoline itself when the host
    /// calls into wasm. It stores the `pc` of an exception handler suitable
    /// for handling all traps (or uncaught exceptions).
    pub last_wasm_entry_trap_handler: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,

    /// A pointer to the embedder's `T` inside a `Store<T>`, for use with the
    /// `store-data-address` unsafe intrinsic.
    pub store_data: VmPtr<()>,

    /// The range, in addresses, of the guard page that is currently in use.
    ///
    /// This field is used when signal handlers are run to determine whether a
    /// faulting address lies within the guard page of an async stack, for
    /// example. If this happens then the signal handler aborts with a stack
    /// overflow message similar to what would happen had the stack overflow
    /// happened on the main thread. By default this field is a null..null
    /// range, indicating that no async guard is in use (aka no fiber). In that
    /// case the field is still read, but it will never classify a fault as a
    /// guard-page fault.
    pub async_guard_range: Range<*mut u8>,
}

impl VMStoreContext {
    /// From the current saved trampoline FP, get the FP of the last
    /// Wasm frame. If the current saved trampoline FP is null, return
    /// null.
    ///
    /// We store only the trampoline FP, because (i) we need the
    /// trampoline FP, so we know the size (bottom) of the last Wasm
    /// frame; and (ii) the last Wasm frame, just above the trampoline
    /// frame, can be recovered via the FP chain.
    ///
    /// # Safety
    ///
    /// This function requires that the `last_wasm_exit_trampoline_fp`
    /// field either points to an active trampoline frame or is a null
    /// pointer.
    pub(crate) unsafe fn last_wasm_exit_fp(&self) -> usize {
        // SAFETY: the unsafe cell is safe to load (no other threads
        // will be writing our store when we have control), and the
        // helper function's safety condition is the same as ours.
        unsafe {
            let trampoline_fp = *self.last_wasm_exit_trampoline_fp.get();
            Self::wasm_exit_fp_from_trampoline_fp(trampoline_fp)
        }
    }

    /// From any saved trampoline FP, get the FP of the last Wasm
    /// frame. If the given trampoline FP is null, return null.
    ///
    /// This differs from `last_wasm_exit_fp()` above in that it
    /// allows accessing activations further up the stack as well,
    /// e.g. via `CallThreadState::old_state`.
    ///
    /// # Safety
    ///
    /// This function requires that the provided FP value is valid,
    /// and points to an active trampoline frame, or is null.
    ///
    /// This function depends on the invariant that on all supported
    /// architectures, we store the previous FP value under the
    /// current FP. This is a property of our ABI that we control and
    /// ensure.
    pub(crate) unsafe fn wasm_exit_fp_from_trampoline_fp(trampoline_fp: usize) -> usize {
        if trampoline_fp != 0 {
            // SAFETY: We require that trampoline_fp points to a valid
            // frame, which will (by definition) contain an old FP value
            // that we can load.
            unsafe { *(trampoline_fp as *const usize) }
        } else {
            0
        }
    }
}

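// An illustrative sketch, not part of Wasmtime proper: emulate the ABI
// invariant that a frame stores the previous FP at the address its own FP
// points to, then recover the Wasm frame's FP through the helper above. The
// FP value used is a made-up number.
#[cfg(test)]
mod example_trampoline_fp_chain {
    use super::VMStoreContext;

    #[test]
    fn follows_one_link() {
        let wasm_fp: usize = 0x1000; // hypothetical FP of the last Wasm frame
        let fake_trampoline_frame = [wasm_fp];
        let trampoline_fp = fake_trampoline_frame.as_ptr() as usize;
        // SAFETY: `trampoline_fp` points at our fake saved-FP slot, and the
        // null case performs no load at all.
        unsafe {
            assert_eq!(
                VMStoreContext::wasm_exit_fp_from_trampoline_fp(trampoline_fp),
                wasm_fp
            );
            assert_eq!(VMStoreContext::wasm_exit_fp_from_trampoline_fp(0), 0);
        }
    }
}
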
// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}

impl Default for VMStoreContext {
    fn default() -> VMStoreContext {
        VMStoreContext {
            fuel_consumed: UnsafeCell::new(0),
            epoch_deadline: UnsafeCell::new(0),
            execution_version: 0,
            stack_limit: UnsafeCell::new(usize::max_value()),
            gc_heap: VMMemoryDefinition {
                base: NonNull::dangling().into(),
                current_length: AtomicUsize::new(0),
            },
            last_wasm_exit_trampoline_fp: UnsafeCell::new(0),
            last_wasm_exit_pc: UnsafeCell::new(0),
            last_wasm_entry_fp: UnsafeCell::new(0),
            last_wasm_entry_sp: UnsafeCell::new(0),
            last_wasm_entry_trap_handler: UnsafeCell::new(0),
            stack_chain: UnsafeCell::new(VMStackChain::Absent),
            async_guard_range: ptr::null_mut()..ptr::null_mut(),
            store_data: VmPtr::dangling(),
        }
    }
}

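// An illustrative sketch, not part of Wasmtime proper: the default state
// documented above has zero fuel consumed and an effectively unlimited stack
// limit until a store configures otherwise.
#[cfg(test)]
mod example_vmstore_context_defaults {
    use super::VMStoreContext;

    #[test]
    fn default_state() {
        let ctx = VMStoreContext::default();
        // SAFETY: we have exclusive access to `ctx`, so reading through the
        // `UnsafeCell` fields cannot race.
        unsafe {
            assert_eq!(*ctx.fuel_consumed.get(), 0);
            assert_eq!(*ctx.stack_limit.get(), usize::MAX);
        }
    }
}
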
1314#[cfg(test)]
1315mod test_vmstore_context {
1316    use super::{VMMemoryDefinition, VMStoreContext};
1317    use core::mem::offset_of;
1318    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};
1319
1320    #[test]
1321    fn field_offsets() {
1322        let module = Module::new(StaticModuleIndex::from_u32(0));
1323        let offsets = VMOffsets::new(HostPtr, &module);
1324        assert_eq!(
1325            offset_of!(VMStoreContext, stack_limit),
1326            usize::from(offsets.ptr.vmstore_context_stack_limit())
1327        );
1328        assert_eq!(
1329            offset_of!(VMStoreContext, fuel_consumed),
1330            usize::from(offsets.ptr.vmstore_context_fuel_consumed())
1331        );
1332        assert_eq!(
1333            offset_of!(VMStoreContext, epoch_deadline),
1334            usize::from(offsets.ptr.vmstore_context_epoch_deadline())
1335        );
1336        assert_eq!(
1337            offset_of!(VMStoreContext, execution_version),
1338            usize::from(offsets.ptr.vmstore_context_execution_version())
1339        );
1340        assert_eq!(
1341            offset_of!(VMStoreContext, gc_heap),
1342            usize::from(offsets.ptr.vmstore_context_gc_heap())
1343        );
1344        assert_eq!(
1345            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, base),
1346            usize::from(offsets.ptr.vmstore_context_gc_heap_base())
1347        );
1348        assert_eq!(
1349            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, current_length),
1350            usize::from(offsets.ptr.vmstore_context_gc_heap_current_length())
1351        );
1352        assert_eq!(
1353            offset_of!(VMStoreContext, last_wasm_exit_trampoline_fp),
1354            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_trampoline_fp())
1355        );
1356        assert_eq!(
1357            offset_of!(VMStoreContext, last_wasm_exit_pc),
1358            usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc())
1359        );
1360        assert_eq!(
1361            offset_of!(VMStoreContext, last_wasm_entry_fp),
1362            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp())
1363        );
1364        assert_eq!(
1365            offset_of!(VMStoreContext, last_wasm_entry_sp),
1366            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_sp())
1367        );
1368        assert_eq!(
1369            offset_of!(VMStoreContext, last_wasm_entry_trap_handler),
1370            usize::from(offsets.ptr.vmstore_context_last_wasm_entry_trap_handler())
1371        );
1372        assert_eq!(
1373            offset_of!(VMStoreContext, stack_chain),
1374            usize::from(offsets.ptr.vmstore_context_stack_chain())
1375        );
1376        assert_eq!(
1377            offset_of!(VMStoreContext, store_data),
1378            usize::from(offsets.ptr.vmstore_context_store_data())
1379        );
1380    }
1381}
1382
1383/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
1384/// This has information about globals, memories, tables, and other runtime
1385/// state associated with the current instance.
1386///
1387/// The struct here is empty, as the sizes of these fields are dynamic, and
1388/// we can't describe them in Rust's type system. Sufficient memory is
1389/// allocated at runtime.
1390#[derive(Debug)]
1391#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
1392pub struct VMContext {
1393    _magic: u32,
1394}
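
// A hedged sketch (editorial, not upstream code) of how fields "inside" a
// `VMContext` are actually reached: since the Rust type is empty, runtime code
// adds byte offsets computed by `VMOffsets` to the raw `vmctx` pointer rather
// than going through Rust fields. Assuming `vmctx: NonNull<VMContext>` is
// valid and `offsets` are the `VMOffsets` for its module, reading the magic
// (whose offset is exposed as `VMOffsets::vmctx_magic`, per the comment in
// `from_opaque` below) might look like:
//
//     let magic = unsafe {
//         vmctx
//             .cast::<u8>()
//             .as_ptr()
//             .add(usize::try_from(offsets.vmctx_magic()).unwrap())
//             .cast::<u32>()
//             .read()
//     };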
1395
1396impl VMContext {
1397    /// Helper function to cast between context types using a debug assertion to
1398    /// protect against some mistakes.
1399    #[inline]
1400    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
1401        // Note that in general the offset of the "magic" field is stored in
1402        // `VMOffsets::vmctx_magic`. Given, though, that this is a sanity check
1403        // on converting this pointer to another type, we ideally don't want to
1404        // read the offset from potentially corrupt memory, and it's better to
1405        // catch errors here as soon as possible.
1406        //
1407        // To accomplish this the `VMContext` structure is laid out with the
1408        // magic field at a statically known offset (here it's 0 for now). This
1409        // static offset is asserted in `VMOffsets::from` and needs to be kept
1410        // in sync with this line for this debug assertion to work.
1411        //
1412        // Also note that this magic is only ever invalid in the presence of
1413        // bugs, meaning we don't actually read the magic and act differently
1414        // at runtime depending on what it is, so this is a debug assertion as
1415        // opposed to a regular assertion.
1416        unsafe {
1417            debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
1418        }
1419        opaque.cast()
1420    }
1421}
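
// A minimal usage sketch (the variable names here are hypothetical): code that
// erased a core wasm `vmctx` into an opaque pointer can recover the typed
// pointer later, with the debug-mode magic check above guarding the cast:
//
//     let opaque: NonNull<VMOpaqueContext> = VMOpaqueContext::from_vmcontext(vmctx);
//     // ... shuffle `opaque` through a `VMFuncRef` or similar ...
//     let vmctx: NonNull<VMContext> = unsafe { VMContext::from_opaque(opaque) };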
1422
1423/// A "raw" and unsafe representation of a WebAssembly value.
1424///
1425/// This is provided for use with the `Func::new_unchecked` and
1426/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
1427/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
1428///
1429/// This is notably an "unsafe" alternative to `Val`, and it's recommended to
1430/// use `Val` where possible. An important note about this union is that
1431/// fields are all stored in little-endian format, regardless of the endianness
1432/// of the host system.
1433#[repr(C)]
1434#[derive(Copy, Clone)]
1435pub union ValRaw {
1436    /// A WebAssembly `i32` value.
1437    ///
1438    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
1439    /// type does not assign an interpretation of the upper bit as either signed
1440    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
1441    ///
1442    /// This value is always stored in a little-endian format.
1443    i32: i32,
1444
1445    /// A WebAssembly `i64` value.
1446    ///
1447    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
1448    /// type does not assign an interpretation of the upper bit as either signed
1449    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
1450    ///
1451    /// This value is always stored in a little-endian format.
1452    i64: i64,
1453
1454    /// A WebAssembly `f32` value.
1455    ///
1456    /// Note that the payload here is a Rust `u32`. This is to allow passing any
1457    /// representation of NaN into WebAssembly without risk of changing NaN
1458    /// payload bits as it gets passed around the system. Otherwise, this
1459    /// `u32` value is the return value of `f32::to_bits` in Rust.
1460    ///
1461    /// This value is always stored in a little-endian format.
1462    f32: u32,
1463
1464    /// A WebAssembly `f64` value.
1465    ///
1466    /// Note that the payload here is a Rust `u64`. This is to allow passing any
1467    /// representation of NaN into WebAssembly without risk of changing NaN
1468    /// payload bits as it gets passed around the system. Otherwise, this
1469    /// `u64` value is the return value of `f64::to_bits` in Rust.
1470    ///
1471    /// This value is always stored in a little-endian format.
1472    f64: u64,
1473
1474    /// A WebAssembly `v128` value.
1475    ///
1476    /// The payload here is a Rust `[u8; 16]`, which has the same number of
1477    /// bits, but note that `v128` in WebAssembly is often considered a vector type
1478    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
1479    /// of the underlying bits is left up to the instructions which consume
1480    /// this value.
1481    ///
1482    /// This value is always stored in a little-endian format.
1483    v128: [u8; 16],
1484
1485    /// A WebAssembly `funcref` value (or one of its subtypes).
1486    ///
1487    /// The payload here is a pointer which is runtime-defined. This is one of
1488    /// the main points of unsafety about the `ValRaw` type as the validity of
1489    /// the pointer here is not easily verified and must be preserved by
1490    /// carefully calling the correct functions throughout the runtime.
1491    ///
1492    /// This value is always stored in a little-endian format.
1493    funcref: *mut c_void,
1494
1495    /// A WebAssembly `externref` value (or one of its subtypes).
1496    ///
1497    /// The payload here is a compressed pointer value which is
1498    /// runtime-defined. This is one of the main points of unsafety about the
1499    /// `ValRaw` type as the validity of the pointer here is not easily verified
1500    /// and must be preserved by carefully calling the correct functions
1501    /// throughout the runtime.
1502    ///
1503    /// This value is always stored in a little-endian format.
1504    externref: u32,
1505
1506    /// A WebAssembly `anyref` value (or one of its subtypes).
1507    ///
1508    /// The payload here is a compressed pointer value which is
1509    /// runtime-defined. This is one of the main points of unsafety about the
1510    /// `ValRaw` type as the validity of the pointer here is not easily verified
1511    /// and must be preserved by carefully calling the correct functions
1512    /// throughout the runtime.
1513    ///
1514    /// This value is always stored in a little-endian format.
1515    anyref: u32,
1516
1517    /// A WebAssembly `exnref` value (or one of its subtypes).
1518    ///
1519    /// The payload here is a compressed pointer value which is
1520    /// runtime-defined. This is one of the main points of unsafety about the
1521    /// `ValRaw` type as the validity of the pointer here is not easily verified
1522    /// and must be preserved by carefully calling the correct functions
1523    /// throughout the runtime.
1524    ///
1525    /// This value is always stored in a little-endian format.
1526    exnref: u32,
1527}
1528
1529// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
1530// are some simple assertions about the shape of the type which are additionally
1531// matched in C.
1532const _: () = {
1533    assert!(mem::size_of::<ValRaw>() == 16);
1534    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
1535};
1536
1537// This type is just a bag-of-bits, so it's up to the caller to figure out how
1538// to safely deal with threading concerns and access the interior bits.
1539unsafe impl Send for ValRaw {}
1540unsafe impl Sync for ValRaw {}
1541
1542impl fmt::Debug for ValRaw {
1543    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1544        struct Hex<T>(T);
1545        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1546            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1547                let bytes = mem::size_of::<T>();
1548                let hex_digits_per_byte = 2;
1549                let hex_digits = bytes * hex_digits_per_byte;
1550                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1551            }
1552        }
1553
1554        unsafe {
1555            f.debug_struct("ValRaw")
1556                .field("i32", &Hex(self.i32))
1557                .field("i64", &Hex(self.i64))
1558                .field("f32", &Hex(self.f32))
1559                .field("f64", &Hex(self.f64))
1560                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1561                .field("funcref", &self.funcref)
1562                .field("externref", &Hex(self.externref))
1563                .field("anyref", &Hex(self.anyref))
1564                .field("exnref", &Hex(self.exnref))
1565                .finish()
1566        }
1567    }
1568}
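
// For reference (a hedged illustration, not normative output): each field of
// the union is rendered in fixed-width hex over the same underlying storage,
// so `format!("{:?}", ValRaw::u64(1))` begins along the lines of:
//
//     ValRaw { i32: 0x00000001, i64: 0x0000000000000001, ... }
//
// with the remaining fields reinterpreting those same bytes.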
1569
1570impl ValRaw {
1571    /// Create a null reference that is compatible with any of
1572    /// `{any,extern,func,exn}ref`.
1573    pub fn null() -> ValRaw {
1574        unsafe {
1575            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1576            debug_assert_eq!(raw.get_anyref(), 0);
1577            debug_assert_eq!(raw.get_exnref(), 0);
1578            debug_assert_eq!(raw.get_externref(), 0);
1579            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1580            raw
1581        }
1582    }
1583
1584    /// Creates a WebAssembly `i32` value
1585    #[inline]
1586    pub fn i32(i: i32) -> ValRaw {
1587        // Note that this is intentionally not setting the `i32` field, instead
1588        // setting the `i64` field with a zero-extended version of `i`. For more
1589        // information on this see the comments on `Lower for Result` in the
1590        // `wasmtime` crate. Beyond that, all `ValRaw` constructors are
1591        // constrained to guarantee that the initial 64 bits are always
1592        // initialized.
1593        ValRaw::u64(i.cast_unsigned().into())
1594    }
1595
1596    /// Creates a WebAssembly `i64` value
1597    #[inline]
1598    pub fn i64(i: i64) -> ValRaw {
1599        ValRaw { i64: i.to_le() }
1600    }
1601
1602    /// Creates a WebAssembly `i32` value
1603    #[inline]
1604    pub fn u32(i: u32) -> ValRaw {
1605        // See comments in `ValRaw::i32` for why this is setting the upper
1606        // 32-bits as well.
1607        ValRaw::u64(i.into())
1608    }
1609
1610    /// Creates a WebAssembly `i64` value
1611    #[inline]
1612    pub fn u64(i: u64) -> ValRaw {
1613        ValRaw::i64(i as i64)
1614    }
1615
1616    /// Creates a WebAssembly `f32` value
1617    #[inline]
1618    pub fn f32(i: u32) -> ValRaw {
1619        // See comments in `ValRaw::i32` for why this is setting the upper
1620        // 32-bits as well.
1621        ValRaw::u64(i.into())
1622    }
1623
1624    /// Creates a WebAssembly `f64` value
1625    #[inline]
1626    pub fn f64(i: u64) -> ValRaw {
1627        ValRaw { f64: i.to_le() }
1628    }
1629
1630    /// Creates a WebAssembly `v128` value
1631    #[inline]
1632    pub fn v128(i: u128) -> ValRaw {
1633        ValRaw {
1634            v128: i.to_le_bytes(),
1635        }
1636    }
1637
1638    /// Creates a WebAssembly `funcref` value
1639    #[inline]
1640    pub fn funcref(i: *mut c_void) -> ValRaw {
1641        ValRaw {
1642            funcref: i.map_addr(|i| i.to_le()),
1643        }
1644    }
1645
1646    /// Creates a WebAssembly `externref` value
1647    #[inline]
1648    pub fn externref(e: u32) -> ValRaw {
1649        assert!(cfg!(feature = "gc") || e == 0);
1650        ValRaw {
1651            externref: e.to_le(),
1652        }
1653    }
1654
1655    /// Creates a WebAssembly `anyref` value
1656    #[inline]
1657    pub fn anyref(r: u32) -> ValRaw {
1658        assert!(cfg!(feature = "gc") || r == 0);
1659        ValRaw { anyref: r.to_le() }
1660    }
1661
1662    /// Creates a WebAssembly `exnref` value
1663    #[inline]
1664    pub fn exnref(r: u32) -> ValRaw {
1665        assert!(cfg!(feature = "gc") || r == 0);
1666        ValRaw { exnref: r.to_le() }
1667    }
1668
1669    /// Gets the WebAssembly `i32` value
1670    #[inline]
1671    pub fn get_i32(&self) -> i32 {
1672        unsafe { i32::from_le(self.i32) }
1673    }
1674
1675    /// Gets the WebAssembly `i64` value
1676    #[inline]
1677    pub fn get_i64(&self) -> i64 {
1678        unsafe { i64::from_le(self.i64) }
1679    }
1680
1681    /// Gets the WebAssembly `i32` value
1682    #[inline]
1683    pub fn get_u32(&self) -> u32 {
1684        self.get_i32().cast_unsigned()
1685    }
1686
1687    /// Gets the WebAssembly `i64` value
1688    #[inline]
1689    pub fn get_u64(&self) -> u64 {
1690        self.get_i64().cast_unsigned()
1691    }
1692
1693    /// Gets the WebAssembly `f32` value
1694    #[inline]
1695    pub fn get_f32(&self) -> u32 {
1696        unsafe { u32::from_le(self.f32) }
1697    }
1698
1699    /// Gets the WebAssembly `f64` value
1700    #[inline]
1701    pub fn get_f64(&self) -> u64 {
1702        unsafe { u64::from_le(self.f64) }
1703    }
1704
1705    /// Gets the WebAssembly `v128` value
1706    #[inline]
1707    pub fn get_v128(&self) -> u128 {
1708        unsafe { u128::from_le_bytes(self.v128) }
1709    }
1710
1711    /// Gets the WebAssembly `funcref` value
1712    #[inline]
1713    pub fn get_funcref(&self) -> *mut c_void {
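        // `ValRaw::funcref` stores the pointer with `map_addr(|a| a.to_le())`,
        // so first convert the address back to host byte order. The pointer is
        // then rebuilt with `with_exposed_provenance_mut` since this slot may
        // have been written as raw bytes by compiled code, so no Rust-level
        // provenance is assumed to have survived (an editorial reading of the
        // use of exposed provenance here).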
1714        let addr = unsafe { usize::from_le(self.funcref.addr()) };
1715        core::ptr::with_exposed_provenance_mut(addr)
1716    }
1717
1718    /// Gets the WebAssembly `externref` value
1719    #[inline]
1720    pub fn get_externref(&self) -> u32 {
1721        let externref = u32::from_le(unsafe { self.externref });
1722        assert!(cfg!(feature = "gc") || externref == 0);
1723        externref
1724    }
1725
1726    /// Gets the WebAssembly `anyref` value
1727    #[inline]
1728    pub fn get_anyref(&self) -> u32 {
1729        let anyref = u32::from_le(unsafe { self.anyref });
1730        assert!(cfg!(feature = "gc") || anyref == 0);
1731        anyref
1732    }
1733
1734    /// Gets the WebAssembly `exnref` value
1735    #[inline]
1736    pub fn get_exnref(&self) -> u32 {
1737        let exnref = u32::from_le(unsafe { self.exnref });
1738        assert!(cfg!(feature = "gc") || exnref == 0);
1739        exnref
1740    }
1741}
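
// A hedged sketch (editorial, not part of the upstream test suite) of the
// round-trip guarantees documented above: constructors store little-endian
// payloads, 32-bit constructors zero-extend into the full 64-bit slot, and
// getters convert back to host byte order.
#[cfg(test)]
mod test_valraw_roundtrip {
    use super::ValRaw;

    #[test]
    fn roundtrip() {
        // 32-bit constructors zero-extend, so a negative `i32` reads back as
        // its unsigned 32-bit value through `get_u64`.
        assert_eq!(ValRaw::i32(-1).get_i32(), -1);
        assert_eq!(ValRaw::i32(-1).get_u64(), 0xffff_ffff);

        // Plain little-endian round-trips through the 64- and 128-bit slots.
        assert_eq!(ValRaw::f64(0x4000_0000_0000_0000).get_f64(), 0x4000_0000_0000_0000);
        assert_eq!(ValRaw::v128(0x0102_0304_0506_0708).get_v128(), 0x0102_0304_0506_0708);

        // `null` is a zeroed value compatible with all reference types.
        assert_eq!(ValRaw::null().get_anyref(), 0);
        assert_eq!(ValRaw::null().get_funcref(), core::ptr::null_mut());
    }
}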
1742
1743/// An "opaque" version of `VMContext` which must be explicitly casted to a
1744/// target context.
1745///
1746/// This context is used to represent that contexts specified in
1747/// `VMFuncRef` can have any type and don't have an implicit
1748/// structure. Neither wasmtime nor cranelift-generated code can rely on the
1749/// structure of an opaque context in general; only the code which configured
1750/// the context can rely on a particular structure. This is because the
1751/// context pointer configured for `VMFuncRef` is guaranteed to be
1752/// the first parameter passed.
1753///
1754/// Note that Wasmtime currently has a layout where all contexts that are cast
1755/// to an opaque context start with a 32-bit "magic" value which debug builds
1756/// use to assert that the casts here are correct, giving at least a little
1757/// protection against incorrect casts.
1758pub struct VMOpaqueContext {
1759    pub(crate) magic: u32,
1760    _marker: marker::PhantomPinned,
1761}
1762
1763impl VMOpaqueContext {
1764    /// Helper function to clearly indicate that casts are desired.
1765    #[inline]
1766    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1767        ptr.cast()
1768    }
1769
1770    /// Helper function to clearly indicate that casts are desired.
1771    #[inline]
1772    pub fn from_vm_array_call_host_func_context(
1773        ptr: NonNull<VMArrayCallHostFuncContext>,
1774    ) -> NonNull<VMOpaqueContext> {
1775        ptr.cast()
1776    }
1777}