// Source: wasmtime/runtime/vm/vmcontext.rs
1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
9use crate::store::StoreOpaque;
10use crate::vm::stack_switching::VMStackChain;
11use core::cell::UnsafeCell;
12use core::ffi::c_void;
13use core::fmt;
14use core::marker;
15use core::mem::{self, MaybeUninit};
16use core::ops::Range;
17use core::ptr::{self, NonNull};
18use core::sync::atomic::{AtomicUsize, Ordering};
19use wasmtime_environ::{
20    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
21    DefinedTagIndex, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
22};
23
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>, // callee vmctx
    NonNull<VMContext>,       // caller vmctx
    NonNull<ValRaw>,          // args/results buffer
    usize,                    // capacity of the args/results buffer
) -> bool;
52
/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
///
/// `repr(transparent)` keeps this layout-identical to `VMFunctionBody`, so
/// pointers to either can be freely reinterpreted.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);
58
/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
///
/// `repr(transparent)` keeps this layout-identical to `VMFunctionBody`.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
69
/// An imported function.
///
/// Compiled code reads these fields directly, so the layout must agree with
/// `VMOffsets`; this is asserted by the `test_vmfunction_import` module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}
92
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// The Rust layout of `VMFunctionImport` must agree with the layout
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);

        let expected_size = usize::from(offsets.size_of_vmfunction_import());
        assert_eq!(size_of::<VMFunctionImport>(), expected_size);

        // (actual Rust offset, offset expected by compiled code)
        let field_offsets = [
            (
                offset_of!(VMFunctionImport, wasm_call),
                offsets.vmfunction_import_wasm_call(),
            ),
            (
                offset_of!(VMFunctionImport, array_call),
                offsets.vmfunction_import_array_call(),
            ),
            (
                offset_of!(VMFunctionImport, vmctx),
                offsets.vmfunction_import_vmctx(),
            ),
        ];
        for (actual, expected) in field_offsets {
            assert_eq!(actual, usize::from(expected));
        }
    }
}
122
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
///
/// The `test_vmfunction_body` module below pins this type to exactly one byte.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}
132
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    /// `VMFunctionBody` is a placeholder byte type, so it must stay exactly
    /// one byte large.
    #[test]
    fn check_vmfunction_body_offsets() {
        let actual = size_of::<VMFunctionBody>();
        assert_eq!(actual, 1, "VMFunctionBody must remain byte-sized");
    }
}
143
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
///
/// Layout is validated against `VMOffsets` by the `test_vmtable` module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}
161
#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// The Rust layout of `VMTableImport` must agree with the layout
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmtable_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
        assert_eq!(
            offset_of!(VMTableImport, vmctx),
            usize::from(offsets.vmtable_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTableImport, index),
            usize::from(offsets.vmtable_import_index())
        );
    }

    /// Core-module and component offset calculations for `VMTableImport`
    /// must never drift apart.
    #[test]
    fn ensure_sizes_match() {
        // Because we use `VMTableImport` for recording tables used by components, we
        // want to make sure that the size calculations between `VMOffsets` and
        // `VMComponentOffsets` stay the same.
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let vm_offsets = VMOffsets::new(HostPtr, &module);
        let component = Component::default();
        let vm_component_offsets = VMComponentOffsets::new(HostPtr, &component);
        assert_eq!(
            vm_offsets.size_of_vmtable_import(),
            vm_component_offsets.size_of_vmtable_import()
        );
    }
}
207
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
///
/// Layout is validated against `VMOffsets` by the `test_vmmemory_import`
/// module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}
225
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// The Rust layout of `VMMemoryImport` must agree with the layout
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);

        let expected_size = usize::from(offsets.size_of_vmmemory_import());
        assert_eq!(size_of::<VMMemoryImport>(), expected_size);

        // (actual Rust offset, offset expected by compiled code)
        let field_offsets = [
            (
                offset_of!(VMMemoryImport, from),
                offsets.vmmemory_import_from(),
            ),
            (
                offset_of!(VMMemoryImport, vmctx),
                offsets.vmmemory_import_vmctx(),
            ),
            (
                offset_of!(VMMemoryImport, index),
                offsets.vmmemory_import_index(),
            ),
        ];
        for (actual, expected) in field_offsets {
            assert_eq!(actual, usize::from(expected));
        }
    }
}
255
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}
283
/// The kinds of globals that Wasmtime has.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
    /// NOTE(review): undocumented variant — presumably a component-model
    /// "task may block" flag; confirm semantics against the component
    /// runtime before relying on this description.
    #[cfg(feature = "component-model")]
    TaskMayBlock,
}

// SAFETY: the above enum is repr(C, u32) and its payloads are index types,
// which are themselves `VmSafe`.
unsafe impl VmSafe for VMGlobalKind {}
301
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// Validate `VMGlobalImport`'s size and the offset of its `from` field
    /// against the values `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);

        let expected_size = usize::from(offsets.size_of_vmglobal_import());
        let expected_from = usize::from(offsets.vmglobal_import_from());

        assert_eq!(size_of::<VMGlobalImport>(), expected_size);
        assert_eq!(offset_of!(VMGlobalImport, from), expected_from);
    }
}
323
/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
///
/// Layout is validated against `VMOffsets` by the `test_vmtag_import`
/// module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}
341
#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// The Rust layout of `VMTagImport` must agree with the layout
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmtag_import_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagImport>(),
            usize::from(offsets.size_of_vmtag_import())
        );
        assert_eq!(
            offset_of!(VMTagImport, from),
            usize::from(offsets.vmtag_import_from())
        );
        assert_eq!(
            offset_of!(VMTagImport, vmctx),
            usize::from(offsets.vmtag_import_vmctx())
        );
        assert_eq!(
            offset_of!(VMTagImport, index),
            usize::from(offsets.vmtag_import_index())
        );
    }
}
370
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}
391
impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value--a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    #[inline]
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null, properly aligned, and point to a live
    /// `VMMemoryDefinition` that is not mutated (other than via its atomic
    /// field) for the duration of this call.
    #[inline]
    pub unsafe fn load(ptr: *mut Self) -> Self {
        // Only `current_length` needs care here: it's re-read through the
        // relaxed atomic accessor rather than copied bitwise.
        let other = unsafe { &*ptr };
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
417
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// The Rust layout of `VMMemoryDefinition` must agree with the layout
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}
449
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
///
/// Layout is validated against `VMOffsets` by the `test_vmtable_definition`
/// module below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}
464
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// The Rust layout of `VMTableDefinition` must agree with the layout
    /// `VMOffsets` computes for compiled code.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);

        assert_eq!(
            usize::from(offsets.size_of_vmtable_definition()),
            size_of::<VMTableDefinition>()
        );
        assert_eq!(
            usize::from(offsets.vmtable_definition_base()),
            offset_of!(VMTableDefinition, base)
        );
        assert_eq!(
            usize::from(offsets.vmtable_definition_current_elements()),
            offset_of!(VMTableDefinition, current_elements)
        );
    }
}
490
/// The storage for a WebAssembly global defined within the instance.
///
/// All globals use a fixed 16-byte, 16-byte-aligned slot regardless of their
/// actual type; typed access goes through the accessors in the `impl` below.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // Raw, untyped backing bytes for the global's value.
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}
504
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// The slot must be aligned well enough for every payload type a global
    /// can hold.
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f32; 4]>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[f64; 2]>());
    }

    /// The Rust size of the slot must match what `VMOffsets` tells compiled
    /// code.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    /// The globals region within a `VMContext` must start 16-byte aligned so
    /// every slot inherits the struct's `align(16)` requirement.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    /// A GC reference must fit in the 16-byte slot.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
545
impl VMGlobalDefinition {
    /// Construct a zero-initialized `VMGlobalDefinition`.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Safety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        unsafe {
            match wasm_ty {
                WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
                WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
                WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
                WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
                WasmValType::V128 => global.set_u128(raw.get_v128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    WasmHeapTopType::Extern => {
                        let r = VMGcRef::from_raw_u32(raw.get_externref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Any => {
                        let r = VMGcRef::from_raw_u32(raw.get_anyref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                    WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                    WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
                    WasmHeapTopType::Exn => {
                        let r = VMGcRef::from_raw_u32(raw.get_exnref());
                        global.init_gc_ref(store, r.as_ref())
                    }
                },
            }
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// # Safety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        unsafe {
            Ok(match wasm_ty {
                WasmValType::I32 => ValRaw::i32(*self.as_i32()),
                WasmValType::I64 => ValRaw::i64(*self.as_i64()),
                WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
                WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
                WasmValType::V128 => ValRaw::v128(self.get_u128()),
                WasmValType::Ref(r) => match r.heap_type.top() {
                    // GC references are cloned so the `ValRaw` carries its own
                    // reference; a null global becomes raw 0.
                    WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                        Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }),
                    WasmHeapTopType::Any => ValRaw::anyref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Exn => ValRaw::exnref({
                        match self.as_gc_ref() {
                            Some(r) => store.clone_gc_ref(r).as_raw_u32(),
                            None => 0,
                        }
                    }),
                    WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                },
            })
        }
    }

    /// Return a reference to the value as an i32.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_i32(&self) -> &i32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i32>()) }
    }

    /// Return a mutable reference to the value as an i32.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>()) }
    }

    /// Return a reference to the value as a u32.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_u32(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as an u32.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an i64.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_i64(&self) -> &i64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<i64>()) }
    }

    /// Return a mutable reference to the value as an i64.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>()) }
    }

    /// Return a reference to the value as an u64.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_u64(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as an u64.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Return a reference to the value as an f32.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f32(&self) -> &f32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f32>()) }
    }

    /// Return a mutable reference to the value as an f32.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>()) }
    }

    /// Return a reference to the value as f32 bits.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u32>()) }
    }

    /// Return a mutable reference to the value as f32 bits.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>()) }
    }

    /// Return a reference to the value as an f64.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f64(&self) -> &f64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<f64>()) }
    }

    /// Return a mutable reference to the value as an f64.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>()) }
    }

    /// Return a reference to the value as f64 bits.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<u64>()) }
    }

    /// Return a mutable reference to the value as f64 bits.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>()) }
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn get_u128(&self) -> u128 {
        unsafe { u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>())) }
    }

    /// Sets the 128-bit vector values.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn set_u128(&mut self, val: u128) {
        unsafe {
            *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
        }
    }

    /// Return a reference to the value as u128 bits.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        unsafe { &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a mutable reference to the value as u128 bits.
    ///
    /// # Safety
    ///
    /// The global must currently hold a value of this type.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>()) }
    }

    /// Return a reference to the global value as a borrowed GC reference.
    ///
    /// # Safety
    ///
    /// The global must currently hold a (possibly null) GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = unsafe { (*raw_ptr).as_ref() };
        // Without the `gc` feature no GC reference should ever be stored here.
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    ///
    /// # Safety
    ///
    /// This global's type must be a GC reference type, and its storage must
    /// not yet contain an initialized GC reference (initialization, as opposed
    /// to `write_gc_ref`, does not drop a previous value).
    pub unsafe fn init_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe {
            &mut *(self
                .storage
                .as_mut()
                .as_mut_ptr()
                .cast::<MaybeUninit<Option<VMGcRef>>>())
        };

        store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    ///
    /// # Safety
    ///
    /// The global must currently hold a (possibly null) GC reference; the
    /// previous reference is handed to the store for proper bookkeeping.
    pub unsafe fn write_gc_ref(&mut self, store: &mut StoreOpaque, gc_ref: Option<&VMGcRef>) {
        let dest = unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>()) };
        store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    ///
    /// # Safety
    ///
    /// The global must currently hold a funcref value.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        unsafe { *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>()) }
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    ///
    /// # Safety
    ///
    /// The global must currently hold a funcref value.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        unsafe { &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>()) }
    }
}
777
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, StaticModuleIndex, VMOffsets};

    /// The Rust size of `VMSharedTypeIndex` must match what `VMOffsets`
    /// tells compiled code.
    #[test]
    fn check_vmshared_type_index() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offsets.size_of_vmshared_type_index())
        );
    }
}
794
/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Create a tag definition for the given shared signature type.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}
813
#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// The Rust size of `VMTagDefinition` must match what `VMOffsets` tells
    /// compiled code.
    #[test]
    fn check_vmtag_definition_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMTagDefinition>(),
            usize::from(offsets.ptr.size_of_vmtag_definition())
        );
    }

    /// The tags region within a `VMContext` must start 16-byte aligned.
    #[test]
    fn check_vmtag_begins_aligned() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(offsets.vmctx_tags_begin() % 16, 0);
    }
}
837
/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}
884
impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the length of `args_and_results` as the buffer capacity
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns `true` on success, or `false` if the call failed and a trap
    /// was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        me: NonNull<VMFuncRef>,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Dispatch to the interpreter when one is provided, otherwise invoke
        // the native entrypoint directly.
        match pulley {
            Some(vm) => unsafe { Self::array_call_interpreted(me, vm, caller, args_and_results) },
            None => unsafe { Self::array_call_native(me, caller, args_and_results) },
        }
    }

    unsafe fn array_call_interpreted(
        me: NonNull<VMFuncRef>,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If the callee's `vmctx` is actually a `VMArrayCallHostFuncContext`
        // then skip the interpreter, even though it's available, as
        // `array_call` will be native code.
        unsafe {
            if me.as_ref().vmctx.as_non_null().as_ref().magic
                == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
            {
                return Self::array_call_native(me, caller, args_and_results);
            }
            vm.call(
                me.as_ref().array_call.as_non_null(),
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results,
            )
        }
    }

    #[inline]
    unsafe fn array_call_native(
        me: NonNull<VMFuncRef>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        unsafe {
            // Reinterpret the opaque `VMArrayCallFunction` pointer as a typed
            // native function pointer via a union.
            union GetNativePointer {
                native: VMArrayCallNative,
                ptr: NonNull<VMArrayCallFunction>,
            }
            let native = GetNativePointer {
                ptr: me.as_ref().array_call.as_non_null(),
            }
            .native;
            // The array calling convention takes the `ValRaw` buffer as a raw
            // pointer plus its capacity.
            native(
                me.as_ref().vmctx.as_non_null(),
                caller,
                args_and_results.cast(),
                args_and_results.len(),
            )
        }
    }
}
972
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// The size and every field offset of `VMFuncRef` must match the layout
    /// that `VMOffsets` describes to compiled code.
    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        let ptr = &offsets.ptr;
        assert_eq!(size_of::<VMFuncRef>(), usize::from(ptr.size_of_vm_func_ref()));
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(ptr.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(ptr.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(ptr.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(ptr.vm_func_ref_vmctx())
        );
    }
}
1006
// Expanded by `foreach_builtin_function!` below with the full list of builtin
// libcalls; generates `VMBuiltinFunctionsArray` with one function-pointer
// field per builtin.
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self>{
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    // Map the symbolic type names used in the builtin-function listing to
    // concrete Rust types for the generated function-pointer signatures.
    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty size) => (usize);
    (@ty vmctx) => (NonNull<VMContext>);
}
1064
// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

// Expand the macro above with every builtin, defining
// `VMBuiltinFunctionsArray` and its impls.
wasmtime_environ::foreach_builtin_function!(define_builtin_array);

// Every builtin is stored as exactly one function pointer, so the array's
// total size must be one pointer per builtin.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
1076
/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register in the trampoline used
    /// to call from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just
    /// before calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames
    /// when walking the stack. Note that we record the FP of the
    /// *trampoline*'s frame, not the last Wasm frame, because we need
    /// to know the SP (bottom of frame) of the last Wasm frame as
    /// well in case we need to resume to an exception handler in that
    /// frame. The FP of the last Wasm frame can be recovered by
    /// loading the saved FP value at this FP address.
    pub last_wasm_exit_trampoline_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline. This member is `0` when Wasm
    /// is not running, and it's set to nonzero once a host-to-wasm trampoline
    /// is executed.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member is not changed meaning that the
    /// previous activation pointed to by `last_wasm_exit_trampoline_fp` is
    /// still the last wasm set of frames on the stack.
    ///
    /// This field is saved/restored during fiber suspension/resumption as part
    /// of `CallThreadState::swap`.
    ///
    /// This field is used to find the end of a contiguous sequence of Wasm
    /// frames when walking the stack. Additionally it's used when a trap is
    /// raised as part of the set of parameters used to resume in the entry
    /// trampoline's "catch" block.
    pub last_wasm_entry_sp: UnsafeCell<usize>,

    /// Same as `last_wasm_entry_sp`, but for the `fp` of the trampoline.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// The last trap handler from a host-to-wasm entry trampoline on the stack.
    ///
    /// This field is configured when the host calls into wasm by the trampoline
    /// itself. It stores the `pc` of an exception handler suitable to handle
    /// all traps (or uncaught exceptions).
    pub last_wasm_entry_trap_handler: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,

    /// A pointer to the embedder's `T` inside a `Store<T>`, for use with the
    /// `store-data-address` unsafe intrinsic.
    pub store_data: VmPtr<()>,

    /// The range, in addresses, of the guard page that is currently in use.
    ///
    /// This field is used when signal handlers are run to determine whether a
    /// faulting address lies within the guard page of an async stack for
    /// example. If this happens then the signal handler aborts with a stack
    /// overflow message similar to what would happen had the stack overflow
    /// happened on the main thread. This field is, by default, a null..null
    /// range indicating that no async guard is in use (aka no fiber). In such a
    /// situation while this field is read it'll never classify a fault as a
    /// guard page fault.
    pub async_guard_range: Range<*mut u8>,
}
1194
impl VMStoreContext {
    /// From the current saved trampoline FP, get the FP of the last
    /// Wasm frame. If the current saved trampoline FP is null, return
    /// null.
    ///
    /// We store only the trampoline FP, because (i) we need the
    /// trampoline FP, so we know the size (bottom) of the last Wasm
    /// frame; and (ii) the last Wasm frame, just above the trampoline
    /// frame, can be recovered via the FP chain.
    ///
    /// Returns `0` when no trampoline frame is currently recorded.
    ///
    /// # Safety
    ///
    /// This function requires that the `last_wasm_exit_trampoline_fp`
    /// field either points to an active trampoline frame or is a null
    /// pointer.
    pub(crate) unsafe fn last_wasm_exit_fp(&self) -> usize {
        // SAFETY: the unsafe cell is safe to load (no other threads
        // will be writing our store when we have control), and the
        // helper function's safety condition is the same as ours.
        unsafe {
            let trampoline_fp = *self.last_wasm_exit_trampoline_fp.get();
            Self::wasm_exit_fp_from_trampoline_fp(trampoline_fp)
        }
    }

    /// From any saved trampoline FP, get the FP of the last Wasm
    /// frame. If the given trampoline FP is null, return null.
    ///
    /// This differs from `last_wasm_exit_fp()` above in that it
    /// allows accessing activations further up the stack as well,
    /// e.g. via `CallThreadState::old_state`.
    ///
    /// # Safety
    ///
    /// This function requires that the provided FP value is valid,
    /// and points to an active trampoline frame, or is null.
    ///
    /// This function depends on the invariant that on all supported
    /// architectures, we store the previous FP value under the
    /// current FP. This is a property of our ABI that we control and
    /// ensure.
    pub(crate) unsafe fn wasm_exit_fp_from_trampoline_fp(trampoline_fp: usize) -> usize {
        if trampoline_fp != 0 {
            // SAFETY: We require that trampoline_fp points to a valid
            // frame, which will (by definition) contain an old FP value
            // that we can load.
            unsafe { *(trampoline_fp as *const usize) }
        } else {
            0
        }
    }
}
1247
// SAFETY: the `VMStoreContext` type is a pod-type with no destructor, and we
// don't access any fields from other threads, so add in these trait impls
// which are otherwise not available due to the `fuel_consumed` and
// `epoch_deadline` variables in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}
1257
1258impl Default for VMStoreContext {
1259    fn default() -> VMStoreContext {
1260        VMStoreContext {
1261            fuel_consumed: UnsafeCell::new(0),
1262            epoch_deadline: UnsafeCell::new(0),
1263            stack_limit: UnsafeCell::new(usize::max_value()),
1264            gc_heap: VMMemoryDefinition {
1265                base: NonNull::dangling().into(),
1266                current_length: AtomicUsize::new(0),
1267            },
1268            last_wasm_exit_trampoline_fp: UnsafeCell::new(0),
1269            last_wasm_exit_pc: UnsafeCell::new(0),
1270            last_wasm_entry_fp: UnsafeCell::new(0),
1271            last_wasm_entry_sp: UnsafeCell::new(0),
1272            last_wasm_entry_trap_handler: UnsafeCell::new(0),
1273            stack_chain: UnsafeCell::new(VMStackChain::Absent),
1274            async_guard_range: ptr::null_mut()..ptr::null_mut(),
1275            store_data: VmPtr::dangling(),
1276        }
1277    }
1278}
1279
#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    /// Every field offset of `VMStoreContext` (including the fields of the
    /// embedded `gc_heap`) must match the offsets `VMOffsets` reports to
    /// compiled code.
    #[test]
    fn field_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        let ptr = &offsets.ptr;
        assert_eq!(
            offset_of!(VMStoreContext, stack_limit),
            usize::from(ptr.vmstore_context_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStoreContext, fuel_consumed),
            usize::from(ptr.vmstore_context_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMStoreContext, epoch_deadline),
            usize::from(ptr.vmstore_context_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap),
            usize::from(ptr.vmstore_context_gc_heap())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, base),
            usize::from(ptr.vmstore_context_gc_heap_base())
        );
        assert_eq!(
            offset_of!(VMStoreContext, gc_heap) + offset_of!(VMMemoryDefinition, current_length),
            usize::from(ptr.vmstore_context_gc_heap_current_length())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_trampoline_fp),
            usize::from(ptr.vmstore_context_last_wasm_exit_trampoline_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_pc),
            usize::from(ptr.vmstore_context_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_fp),
            usize::from(ptr.vmstore_context_last_wasm_entry_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_sp),
            usize::from(ptr.vmstore_context_last_wasm_entry_sp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_trap_handler),
            usize::from(ptr.vmstore_context_last_wasm_entry_trap_handler())
        );
        assert_eq!(
            offset_of!(VMStoreContext, stack_chain),
            usize::from(ptr.vmstore_context_stack_chain())
        );
        assert_eq!(
            offset_of!(VMStoreContext, store_data),
            usize::from(ptr.vmstore_context_store_data())
        );
    }
}
1344
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// Magic value at offset 0; `VMContext::from_opaque` debug-asserts that
    /// this equals `VMCONTEXT_MAGIC` when converting from a `VMOpaqueContext`.
    _magic: u32,
}
1357
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// The `opaque` pointer must be valid to read and must actually point at a
    /// `VMContext`; its `magic` field is only verified in debug builds.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        unsafe {
            debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        }
        opaque.cast()
    }
}
1384
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,

    /// A WebAssembly `exnref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    exnref: u32,
}
1490
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// SAFETY: this type is just a bag-of-bits so it's up to the caller to figure
// out how to safely deal with threading concerns and safely access interior
// bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1503
1504impl fmt::Debug for ValRaw {
1505    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1506        struct Hex<T>(T);
1507        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1508            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1509                let bytes = mem::size_of::<T>();
1510                let hex_digits_per_byte = 2;
1511                let hex_digits = bytes * hex_digits_per_byte;
1512                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1513            }
1514        }
1515
1516        unsafe {
1517            f.debug_struct("ValRaw")
1518                .field("i32", &Hex(self.i32))
1519                .field("i64", &Hex(self.i64))
1520                .field("f32", &Hex(self.f32))
1521                .field("f64", &Hex(self.f64))
1522                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1523                .field("funcref", &self.funcref)
1524                .field("externref", &Hex(self.externref))
1525                .field("anyref", &Hex(self.anyref))
1526                .field("exnref", &Hex(self.exnref))
1527                .finish()
1528        }
1529    }
1530}
1531
1532impl ValRaw {
1533    /// Create a null reference that is compatible with any of
1534    /// `{any,extern,func,exn}ref`.
1535    pub fn null() -> ValRaw {
1536        unsafe {
1537            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1538            debug_assert_eq!(raw.get_anyref(), 0);
1539            debug_assert_eq!(raw.get_exnref(), 0);
1540            debug_assert_eq!(raw.get_externref(), 0);
1541            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1542            raw
1543        }
1544    }
1545
1546    /// Creates a WebAssembly `i32` value
1547    #[inline]
1548    pub fn i32(i: i32) -> ValRaw {
1549        // Note that this is intentionally not setting the `i32` field, instead
1550        // setting the `i64` field with a zero-extended version of `i`. For more
1551        // information on this see the comments on `Lower for Result` in the
1552        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
1553        // otherwise constrained to guarantee that the initial 64-bits are
1554        // always initialized.
1555        ValRaw::u64(i.cast_unsigned().into())
1556    }
1557
1558    /// Creates a WebAssembly `i64` value
1559    #[inline]
1560    pub fn i64(i: i64) -> ValRaw {
1561        ValRaw { i64: i.to_le() }
1562    }
1563
1564    /// Creates a WebAssembly `i32` value
1565    #[inline]
1566    pub fn u32(i: u32) -> ValRaw {
1567        // See comments in `ValRaw::i32` for why this is setting the upper
1568        // 32-bits as well.
1569        ValRaw::u64(i.into())
1570    }
1571
1572    /// Creates a WebAssembly `i64` value
1573    #[inline]
1574    pub fn u64(i: u64) -> ValRaw {
1575        ValRaw::i64(i as i64)
1576    }
1577
1578    /// Creates a WebAssembly `f32` value
1579    #[inline]
1580    pub fn f32(i: u32) -> ValRaw {
1581        // See comments in `ValRaw::i32` for why this is setting the upper
1582        // 32-bits as well.
1583        ValRaw::u64(i.into())
1584    }
1585
1586    /// Creates a WebAssembly `f64` value
1587    #[inline]
1588    pub fn f64(i: u64) -> ValRaw {
1589        ValRaw { f64: i.to_le() }
1590    }
1591
1592    /// Creates a WebAssembly `v128` value
1593    #[inline]
1594    pub fn v128(i: u128) -> ValRaw {
1595        ValRaw {
1596            v128: i.to_le_bytes(),
1597        }
1598    }
1599
1600    /// Creates a WebAssembly `funcref` value
1601    #[inline]
1602    pub fn funcref(i: *mut c_void) -> ValRaw {
1603        ValRaw {
1604            funcref: i.map_addr(|i| i.to_le()),
1605        }
1606    }
1607
1608    /// Creates a WebAssembly `externref` value
1609    #[inline]
1610    pub fn externref(e: u32) -> ValRaw {
1611        assert!(cfg!(feature = "gc") || e == 0);
1612        ValRaw {
1613            externref: e.to_le(),
1614        }
1615    }
1616
1617    /// Creates a WebAssembly `anyref` value
1618    #[inline]
1619    pub fn anyref(r: u32) -> ValRaw {
1620        assert!(cfg!(feature = "gc") || r == 0);
1621        ValRaw { anyref: r.to_le() }
1622    }
1623
1624    /// Creates a WebAssembly `exnref` value
1625    #[inline]
1626    pub fn exnref(r: u32) -> ValRaw {
1627        assert!(cfg!(feature = "gc") || r == 0);
1628        ValRaw { exnref: r.to_le() }
1629    }
1630
1631    /// Gets the WebAssembly `i32` value
1632    #[inline]
1633    pub fn get_i32(&self) -> i32 {
1634        unsafe { i32::from_le(self.i32) }
1635    }
1636
1637    /// Gets the WebAssembly `i64` value
1638    #[inline]
1639    pub fn get_i64(&self) -> i64 {
1640        unsafe { i64::from_le(self.i64) }
1641    }
1642
1643    /// Gets the WebAssembly `i32` value
1644    #[inline]
1645    pub fn get_u32(&self) -> u32 {
1646        self.get_i32().cast_unsigned()
1647    }
1648
1649    /// Gets the WebAssembly `i64` value
1650    #[inline]
1651    pub fn get_u64(&self) -> u64 {
1652        self.get_i64().cast_unsigned()
1653    }
1654
1655    /// Gets the WebAssembly `f32` value
1656    #[inline]
1657    pub fn get_f32(&self) -> u32 {
1658        unsafe { u32::from_le(self.f32) }
1659    }
1660
1661    /// Gets the WebAssembly `f64` value
1662    #[inline]
1663    pub fn get_f64(&self) -> u64 {
1664        unsafe { u64::from_le(self.f64) }
1665    }
1666
1667    /// Gets the WebAssembly `v128` value
1668    #[inline]
1669    pub fn get_v128(&self) -> u128 {
1670        unsafe { u128::from_le_bytes(self.v128) }
1671    }
1672
1673    /// Gets the WebAssembly `funcref` value
1674    #[inline]
1675    pub fn get_funcref(&self) -> *mut c_void {
1676        let addr = unsafe { usize::from_le(self.funcref.addr()) };
1677        core::ptr::with_exposed_provenance_mut(addr)
1678    }
1679
1680    /// Gets the WebAssembly `externref` value
1681    #[inline]
1682    pub fn get_externref(&self) -> u32 {
1683        let externref = u32::from_le(unsafe { self.externref });
1684        assert!(cfg!(feature = "gc") || externref == 0);
1685        externref
1686    }
1687
1688    /// Gets the WebAssembly `anyref` value
1689    #[inline]
1690    pub fn get_anyref(&self) -> u32 {
1691        let anyref = u32::from_le(unsafe { self.anyref });
1692        assert!(cfg!(feature = "gc") || anyref == 0);
1693        anyref
1694    }
1695
1696    /// Gets the WebAssembly `exnref` value
1697    #[inline]
1698    pub fn get_exnref(&self) -> u32 {
1699        let exnref = u32::from_le(unsafe { self.exnref });
1700        assert!(cfg!(feature = "gc") || exnref == 0);
1701        exnref
1702    }
1703}
1704
/// An "opaque" version of `VMContext` which must be explicitly casted to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are casted
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    // 32-bit discriminator identifying which concrete context type this
    // opaque pointer actually points at; used for the debug-mode cast
    // assertions described in the doc comment above.
    pub(crate) magic: u32,
    // Opts this type out of `Unpin` — presumably because contexts are
    // referenced by raw pointers from compiled code and must not be moved
    // once allocated. NOTE(review): confirm against the allocation sites.
    _marker: marker::PhantomPinned,
}
1724
1725impl VMOpaqueContext {
1726    /// Helper function to clearly indicate that casts are desired.
1727    #[inline]
1728    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1729        ptr.cast()
1730    }
1731
1732    /// Helper function to clearly indicate that casts are desired.
1733    #[inline]
1734    pub fn from_vm_array_call_host_func_context(
1735        ptr: NonNull<VMArrayCallHostFuncContext>,
1736    ) -> NonNull<VMOpaqueContext> {
1737        ptr.cast()
1738    }
1739}