wasmtime/runtime/vm/
libcalls.rs

1//! Runtime library calls.
2//!
3//! Note that Wasm compilers may sometimes perform these inline rather than
4//! calling them, particularly when CPUs have special instructions which compute
5//! them directly.
6//!
7//! These functions are called by compiled Wasm code, and therefore must take
8//! certain care about some things:
9//!
10//! * They must only contain basic, raw i32/i64/f32/f64/pointer parameters that
11//!   are safe to pass across the system ABI.
12//!
13//! * If any nested function propagates an `Err(trap)` out to the library
14//!   function frame, we need to raise it. This involves some nasty and quite
15//!   unsafe code under the covers! Notably, after raising the trap, drops
16//!   **will not** be run for local variables! This can lead to things like
17//!   leaking `InstanceHandle`s which leads to never deallocating JIT code,
18//!   instances, and modules if we are not careful!
19//!
20//! * The libcall must be entered via a Wasm-to-libcall trampoline that saves
21//!   the last Wasm FP and PC for stack walking purposes. (For more details, see
22//!   `crates/wasmtime/src/runtime/vm/backtrace.rs`.)
23//!
24//! To make it easier to correctly handle all these things, **all** libcalls
25//! must be defined via the `libcall!` helper macro! See its doc comments below
26//! for an example, or just look at the rest of the file.
27//!
28//! ## Dealing with `externref`s
29//!
30//! When receiving a raw `*mut u8` that is actually a `VMExternRef` reference,
31//! convert it into a proper `VMExternRef` with `VMExternRef::clone_from_raw` as
//! soon as possible. Any GC before the raw pointer is converted into a reference
33//! can potentially collect the referenced object, which could lead to use after
34//! free.
35//!
36//! Avoid this by eagerly converting into a proper `VMExternRef`! (Unfortunately
37//! there is no macro to help us automatically get this correct, so stay
38//! vigilant!)
39//!
40//! ```ignore
//! pub unsafe extern "C" fn my_libcall_takes_ref(raw_extern_ref: *mut u8) {
42//!     // Before `clone_from_raw`, `raw_extern_ref` is potentially unrooted,
43//!     // and doing GC here could lead to use after free!
44//!
45//!     let my_extern_ref = if raw_extern_ref.is_null() {
46//!         None
47//!     } else {
48//!         Some(VMExternRef::clone_from_raw(raw_extern_ref))
49//!     };
50//!
51//!     // Now that we did `clone_from_raw`, it is safe to do a GC (or do
52//!     // anything else that might transitively GC, like call back into
53//!     // Wasm!)
54//! }
55//! ```
56
57#[cfg(feature = "stack-switching")]
58use super::stack_switching::VMContObj;
59use crate::prelude::*;
60use crate::runtime::store::{InstanceId, StoreInstanceId, StoreOpaque};
61#[cfg(feature = "gc")]
62use crate::runtime::vm::VMGcRef;
63use crate::runtime::vm::table::TableElementType;
64use crate::runtime::vm::vmcontext::VMFuncRef;
65use crate::runtime::vm::{
66    self, HostResultHasUnwindSentinel, SendSyncPtr, TrapReason, VMStore, f32x4, f64x2, i8x16,
67};
68use core::convert::Infallible;
69use core::ptr::NonNull;
70#[cfg(feature = "threads")]
71use core::time::Duration;
72use wasmtime_environ::{
73    DataIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, FuncIndex, MemoryIndex,
74    TableIndex, Trap,
75};
76#[cfg(feature = "wmemcheck")]
77use wasmtime_wmemcheck::AccessError::{
78    DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds,
79};
80
81/// Raw functions which are actually called from compiled code.
82///
83/// Invocation of a builtin currently looks like:
84///
85/// * A wasm function calls a cranelift-compiled trampoline that's generated
86///   once-per-builtin.
87/// * The cranelift-compiled trampoline performs any necessary actions to exit
88///   wasm, such as dealing with fp/pc/etc.
89/// * The cranelift-compiled trampoline loads a function pointer from an array
90///   stored in `VMContext` That function pointer is defined in this module.
91/// * This module runs, handling things like `catch_unwind` and `Result` and
92///   such.
93/// * This module delegates to the outer module (this file) which has the actual
94///   implementation.
95///
96/// For more information on converting from host-defined values to Cranelift ABI
97/// values see the `catch_unwind_and_record_trap` function.
98pub mod raw {
99    use crate::runtime::vm::{Instance, VMContext, f32x4, f64x2, i8x16};
100    use core::ptr::NonNull;
101
102    macro_rules! libcall {
103        (
104            $(
105                $( #[cfg($attr:meta)] )?
106                $name:ident( vmctx: vmctx $(, $pname:ident: $param:ident )* ) $(-> $result:ident)?;
107            )*
108        ) => {
109            $(
110                // This is the direct entrypoint from the compiled module which
111                // still has the raw signature.
112                //
113                // This will delegate to the outer module to the actual
114                // implementation and automatically perform `catch_unwind` along
115                // with conversion of the return value in the face of traps.
116                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
117                pub unsafe extern "C" fn $name(
118                    vmctx: NonNull<VMContext>,
119                    $( $pname : libcall!(@ty $param), )*
120                ) $(-> libcall!(@ty $result))? {
121                    $(#[cfg($attr)])?
122                    unsafe {
123                        Instance::enter_host_from_wasm(vmctx, |store, instance| {
124                            super::$name(store, instance, $($pname),*)
125                        })
126                    }
127                    $(
128                        #[cfg(not($attr))]
129                        {
130                            let _ = vmctx;
131                            unreachable!();
132                        }
133                    )?
134                }
135
136                // This works around a `rustc` bug where compiling with LTO
137                // will sometimes strip out some of these symbols resulting
138                // in a linking failure.
139                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
140                const _: () = {
141                    #[used]
142                    static I_AM_USED: unsafe extern "C" fn(
143                        NonNull<VMContext>,
144                        $( $pname : libcall!(@ty $param), )*
145                    ) $( -> libcall!(@ty $result))? = $name;
146                };
147            )*
148        };
149
150        (@ty u32) => (u32);
151        (@ty u64) => (u64);
152        (@ty f32) => (f32);
153        (@ty f64) => (f64);
154        (@ty u8) => (u8);
155        (@ty i8x16) => (i8x16);
156        (@ty f32x4) => (f32x4);
157        (@ty f64x2) => (f64x2);
158        (@ty bool) => (bool);
159        (@ty pointer) => (*mut u8);
160        (@ty size) => (usize);
161    }
162
163    wasmtime_environ::foreach_builtin_function!(libcall);
164}
165
166/// Uses the `$store` provided to invoke the async closure `$f` and block on the
167/// result.
168///
169/// This will internally multiplex on `$store.with_blocking(...)` vs simply
170/// asserting the closure is ready depending on whether a store's
171/// `async_support` flag is set or not.
172///
173/// FIXME: ideally this would be a function, not a macro. If this is a function
174/// though it would require placing a bound on the async closure $f where the
175/// returned future is itself `Send`. That's not possible in Rust right now,
176/// unfortunately.
177///
178/// As a workaround this takes advantage of the fact that we can assume that the
179/// compiler can infer that the future returned by `$f` is indeed `Send` so long
180/// as we don't try to name the type or place it behind a generic. In the future
181/// when we can bound the return future of async functions with `Send` this
182/// macro should be replaced with an equivalent function.
/// Uses the `$store` provided to invoke the async closure `$f` and block on the
/// result.
///
/// This will internally multiplex on `$store.with_blocking(...)` vs simply
/// asserting the closure is ready depending on whether a store's
/// `async_support` flag is set or not.
///
/// FIXME: ideally this would be a function, not a macro. If this is a function
/// though it would require placing a bound on the async closure $f where the
/// returned future is itself `Send`. That's not possible in Rust right now,
/// unfortunately.
///
/// As a workaround this takes advantage of the fact that we can assume that the
/// compiler can infer that the future returned by `$f` is indeed `Send` so long
/// as we don't try to name the type or place it behind a generic. In the future
/// when we can bound the return future of async functions with `Send` this
/// macro should be replaced with an equivalent function.
macro_rules! block_on {
    ($store:expr, $f:expr) => {{
        let store: &mut StoreOpaque = $store;
        // `assert_async_fn_closure` is an identity function used purely to
        // pin down `$f`'s type for inference.
        let closure = assert_async_fn_closure($f);
        if store.async_support() {
            #[cfg(feature = "async")]
            {
                store.with_blocking(|store, cx| cx.block_on(closure(store)))
            }
            #[cfg(not(feature = "async"))]
            {
                // `async_support` cannot be `true` when the `async` feature
                // is compiled out.
                unreachable!()
            }
        } else {
            // Note that if `async_support` is disabled then it should not be
            // possible to introduce await points so the provided future should
            // always be ready.
            anyhow::Ok(vm::assert_ready(closure(store)))
        }
    }};
}
204
205fn assert_async_fn_closure<F, R>(f: F) -> F
206where
207    F: AsyncFnOnce(&mut StoreOpaque) -> R,
208{
209    f
210}
211
212fn memory_grow(
213    store: &mut dyn VMStore,
214    instance: InstanceId,
215    delta: u64,
216    memory_index: u32,
217) -> Result<Option<AllocationSize>> {
218    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
219    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
220    let limiter = limiter.as_mut();
221    block_on!(store, async |store| {
222        let instance = store.instance_mut(instance);
223        let module = instance.env_module();
224        let page_size_log2 = module.memories[module.memory_index(memory_index)].page_size_log2;
225
226        let result = instance
227            .memory_grow(limiter, memory_index, delta)
228            .await?
229            .map(|size_in_bytes| AllocationSize(size_in_bytes >> page_size_log2));
230
231        Ok(result)
232    })?
233}
234
/// A helper structure to represent the return value of a memory or table growth
/// call.
///
/// This represents a byte or element-based count of the size of an item on the
/// host. For example a memory is how many bytes large the memory is, or a table
/// is how many elements large it is. It's assumed that the value here is never
/// -1 or -2 (as an unsigned integer) as that would mean the entire host address
/// space is allocated, which is not possible; those two values are reserved for
/// the failure and unwind encodings (see the `HostResultHasUnwindSentinel`
/// implementation for `Option<AllocationSize>`).
struct AllocationSize(usize);
244
245/// Special implementation for growth-related libcalls.
246///
247/// Here the optional return value means:
248///
249/// * `Some(val)` - the growth succeeded and the previous size of the item was
250///   `val`.
251/// * `None` - the growth failed.
252///
253/// The failure case returns -1 (or `usize::MAX` as an unsigned integer) and the
254/// successful case returns the `val` itself. Note that -2 (`usize::MAX - 1`
255/// when unsigned) is unwind as a sentinel to indicate an unwind as no valid
256/// allocation can be that large.
257unsafe impl HostResultHasUnwindSentinel for Option<AllocationSize> {
258    type Abi = *mut u8;
259    const SENTINEL: *mut u8 = (usize::MAX - 1) as *mut u8;
260
261    fn into_abi(self) -> *mut u8 {
262        match self {
263            Some(size) => {
264                debug_assert!(size.0 < (usize::MAX - 1));
265                size.0 as *mut u8
266            }
267            None => usize::MAX as *mut u8,
268        }
269    }
270}
271
272/// Implementation of `table.grow` for `funcref` tables.
273unsafe fn table_grow_func_ref(
274    store: &mut dyn VMStore,
275    instance: InstanceId,
276    defined_table_index: u32,
277    delta: u64,
278    init_value: *mut u8,
279) -> Result<Option<AllocationSize>> {
280    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
281    let element = NonNull::new(init_value.cast::<VMFuncRef>()).map(SendSyncPtr::new);
282    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
283    let limiter = limiter.as_mut();
284    block_on!(store, async |store| {
285        let mut instance = store.instance_mut(instance);
286        let table_index = instance.env_module().table_index(defined_table_index);
287        debug_assert!(matches!(
288            instance.as_mut().table_element_type(table_index),
289            TableElementType::Func,
290        ));
291        let result = instance
292            .defined_table_grow(defined_table_index, async |table| unsafe {
293                table.grow_func(limiter, delta, element).await
294            })
295            .await?
296            .map(AllocationSize);
297        Ok(result)
298    })?
299}
300
/// Implementation of `table.grow` for GC-reference tables.
///
/// A zero `init_value` encodes a null GC reference initializer.
#[cfg(feature = "gc")]
fn table_grow_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: u32,
) -> Result<Option<AllocationSize>> {
    let table = DefinedTableIndex::from_u32(defined_table_index);
    let init = VMGcRef::from_raw_u32(init_value);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let (gc_store, mut instance) = store.optional_gc_store_and_instance_mut(instance);
        let index = instance.env_module().table_index(table);
        // Wasm validation guarantees the table holds GC references.
        debug_assert!(matches!(
            instance.as_mut().table_element_type(index),
            TableElementType::GcRef,
        ));

        Ok(instance
            .defined_table_grow(table, async |t| unsafe {
                t.grow_gc_ref(limiter, gc_store, delta, init.as_ref()).await
            })
            .await?
            .map(AllocationSize))
    })?
}
333
/// Implementation of `table.grow` for continuation-object tables.
///
/// The initial `Option<VMContObj>` element is passed split into two scalars
/// (`init_value_contref`, `init_value_revision`); a null contref pointer
/// encodes `None`.
#[cfg(feature = "stack-switching")]
unsafe fn table_grow_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    // The following two values together form the initial Option<VMContObj>.
    // A None value is indicated by the pointer being null.
    init_value_contref: *mut u8,
    init_value_revision: usize,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    // SAFETY: presumably JIT code always passes a valid (contref, revision)
    // pair or a null contref pointer -- TODO(review) confirm against the
    // trampoline's contract for `VMContObj::from_raw_parts`.
    let element = unsafe { VMContObj::from_raw_parts(init_value_contref, init_value_revision) };
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        // Wasm validation guarantees the table holds continuations.
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Cont,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_cont(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}
365
366/// Implementation of `table.fill` for `funcref`s.
367unsafe fn table_fill_func_ref(
368    store: &mut dyn VMStore,
369    instance: InstanceId,
370    table_index: u32,
371    dst: u64,
372    val: *mut u8,
373    len: u64,
374) -> Result<()> {
375    let instance = store.instance_mut(instance);
376    let table_index = DefinedTableIndex::from_u32(table_index);
377    let table = instance.get_defined_table(table_index);
378    match table.element_type() {
379        TableElementType::Func => {
380            let val = NonNull::new(val.cast::<VMFuncRef>());
381            table.fill_func(dst, val, len)?;
382            Ok(())
383        }
384        TableElementType::GcRef => unreachable!(),
385        TableElementType::Cont => unreachable!(),
386    }
387}
388
/// Implementation of `table.fill` for GC-reference tables.
///
/// A zero `val` encodes a null GC reference fill value.
#[cfg(feature = "gc")]
fn table_fill_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<()> {
    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance);
    let index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(index);
    match table.element_type() {
        TableElementType::GcRef => {
            let gc_ref = VMGcRef::from_raw_u32(val);
            table.fill_gc_ref(gc_store, dst, gc_ref.as_ref(), len)?;
            Ok(())
        }
        // Wasm validation guarantees this libcall is only used on
        // GC-reference tables.
        TableElementType::Func => unreachable!(),
        TableElementType::Cont => unreachable!(),
    }
}
412
/// Implementation of `table.fill` for continuation-object tables.
///
/// The fill value is passed split into `(value_contref, value_revision)`;
/// a null contref pointer encodes a null continuation object.
#[cfg(feature = "stack-switching")]
unsafe fn table_fill_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    value_contref: *mut u8,
    value_revision: usize,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(index);
    if !matches!(table.element_type(), TableElementType::Cont) {
        panic!("Wrong table filling function");
    }
    // SAFETY: presumably JIT code always passes a valid (contref, revision)
    // pair -- TODO(review) confirm `VMContObj::from_raw_parts`'s contract.
    let contobj = unsafe { VMContObj::from_raw_parts(value_contref, value_revision) };
    table.fill_cont(dst, contobj, len)?;
    Ok(())
}
435
436// Implementation of `table.copy`.
437fn table_copy(
438    store: &mut dyn VMStore,
439    instance: InstanceId,
440    dst_table_index: u32,
441    src_table_index: u32,
442    dst: u64,
443    src: u64,
444    len: u64,
445) -> Result<(), Trap> {
446    let dst_table_index = TableIndex::from_u32(dst_table_index);
447    let src_table_index = TableIndex::from_u32(src_table_index);
448    let store = store.store_opaque_mut();
449    let mut instance = store.instance_mut(instance);
450
451    // Convert the two table indices relative to `instance` into two
452    // defining instances and the defined table index within that instance.
453    let (dst_def_index, dst_instance) = instance
454        .as_mut()
455        .defined_table_index_and_instance(dst_table_index);
456    let dst_instance_id = dst_instance.id();
457    let (src_def_index, src_instance) = instance
458        .as_mut()
459        .defined_table_index_and_instance(src_table_index);
460    let src_instance_id = src_instance.id();
461
462    let src_table = crate::Table::from_raw(
463        StoreInstanceId::new(store.id(), src_instance_id),
464        src_def_index,
465    );
466    let dst_table = crate::Table::from_raw(
467        StoreInstanceId::new(store.id(), dst_instance_id),
468        dst_def_index,
469    );
470
471    // SAFETY: this is only safe if the two tables have the same type, and that
472    // was validated during wasm-validation time.
473    unsafe { crate::Table::copy_raw(store, &dst_table, dst, &src_table, src, len) }
474}
475
476// Implementation of `table.init`.
477fn table_init(
478    store: &mut dyn VMStore,
479    instance: InstanceId,
480    table_index: u32,
481    elem_index: u32,
482    dst: u64,
483    src: u64,
484    len: u64,
485) -> Result<()> {
486    let table_index = TableIndex::from_u32(table_index);
487    let elem_index = ElemIndex::from_u32(elem_index);
488
489    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
490    block_on!(store, async |store| {
491        vm::Instance::table_init(
492            store,
493            limiter.as_mut(),
494            instance,
495            table_index,
496            elem_index,
497            dst,
498            src,
499            len,
500        )
501        .await
502    })??;
503    Ok(())
504}
505
506// Implementation of `elem.drop`.
507fn elem_drop(store: &mut dyn VMStore, instance: InstanceId, elem_index: u32) {
508    let elem_index = ElemIndex::from_u32(elem_index);
509    store.instance_mut(instance).elem_drop(elem_index)
510}
511
512// Implementation of `memory.copy`.
513fn memory_copy(
514    store: &mut dyn VMStore,
515    instance: InstanceId,
516    dst_index: u32,
517    dst: u64,
518    src_index: u32,
519    src: u64,
520    len: u64,
521) -> Result<(), Trap> {
522    let src_index = MemoryIndex::from_u32(src_index);
523    let dst_index = MemoryIndex::from_u32(dst_index);
524    store
525        .instance_mut(instance)
526        .memory_copy(dst_index, dst, src_index, src, len)
527}
528
529// Implementation of `memory.fill` for locally defined memories.
530fn memory_fill(
531    store: &mut dyn VMStore,
532    instance: InstanceId,
533    memory_index: u32,
534    dst: u64,
535    val: u32,
536    len: u64,
537) -> Result<(), Trap> {
538    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
539    #[expect(clippy::cast_possible_truncation, reason = "known to truncate here")]
540    store
541        .instance_mut(instance)
542        .memory_fill(memory_index, dst, val as u8, len)
543}
544
545// Implementation of `memory.init`.
546fn memory_init(
547    store: &mut dyn VMStore,
548    instance: InstanceId,
549    memory_index: u32,
550    data_index: u32,
551    dst: u64,
552    src: u32,
553    len: u32,
554) -> Result<(), Trap> {
555    let memory_index = MemoryIndex::from_u32(memory_index);
556    let data_index = DataIndex::from_u32(data_index);
557    store
558        .instance_mut(instance)
559        .memory_init(memory_index, data_index, dst, src, len)
560}
561
562// Implementation of `ref.func`.
563fn ref_func(store: &mut dyn VMStore, instance: InstanceId, func_index: u32) -> NonNull<u8> {
564    store
565        .instance_mut(instance)
566        .get_func_ref(FuncIndex::from_u32(func_index))
567        .expect("ref_func: funcref should always be available for given func index")
568        .cast()
569}
570
571// Implementation of `data.drop`.
572fn data_drop(store: &mut dyn VMStore, instance: InstanceId, data_index: u32) {
573    let data_index = DataIndex::from_u32(data_index);
574    store.instance_mut(instance).data_drop(data_index)
575}
576
577// Returns a table entry after lazily initializing it.
578fn table_get_lazy_init_func_ref(
579    store: &mut dyn VMStore,
580    instance: InstanceId,
581    table_index: u32,
582    index: u64,
583) -> *mut u8 {
584    let table_index = TableIndex::from_u32(table_index);
585    let table = store
586        .instance_mut(instance)
587        .get_table_with_lazy_init(table_index, core::iter::once(index));
588    let elem = table
589        .get_func(index)
590        .expect("table access already bounds-checked");
591
592    match elem {
593        Some(ptr) => ptr.as_ptr().cast(),
594        None => core::ptr::null_mut(),
595    }
596}
597
/// Drop a GC reference.
#[cfg(feature = "gc-drc")]
fn drop_gc_ref(store: &mut dyn VMStore, _instance: InstanceId, gc_ref: u32) {
    log::trace!("libcalls::drop_gc_ref({gc_ref:#x})");
    // This libcall is never invoked with a null reference.
    let reference = VMGcRef::from_raw_u32(gc_ref).expect("non-null VMGcRef");
    let gc_store = store.store_opaque_mut().unwrap_gc_store_mut();
    gc_store.drop_gc_ref(reference);
}
608
/// Grow the GC heap by at least `bytes_needed` bytes.
///
/// Only compiled with the `gc-null` collector feature. Returns an
/// `AllocationTooLarge` trap if, after the grow attempt, the heap is not at
/// least `bytes_needed` bytes larger than before.
#[cfg(feature = "gc-null")]
fn grow_gc_heap(store: &mut dyn VMStore, _instance: InstanceId, bytes_needed: u64) -> Result<()> {
    // Snapshot the current heap size so the grow can be verified below.
    let orig_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();

    // Ask the store to perform a GC with a growth request of
    // `bytes_needed`; note that `store` is re-bound here to the inner
    // `StoreOpaque` for the remainder of this function.
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        store.gc(limiter.as_mut(), None, Some(bytes_needed)).await;
    })?;

    // JIT code relies on the memory having grown by `bytes_needed` bytes if
    // this libcall returns successfully, so trap if we didn't grow that much.
    let new_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();
    if orig_len
        .checked_add(bytes_needed)
        .is_none_or(|expected_len| new_len < expected_len)
    {
        return Err(crate::Trap::AllocationTooLarge.into());
    }

    Ok(())
}
645
/// Allocate a raw, uninitialized GC object for Wasm code.
///
/// The Wasm code is responsible for initializing the object.
///
/// `kind_and_reserved` packs a `VMGcKind` in its high bits plus the header's
/// reserved bits; `size` and `align` describe the requested object layout.
/// Returns the raw, non-zero GC reference exposed to Wasm.
#[cfg(feature = "gc-drc")]
fn gc_alloc_raw(
    store: &mut dyn VMStore,
    instance: InstanceId,
    kind_and_reserved: u32,
    module_interned_type_index: u32,
    size: u32,
    align: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::vm::VMGcHeader;
    use core::alloc::Layout;
    use wasmtime_environ::{ModuleInternedTypeIndex, VMGcKind};

    let kind = VMGcKind::from_high_bits_of_u32(kind_and_reserved);
    log::trace!("gc_alloc_raw(kind={kind:?}, size={size}, align={align})");

    let module = store
        .instance(instance)
        .runtime_module()
        .expect("should never allocate GC types defined in a dummy module");

    // Translate the module-local type index into the engine-wide shared type
    // index recorded in the object's header.
    let module_interned_type_index = ModuleInternedTypeIndex::from_u32(module_interned_type_index);
    let shared_type_index = module
        .signatures()
        .shared_type(module_interned_type_index)
        .expect("should have engine type index for module type index");

    let mut header = VMGcHeader::from_kind_and_index(kind, shared_type_index);
    header.set_reserved_u26(kind_and_reserved & VMGcKind::UNUSED_MASK);

    let size = usize::try_from(size).unwrap();
    let align = usize::try_from(align).unwrap();
    assert!(align.is_power_of_two());
    // An invalid size/align combination becomes a trap (not a panic), with
    // the layout error attached as context.
    let layout = Layout::from_size_align(size, align).map_err(|e| {
        let err = Error::from(crate::Trap::AllocationTooLarge);
        err.context(e)
    })?;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        // If the heap is out of memory the allocation is mapped to a
        // `GcHeapOutOfMemory` error, which `retry_after_gc_async` handles by
        // collecting/growing and retrying the closure.
        let gc_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_raw(header, layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}
702
// Intern a `funcref` into the GC heap, returning its `FuncRefTableId`.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
unsafe fn intern_func_ref_for_gc_heap(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    func_ref: *mut u8,
) -> Result<u32> {
    use crate::{store::AutoAssertNoGc, vm::SendSyncPtr};
    use core::ptr::NonNull;

    // Interning must not trigger a collection; the raw `func_ref` we were
    // handed is not rooted anywhere.
    let mut store = AutoAssertNoGc::new(store.store_opaque_mut());

    // A null pointer encodes a null funcref.
    let ptr = NonNull::new(func_ref.cast::<VMFuncRef>()).map(SendSyncPtr::new);

    let id = unsafe { store.require_gc_store_mut()?.func_ref_table.intern(ptr) };
    Ok(id.into_raw())
}
728
// Get the raw `VMFuncRef` pointer associated with a `FuncRefTableId` from an
// earlier `intern_func_ref_for_gc_heap` call.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
fn get_interned_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func_ref_id: u32,
    module_interned_type_index: u32,
) -> *mut u8 {
    use super::FuncRefTableId;
    use crate::store::AutoAssertNoGc;
    use wasmtime_environ::{ModuleInternedTypeIndex, packed_option::ReservedValue};

    let store = AutoAssertNoGc::new(store.store_opaque_mut());

    let id = FuncRefTableId::from_raw(func_ref_id);
    let ty = ModuleInternedTypeIndex::from_bits(module_interned_type_index);

    // A reserved type index requests an untyped lookup; otherwise the entry
    // is fetched with the requested engine-level type.
    let entry = if ty.is_reserved_value() {
        store.unwrap_gc_store().func_ref_table.get_untyped(id)
    } else {
        let types = store.engine().signatures();
        let engine_ty = store.instance(instance).engine_type_index(ty);
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_typed(types, id, engine_ty)
    };

    match entry {
        Some(f) => f.as_ptr().cast(),
        None => core::ptr::null_mut(),
    }
}
767
/// Implementation of the `array.new_data` instruction.
///
/// Allocates a new array of `len` elements of the array type given by
/// `array_type_index`, initialized from the data segment `data_index`
/// starting at byte offset `src`. Returns the raw GC reference exposed to
/// Wasm. Traps on out-of-bounds segment accesses or byte-length overflow.
#[cfg(feature = "gc")]
fn array_new_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let data_index = DataIndex::from_u32(data_index);
        let instance = store.instance(instance_id);

        // Calculate the byte-length of the data (as opposed to the element-length
        // of the array).
        let data_range = instance.wasm_data_range(data_index);
        let shared_ty = instance.engine_type_index(array_type_index);
        let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
        let one_elem_size = array_ty
            .element_type()
            .data_byte_size()
            .expect("Wasm validation ensures that this type have a defined byte size");
        let byte_len = len
            .checked_mul(one_elem_size)
            .and_then(|x| usize::try_from(x).ok())
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Get the data from the segment, checking bounds.
        let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
        instance
            .wasm_data(data_range.clone())
            .get(src..)
            .and_then(|d| d.get(..byte_len))
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Allocate the (uninitialized) array.
        let gc_layout = store
            .engine()
            .signatures()
            .layout(shared_ty)
            .expect("array types have GC layouts");
        let array_layout = gc_layout.unwrap_array();
        // On a `GcHeapOutOfMemory` error `retry_after_gc_async` performs a
        // collection and retries the allocation closure.
        let array_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_uninit_array(shared_ty, len, &array_layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        // Re-borrow the instance: the allocation above required exclusive
        // access to the store.
        let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
        let gc_store = gc_store.unwrap();
        // Indexing is in-bounds here because of the bounds check above.
        let data = &instance.wasm_data(data_range)[src..][..byte_len];

        // Copy the data into the array, initializing it.
        gc_store
            .gc_object_data(array_ref.as_gc_ref())
            .copy_from_slice(array_layout.base_size, data);

        // Return the array to Wasm!
        let raw = gc_store.expose_gc_ref_to_wasm(array_ref.into());
        Ok(raw)
    })?
}
839
/// Implementation of the `array.init_data` instruction.
///
/// Copies `len` elements' worth of bytes out of the passive data segment
/// `data_index` (starting at byte offset `src`) into the GC array `array`,
/// beginning at element index `dst`. Traps on a null array reference, on
/// out-of-bounds array indices, or on out-of-bounds segment offsets.
#[cfg(feature = "gc")]
fn array_init_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    let instance = store.instance(instance_id);

    log::trace!(
        "array.init_data(array={array:#x}, dst={dst}, data_index={data_index:?}, src={src}, len={len})",
    );

    // Null check the array.
    let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
    let array = gc_ref
        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an array");

    // Widen the raw u32 offsets to `usize`; a failed conversion (only
    // possible on 16-bit targets) is reported as an out-of-bounds access.
    let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds)?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds)?;

    // Bounds check the array.
    let array_len = array.len(store.store_opaque());
    let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Calculate the byte length from the array length.
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type have a defined byte size");
    let data_len = len
        .checked_mul(usize::try_from(one_elem_size).unwrap())
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Get the data from the segment, checking its bounds. The slice result
    // itself is discarded: this statement exists only to trap early, before
    // any array data is written.
    let data_range = instance.wasm_data_range(data_index);
    instance
        .wasm_data(data_range.clone())
        .get(src..)
        .and_then(|d| d.get(..data_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Copy the data into the array.

    // `dst` was bounds-checked against the array length above, so scaling it
    // by the element size should stay within the object; the `unwrap`s turn
    // any violated assumption into a panic rather than a silent wrap.
    let dst_offset = u32::try_from(dst)
        .unwrap()
        .checked_mul(one_elem_size)
        .unwrap();

    let array_layout = store
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = array_layout.unwrap_array();

    // Byte offset of element `dst` within the GC object itself.
    let obj_offset = array_layout.base_size.checked_add(dst_offset).unwrap();

    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
    let gc_store = gc_store.unwrap();
    let data = &instance.wasm_data(data_range)[src..][..data_len];
    gc_store
        .gc_object_data(array.as_gc_ref())
        .copy_from_slice(obj_offset, data);

    Ok(())
}
924
/// Implementation of the `array.new_elem` instruction.
///
/// Allocates a new array of `len` elements whose values come from the
/// passive element segment `elem_index` starting at offset `src`, and
/// returns the raw (non-zero) GC reference exposed to Wasm.
#[cfg(feature = "gc")]
fn array_new_elem(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::{
        ArrayRef, ArrayRefPre, ArrayType, Func, OpaqueRootScope, RootedGcRefImpl, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    // Convert indices to their typed forms.
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let elem_index = ElemIndex::from_u32(elem_index);
    let instance = store.instance(instance_id);

    let mut storage = None;
    let elements = instance.passive_element_segment(&mut storage, elem_index);

    let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let pre = ArrayRefPre::_new(store, array_ty);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        // Scope all GC roots created below so they are unrooted on exit.
        let mut store = OpaqueRootScope::new(store);
        // Turn the elements into `Val`s.
        let mut vals = Vec::with_capacity(usize::try_from(elements.len()).unwrap());
        match elements {
            TableSegmentElements::Functions(fs) => {
                let store_id = store.id();
                let mut instance = store.instance_mut(instance_id);
                vals.extend(
                    fs.get(src..)
                        .and_then(|s| s.get(..len))
                        .ok_or_else(|| Trap::TableOutOfBounds)?
                        .iter()
                        .map(|f| {
                            let raw_func_ref = instance.as_mut().get_func_ref(*f);
                            // SAFETY: the func ref, when present, comes from
                            // this instance and belongs to `store_id`.
                            let func = unsafe {
                                raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p))
                            };
                            Val::FuncRef(func)
                        }),
                );
            }
            TableSegmentElements::Expressions(xs) => {
                let xs = xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?;

                let mut const_context = ConstEvalContext::new(instance_id);
                let mut const_evaluator = ConstExprEvaluator::default();

                // Each segment entry is a const expression; evaluate it to a
                // value (may allocate, hence the `await`).
                for x in xs.iter() {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
            }
        }

        let array = ArrayRef::_new_fixed_async(&mut store, limiter.as_mut(), &pre, &vals).await?;

        let mut store = AutoAssertNoGc::new(&mut store);
        let gc_ref = array.try_clone_gc_ref(&mut store)?;
        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}
1005
/// Implementation of the `array.init_elem` instruction.
///
/// Copies `len` entries of the passive element segment `elem_index`
/// (starting at `src`) into the existing GC array `array`, starting at
/// element index `dst`. Traps on a null array, out-of-bounds array
/// indices, or out-of-bounds segment offsets.
#[cfg(feature = "gc")]
fn array_init_elem(
    store: &mut dyn VMStore,
    instance: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{
        ArrayRef, Func, OpaqueRootScope, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        // Scope all GC roots created below so they are unrooted on exit.
        let mut store = OpaqueRootScope::new(store);

        // Convert the indices into their typed forms.
        let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let elem_index = ElemIndex::from_u32(elem_index);

        log::trace!(
            "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})",
        );

        // Convert the raw GC ref into a `Rooted<ArrayRef>`.
        let array = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
        let array = store.unwrap_gc_store_mut().clone_gc_ref(&array);
        let array = {
            let mut no_gc = AutoAssertNoGc::new(&mut store);
            ArrayRef::from_cloned_gc_ref(&mut no_gc, array)
        };

        // Bounds check the destination within the array.
        let array_len = array._len(&store)?;
        log::trace!("array_len = {array_len}");
        if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
            return Err(Trap::ArrayOutOfBounds.into());
        }

        // Get the passive element segment.
        let mut storage = None;
        let store_id = store.id();
        let mut instance = store.instance_mut(instance);
        let elements = instance.passive_element_segment(&mut storage, elem_index);

        // Convert array offsets into `usize`s.
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        // Turn the elements into `Val`s.
        let vals = match elements {
            TableSegmentElements::Functions(fs) => fs
                .get(src..)
                .and_then(|s| s.get(..len))
                .ok_or_else(|| Trap::TableOutOfBounds)?
                .iter()
                .map(|f| {
                    let raw_func_ref = instance.as_mut().get_func_ref(*f);
                    // SAFETY: the func ref, when present, comes from this
                    // instance and belongs to `store_id`.
                    let func = unsafe { raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p)) };
                    Val::FuncRef(func)
                })
                .collect::<Vec<_>>(),
            TableSegmentElements::Expressions(xs) => {
                let mut const_context = ConstEvalContext::new(instance.id());
                let mut const_evaluator = ConstExprEvaluator::default();

                let mut vals = Vec::new();
                for x in xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?
                {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
                vals
            }
        };

        // Copy the values into the array. `dst + i` was bounds-checked above,
        // so the unwraps only guard against arithmetic bugs.
        for (i, val) in vals.into_iter().enumerate() {
            let i = u32::try_from(i).unwrap();
            let j = dst.checked_add(i).unwrap();
            array._set(&mut store, j, val)?;
        }

        Ok(())
    })?
}
1103
// TODO: Specialize this libcall for only non-GC array elements, so we never
// have to do GC barriers and their associated indirect calls through the `dyn
// GcHeap`. Instead, implement those copies inline in Wasm code. Then, use bulk
// `memcpy`-style APIs to do the actual copies here.
/// Implementation of the `array.copy` instruction.
///
/// Copies `len` elements from `src_array[src..]` into `dst_array[dst..]`,
/// handling the potentially-overlapping same-array case. Traps on null
/// references or out-of-bounds ranges.
#[cfg(feature = "gc")]
fn array_copy(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    dst_array: u32,
    dst: u32,
    src_array: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{ArrayRef, OpaqueRootScope, store::AutoAssertNoGc};

    log::trace!(
        "array.copy(dst_array={dst_array:#x}, dst_index={dst}, src_array={src_array:#x}, src_index={src}, len={len})",
    );

    let mut store = OpaqueRootScope::new(store.store_opaque_mut());
    let mut store = AutoAssertNoGc::new(&mut store);

    // Convert the raw GC refs into `Rooted<ArrayRef>`s.
    let dst_array = VMGcRef::from_raw_u32(dst_array).ok_or_else(|| Trap::NullReference)?;
    let dst_array = store.unwrap_gc_store_mut().clone_gc_ref(&dst_array);
    let dst_array = ArrayRef::from_cloned_gc_ref(&mut store, dst_array);
    let src_array = VMGcRef::from_raw_u32(src_array).ok_or_else(|| Trap::NullReference)?;
    let src_array = store.unwrap_gc_store_mut().clone_gc_ref(&src_array);
    let src_array = ArrayRef::from_cloned_gc_ref(&mut store, src_array);

    // Bounds check the destination array's elements.
    let dst_array_len = dst_array._len(&store)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > dst_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Bounds check the source array's elements.
    let src_array_len = src_array._len(&store)?;
    if src.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > src_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // NOTE(review): `store` is already an `AutoAssertNoGc` from above, so this
    // second wrapper looks redundant — confirm before simplifying.
    let mut store = AutoAssertNoGc::new(&mut store);
    // If `src_array` and `dst_array` are the same array, then we are
    // potentially doing an overlapping copy, so make sure to copy elements in
    // the order that doesn't clobber the source elements before they are
    // copied. If they are different arrays, the order doesn't matter, but we
    // simply don't bother checking.
    if src > dst {
        // Destination lags the source: a forward copy never overwrites
        // not-yet-read source elements.
        for i in 0..len {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    } else {
        // Destination is at or ahead of the source: copy backwards.
        for i in (0..len).rev() {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    }
    Ok(())
}
1168
/// Check whether `actual_engine_type` is a subtype of `expected_engine_type`
/// according to the engine's type registry.
///
/// Returns `1` when the relation holds and `0` otherwise, since libcalls
/// speak raw `u32`s rather than `bool`s.
#[cfg(feature = "gc")]
fn is_subtype(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    actual_engine_type: u32,
    expected_engine_type: u32,
) -> u32 {
    use wasmtime_environ::VMSharedTypeIndex;

    // Rehydrate the raw indices into typed engine-level type indices.
    let actual = VMSharedTypeIndex::from_u32(actual_engine_type);
    let expected = VMSharedTypeIndex::from_u32(expected_engine_type);

    let is_subtype = store.engine().signatures().is_subtype(actual, expected);
    log::trace!("is_subtype(actual={actual:?}, expected={expected:?}) -> {is_subtype}",);

    if is_subtype { 1 } else { 0 }
}
1186
// Implementation of `memory.atomic.notify` for locally defined memories.
//
// Wakes up to `count` waiters parked on `addr_index` of the given defined
// memory, returning how many were notified (or a trap from the memory).
#[cfg(feature = "threads")]
fn memory_atomic_notify(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    count: u32,
) -> Result<u32, Trap> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    let mut instance = store.instance_mut(instance);
    let memory = instance.get_defined_memory_mut(memory_index);
    memory.atomic_notify(addr_index, count)
}
1202
// Implementation of `memory.atomic.wait32` for locally defined memories.
//
// Blocks until notified, the 32-bit value at `addr_index` no longer equals
// `expected`, or the timeout elapses; the result code is returned as a u32.
#[cfg(feature = "threads")]
fn memory_atomic_wait32(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u32,
    timeout: u64,
) -> Result<u32, Trap> {
    // A timeout whose sign bit is set (when viewed as `i64`) means
    // "wait forever"; otherwise it's a nanosecond duration.
    let timeout = if (timeout as i64) < 0 {
        None
    } else {
        Some(Duration::from_nanos(timeout))
    };
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    let outcome = store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait32(addr_index, expected, timeout)?;
    Ok(outcome as u32)
}
1220
// Implementation of `memory.atomic.wait64` for locally defined memories.
//
// Same as `memory_atomic_wait32` but compares a 64-bit `expected` value.
#[cfg(feature = "threads")]
fn memory_atomic_wait64(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u64,
    timeout: u64,
) -> Result<u32, Trap> {
    // A timeout whose sign bit is set (when viewed as `i64`) means
    // "wait forever"; otherwise it's a nanosecond duration.
    let timeout = if (timeout as i64) < 0 {
        None
    } else {
        Some(Duration::from_nanos(timeout))
    };
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    let outcome = store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait64(addr_index, expected, timeout)?;
    Ok(outcome as u32)
}
1238
// Hook for when an instance runs out of fuel.
//
// Asks the store to refuel; if that fails the guest traps with
// `Trap::OutOfFuel`. When refueling succeeds and a yield interval is
// configured, the (async) store yields back to the host before resuming.
fn out_of_gas(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    block_on!(store, async |store| {
        if !store.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if store.fuel_yield_interval.is_some() {
            crate::runtime::vm::Yield::new().await;
        }
        Ok(())
    })?
}
1252
// Hook for when an instance observes that the epoch has changed.
//
// Consults the store's configured deadline policy: it may trap with
// `Trap::Interrupt`, continue with a new deadline delta, or (async only)
// yield before continuing. Returns the freshly set deadline wrapped in
// `NextEpoch` so compiled code can cache it without reloading.
#[cfg(target_has_atomic = "64")]
fn new_epoch(store: &mut dyn VMStore, _instance: InstanceId) -> Result<NextEpoch> {
    use crate::UpdateDeadline;

    let update_deadline = store.new_epoch_updated_deadline()?;
    block_on!(store, async move |store| {
        let delta = match update_deadline {
            UpdateDeadline::Interrupt => return Err(Trap::Interrupt.into()),
            UpdateDeadline::Continue(delta) => delta,

            // Note that custom assertions for `async_support` are needed below
            // as otherwise if these are used in an
            // `async_support`-disabled-build it'll trip the `assert_ready` part
            // of `block_on!` above. The assertion here provides a more direct
            // error message as to what's going on.
            #[cfg(feature = "async")]
            UpdateDeadline::Yield(delta) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::Yield` without enabling \
                     async support in the config"
                );
                crate::runtime::vm::Yield::new().await;
                delta
            }
            #[cfg(feature = "async")]
            UpdateDeadline::YieldCustom(delta, future) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::YieldCustom` without enabling \
                     async support in the config"
                );
                future.await;
                delta
            }
        };

        // Set a new deadline and return the new epoch deadline so
        // the Wasm code doesn't have to reload it.
        store.set_epoch_deadline(delta);
        Ok(NextEpoch(store.get_epoch_deadline()))
    })?
}
1297
/// Successful result of the `new_epoch` libcall: the next epoch deadline for
/// compiled Wasm code to cache.
struct NextEpoch(u64);

// SAFETY: `u64::MAX` is reserved as the unwind sentinel, so `into_abi` must
// never produce it for a successful result. NOTE(review): this assumes epoch
// deadlines never legitimately reach `u64::MAX` — confirm against the epoch
// deadline machinery.
unsafe impl HostResultHasUnwindSentinel for NextEpoch {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.0
    }
}
1307
// Hook for validating malloc using wmemcheck_state.
//
// Records an allocation of `len` bytes at `addr` in the instance's wmemcheck
// state (when enabled) and re-enables load/store checking, which the paired
// `malloc_start` hook turned off. Returns an error on a double malloc or an
// out-of-bounds allocation; a no-op when wmemcheck is not active for this
// instance.
#[cfg(feature = "wmemcheck")]
fn check_malloc(store: &mut dyn VMStore, instance: InstanceId, addr: u32, len: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.malloc(addr as usize, len as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(DoubleMalloc { addr, len }) => {
                bail!("Double malloc at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Malloc out of bounds at addr {:#x} of size {}", addr, len);
            }
            // No other error variants are expected from `malloc`.
            _ => unreachable!(),
        }
    }
    Ok(())
}
1330
// Hook for validating free using wmemcheck_state.
//
// Records the deallocation at `addr` in the instance's wmemcheck state (when
// enabled) and re-enables load/store checking, which the paired `free_start`
// hook turned off. Returns an error when freeing an address that was never
// allocated; a no-op when wmemcheck is not active for this instance.
#[cfg(feature = "wmemcheck")]
fn check_free(store: &mut dyn VMStore, instance: InstanceId, addr: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.free(addr as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(InvalidFree { addr }) => {
                bail!("Invalid free at addr {:#x}", addr)
            }
            // No other error variants are expected from `free`.
            _ => unreachable!(),
        }
    }
    Ok(())
}
1350
// Hook for validating load using wmemcheck_state.
//
// Checks that the `num_bytes`-wide read at `addr + offset` touches only
// memory wmemcheck considers initialized and in-bounds; a no-op when
// wmemcheck is not active for this instance.
#[cfg(feature = "wmemcheck")]
fn check_load(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        // NOTE(review): `addr + offset` is computed in `usize`; on a 32-bit
        // host this addition could wrap — confirm wmemcheck targets 64-bit.
        let result = wmemcheck_state.read(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidRead { addr, len }) => {
                bail!("Invalid load at addr {:#x} of size {}", addr, len);
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Load out of bounds at addr {:#x} of size {}", addr, len);
            }
            // No other error variants are expected from `read`.
            _ => unreachable!(),
        }
    }
    Ok(())
}
1378
// Hook for validating store using wmemcheck_state.
//
// Checks that the `num_bytes`-wide write at `addr + offset` stays within
// memory wmemcheck considers valid, and marks it initialized; a no-op when
// wmemcheck is not active for this instance.
#[cfg(feature = "wmemcheck")]
fn check_store(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        // NOTE(review): `addr + offset` is computed in `usize`; on a 32-bit
        // host this addition could wrap — confirm wmemcheck targets 64-bit.
        let result = wmemcheck_state.write(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidWrite { addr, len }) => {
                bail!("Invalid store at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Store out of bounds at addr {:#x} of size {}", addr, len)
            }
            // No other error variants are expected from `write`.
            _ => unreachable!(),
        }
    }
    Ok(())
}
1406
// Hook for turning wmemcheck load/store validation off when entering a malloc
// function. The paired `check_malloc` hook turns validation back on.
#[cfg(feature = "wmemcheck")]
fn malloc_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    match instance.wmemcheck_state_mut() {
        Some(state) => state.memcheck_off(),
        None => {}
    }
}
1415
// Hook for turning wmemcheck load/store validation off when entering a free
// function. The paired `check_free` hook turns validation back on.
#[cfg(feature = "wmemcheck")]
fn free_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    match instance.wmemcheck_state_mut() {
        Some(state) => state.memcheck_off(),
        None => {}
    }
}
1424
// Hook for tracking wasm stack updates using wmemcheck_state.
//
// Currently a deliberate no-op: the commented-out sketch below shows the
// intended implementation once stack tracing is finalized.
#[cfg(feature = "wmemcheck")]
fn update_stack_pointer(_store: &mut dyn VMStore, _instance: InstanceId, _value: u32) {
    // TODO: stack-tracing has yet to be finalized. All memory below
    // the address of the top of the stack is marked as valid for
    // loads and stores.
    // if let Some(wmemcheck_state) = &mut instance.wmemcheck_state {
    //     instance.wmemcheck_state.update_stack_pointer(value as usize);
    // }
}
1435
// Hook updating wmemcheck_state memory state vector every time memory.grow is
// called, converting the new page count into a byte count.
#[cfg(feature = "wmemcheck")]
fn update_mem_size(store: &mut dyn VMStore, instance: InstanceId, num_pages: u32) {
    // A Wasm page is 64 KiB.
    const WASM_PAGE_BYTES: usize = 64 * 1024;
    let instance = store.instance_mut(instance);
    if let Some(state) = instance.wmemcheck_state_mut() {
        state.update_mem_size(num_pages as usize * WASM_PAGE_BYTES);
    }
}
1446
/// Libcall implementing Wasm `f32.floor` by delegating to `wasmtime_math`.
fn floor_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_floor(val)
}
1450
/// Libcall implementing Wasm `f64.floor` by delegating to `wasmtime_math`.
fn floor_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_floor(val)
}
1454
/// Libcall implementing Wasm `f32.ceil` by delegating to `wasmtime_math`.
fn ceil_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_ceil(val)
}
1458
/// Libcall implementing Wasm `f64.ceil` by delegating to `wasmtime_math`.
fn ceil_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_ceil(val)
}
1462
/// Libcall implementing Wasm `f32.trunc` by delegating to `wasmtime_math`.
fn trunc_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_trunc(val)
}
1466
/// Libcall implementing Wasm `f64.trunc` by delegating to `wasmtime_math`.
fn trunc_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_trunc(val)
}
1470
/// Libcall implementing Wasm `f32.nearest` by delegating to `wasmtime_math`.
fn nearest_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_nearest(val)
}
1474
/// Libcall implementing Wasm `f64.nearest` by delegating to `wasmtime_math`.
fn nearest_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_nearest(val)
}
1478
1479// This intrinsic is only used on x86_64 platforms as an implementation of
1480// the `i8x16.swizzle` instruction when `pshufb` in SSSE3 is not available.
1481#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
1482fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, a: i8x16, b: i8x16) -> i8x16 {
1483    union U {
1484        reg: i8x16,
1485        mem: [u8; 16],
1486    }
1487
1488    unsafe {
1489        let a = U { reg: a }.mem;
1490        let b = U { reg: b }.mem;
1491
1492        // Use the `swizzle` semantics of returning 0 on any out-of-bounds
1493        // index, rather than the x86 pshufb semantics, since Wasmtime uses
1494        // this to implement `i8x16.swizzle`.
1495        let select = |arr: &[u8; 16], byte: u8| {
1496            if byte >= 16 { 0x00 } else { arr[byte as usize] }
1497        };
1498
1499        U {
1500            mem: [
1501                select(&a, b[0]),
1502                select(&a, b[1]),
1503                select(&a, b[2]),
1504                select(&a, b[3]),
1505                select(&a, b[4]),
1506                select(&a, b[5]),
1507                select(&a, b[6]),
1508                select(&a, b[7]),
1509                select(&a, b[8]),
1510                select(&a, b[9]),
1511                select(&a, b[10]),
1512                select(&a, b[11]),
1513                select(&a, b[12]),
1514                select(&a, b[13]),
1515                select(&a, b[14]),
1516                select(&a, b[15]),
1517            ],
1518        }
1519        .reg
1520    }
1521}
1522
// Non-x86_64 stub: this libcall is only ever emitted on x86_64 without
// SSSE3, so reaching it on any other target is a compiler bug.
#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, _a: i8x16, _b: i8x16) -> i8x16 {
    unreachable!()
}
1527
1528// This intrinsic is only used on x86_64 platforms as an implementation of
1529// the `i8x16.shuffle` instruction when `pshufb` in SSSE3 is not available.
1530#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
1531fn i8x16_shuffle(
1532    _store: &mut dyn VMStore,
1533    _instance: InstanceId,
1534    a: i8x16,
1535    b: i8x16,
1536    c: i8x16,
1537) -> i8x16 {
1538    union U {
1539        reg: i8x16,
1540        mem: [u8; 16],
1541    }
1542
1543    unsafe {
1544        let ab = [U { reg: a }.mem, U { reg: b }.mem];
1545        let c = U { reg: c }.mem;
1546
1547        // Use the `shuffle` semantics of returning 0 on any out-of-bounds
1548        // index, rather than the x86 pshufb semantics, since Wasmtime uses
1549        // this to implement `i8x16.shuffle`.
1550        let select = |arr: &[[u8; 16]; 2], byte: u8| {
1551            if byte >= 32 {
1552                0x00
1553            } else if byte >= 16 {
1554                arr[1][byte as usize - 16]
1555            } else {
1556                arr[0][byte as usize]
1557            }
1558        };
1559
1560        U {
1561            mem: [
1562                select(&ab, c[0]),
1563                select(&ab, c[1]),
1564                select(&ab, c[2]),
1565                select(&ab, c[3]),
1566                select(&ab, c[4]),
1567                select(&ab, c[5]),
1568                select(&ab, c[6]),
1569                select(&ab, c[7]),
1570                select(&ab, c[8]),
1571                select(&ab, c[9]),
1572                select(&ab, c[10]),
1573                select(&ab, c[11]),
1574                select(&ab, c[12]),
1575                select(&ab, c[13]),
1576                select(&ab, c[14]),
1577                select(&ab, c[15]),
1578            ],
1579        }
1580        .reg
1581    }
1582}
1583
// Non-x86_64 stub: this libcall is only ever emitted on x86_64 without
// SSSE3, so reaching it on any other target is a compiler bug.
#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    _a: i8x16,
    _b: i8x16,
    _c: i8x16,
) -> i8x16 {
    unreachable!()
}
1594
1595fn fma_f32x4(
1596    _store: &mut dyn VMStore,
1597    _instance: InstanceId,
1598    x: f32x4,
1599    y: f32x4,
1600    z: f32x4,
1601) -> f32x4 {
1602    union U {
1603        reg: f32x4,
1604        mem: [f32; 4],
1605    }
1606
1607    unsafe {
1608        let x = U { reg: x }.mem;
1609        let y = U { reg: y }.mem;
1610        let z = U { reg: z }.mem;
1611
1612        U {
1613            mem: [
1614                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
1615                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
1616                wasmtime_math::WasmFloat::wasm_mul_add(x[2], y[2], z[2]),
1617                wasmtime_math::WasmFloat::wasm_mul_add(x[3], y[3], z[3]),
1618            ],
1619        }
1620        .reg
1621    }
1622}
1623
1624fn fma_f64x2(
1625    _store: &mut dyn VMStore,
1626    _instance: InstanceId,
1627    x: f64x2,
1628    y: f64x2,
1629    z: f64x2,
1630) -> f64x2 {
1631    union U {
1632        reg: f64x2,
1633        mem: [f64; 2],
1634    }
1635
1636    unsafe {
1637        let x = U { reg: x }.mem;
1638        let y = U { reg: y }.mem;
1639        let z = U { reg: z }.mem;
1640
1641        U {
1642            mem: [
1643                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
1644                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
1645            ],
1646        }
1647        .reg
1648    }
1649}
1650
1651/// This intrinsic is just used to record trap information.
1652///
1653/// The `Infallible` "ok" type here means that this never returns success, it
1654/// only ever returns an error, and this hooks into the machinery to handle
1655/// `Result` values to record such trap information.
1656fn trap(
1657    _store: &mut dyn VMStore,
1658    _instance: InstanceId,
1659    code: u8,
1660) -> Result<Infallible, TrapReason> {
1661    Err(TrapReason::Wasm(
1662        wasmtime_environ::Trap::from_u8(code).unwrap(),
1663    ))
1664}
1665
/// Libcall invoked by compiled code to hand control to the trap-handling
/// machinery for a trap that was previously recorded in the store.
fn raise(store: &mut dyn VMStore, _instance: InstanceId) {
    // SAFETY: this is only called from compiled wasm so we know that wasm has
    // already been entered. It's a dynamic safety precondition that the trap
    // information has already been arranged to be present.
    unsafe { crate::runtime::vm::traphandlers::raise_preexisting_trap(store) }
}
1672
// Builtins for continuations. These are thin wrappers around the
// respective definitions in stack_switching.rs.

/// Implementation of the `cont.new` instruction: allocates a continuation
/// for `func` with the given parameter and result counts.
///
/// The continuation's pointer is returned as a `usize` wrapped in
/// `AllocationSize` — presumably so the libcall sentinel machinery can
/// distinguish success from unwinding; see `AllocationSize`'s definition.
#[cfg(feature = "stack-switching")]
fn cont_new(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> Result<Option<AllocationSize>> {
    let ans =
        crate::vm::stack_switching::cont_new(store, instance, func, param_count, result_count)?;
    Ok(Some(AllocationSize(ans.cast::<u8>() as usize)))
}
1687
/// Return the calling instance's numeric ID (only used in GC-enabled builds).
#[cfg(feature = "gc")]
fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 {
    instance.as_u32()
}
1692
1693#[cfg(feature = "gc")]
1694fn throw_ref(
1695    store: &mut dyn VMStore,
1696    _instance: InstanceId,
1697    exnref: u32,
1698) -> Result<(), TrapReason> {
1699    let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?;
1700    let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref);
1701    let exnref = exnref
1702        .into_exnref(&*store.unwrap_gc_store().gc_heap)
1703        .expect("gc ref should be an exception object");
1704    store.set_pending_exception(exnref);
1705    Err(TrapReason::Exception)
1706}