wasmtime/runtime/vm/libcalls.rs

//! Runtime library calls.
//!
//! Note that Wasm compilers may sometimes perform these inline rather than
//! calling them, particularly when CPUs have special instructions which compute
//! them directly.
//!
//! These functions are called by compiled Wasm code, and therefore must take
//! certain care about some things:
//!
//! * They must only contain basic, raw i32/i64/f32/f64/pointer parameters that
//!   are safe to pass across the system ABI.
//!
//! * If any nested function propagates an `Err(trap)` out to the library
//!   function frame, we need to raise it. This involves some nasty and quite
//!   unsafe code under the covers! Notably, after raising the trap, drops
//!   **will not** be run for local variables! This can lead to things like
//!   leaking `InstanceHandle`s which leads to never deallocating JIT code,
//!   instances, and modules if we are not careful!
//!
//! * The libcall must be entered via a Wasm-to-libcall trampoline that saves
//!   the last Wasm FP and PC for stack walking purposes. (For more details, see
//!   `crates/wasmtime/src/runtime/vm/backtrace.rs`.)
//!
//! To make it easier to correctly handle all these things, **all** libcalls
//! must be defined via the `libcall!` helper macro! See its doc comments below
//! for an example, or just look at the rest of the file.
//!
//! ## Dealing with `externref`s
//!
//! When receiving a raw `*mut u8` that is actually a `VMExternRef` reference,
//! convert it into a proper `VMExternRef` with `VMExternRef::clone_from_raw` as
//! soon as possible. Any GC that happens before the raw pointer is converted
//! into a reference can potentially collect the referenced object, which could
//! lead to a use after free.
//!
//! Avoid this by eagerly converting into a proper `VMExternRef`! (Unfortunately
//! there is no macro to help us automatically get this correct, so stay
//! vigilant!)
//!
//! ```ignore
//! pub unsafe extern "C" fn my_libcall_takes_ref(raw_extern_ref: *mut u8) {
//!     // Before `clone_from_raw`, `raw_extern_ref` is potentially unrooted,
//!     // and doing GC here could lead to use after free!
//!
//!     let my_extern_ref = if raw_extern_ref.is_null() {
//!         None
//!     } else {
//!         Some(VMExternRef::clone_from_raw(raw_extern_ref))
//!     };
//!
//!     // Now that we did `clone_from_raw`, it is safe to do a GC (or do
//!     // anything else that might transitively GC, like call back into
//!     // Wasm!)
//! }
//! ```

#[cfg(feature = "stack-switching")]
use super::stack_switching::VMContObj;
use crate::prelude::*;
use crate::runtime::store::{InstanceId, StoreInstanceId, StoreOpaque};
#[cfg(feature = "gc")]
use crate::runtime::vm::VMGcRef;
use crate::runtime::vm::table::TableElementType;
use crate::runtime::vm::vmcontext::VMFuncRef;
use crate::runtime::vm::{
    self, HostResultHasUnwindSentinel, SendSyncPtr, TrapReason, VMStore, f32x4, f64x2, i8x16,
};
use core::convert::Infallible;
use core::ptr::NonNull;
#[cfg(feature = "threads")]
use core::time::Duration;
use wasmtime_environ::{
    DataIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, FuncIndex, MemoryIndex,
    TableIndex, Trap,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::AccessError::{
    DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds,
};

/// Raw functions which are actually called from compiled code.
///
/// Invocation of a builtin currently looks like:
///
/// * A wasm function calls a cranelift-compiled trampoline that's generated
///   once-per-builtin.
/// * The cranelift-compiled trampoline performs any necessary actions to exit
///   wasm, such as dealing with fp/pc/etc.
/// * The cranelift-compiled trampoline loads a function pointer from an array
///   stored in `VMContext`. That function pointer is defined in this module.
/// * This module runs, handling things like `catch_unwind` and `Result` and
///   such.
/// * This module delegates to the outer module (this file) which has the actual
///   implementation.
///
/// For more information on converting from host-defined values to Cranelift ABI
/// values see the `catch_unwind_and_record_trap` function.
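///
/// As a rough sketch of what the `libcall!` macro below generates (a
/// hypothetical, simplified expansion, not the literal macro output), a
/// builtin's raw entrypoint is shaped like:
///
/// ```ignore
/// pub unsafe extern "C" fn memory_grow(
///     vmctx: NonNull<VMContext>,
///     delta: u64,
///     index: u32,
/// ) -> *mut u8 {
///     Instance::enter_host_from_wasm(vmctx, |store, instance| {
///         super::memory_grow(store, instance, delta, index)
///     })
/// }
/// ```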
pub mod raw {
    use crate::runtime::vm::{Instance, VMContext, f32x4, f64x2, i8x16};
    use core::ptr::NonNull;

    macro_rules! libcall {
        (
            $(
                $( #[cfg($attr:meta)] )?
                $name:ident( vmctx: vmctx $(, $pname:ident: $param:ident )* ) $(-> $result:ident)?;
            )*
        ) => {
            $(
                // This is the direct entrypoint from the compiled module which
                // still has the raw signature.
                //
                // This will delegate to the actual implementation in the outer
                // module and automatically perform `catch_unwind` along with
                // conversion of the return value in the face of traps.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                #[allow(unused_variables, reason = "macro-generated")]
                #[allow(unreachable_code, reason = "some types uninhabited on some platforms")]
                pub unsafe extern "C" fn $name(
                    vmctx: NonNull<VMContext>,
                    $( $pname : libcall!(@ty $param), )*
                ) $(-> libcall!(@ty $result))? {
                    $(#[cfg($attr)])?
                    unsafe {
                        Instance::enter_host_from_wasm(vmctx, |store, instance| {
                            super::$name(store, instance, $($pname),*)
                        })
                    }
                    $(
                        #[cfg(not($attr))]
                        {
                            let _ = vmctx;
                            unreachable!();
                        }
                    )?
                }

                // This works around a `rustc` bug where compiling with LTO
                // will sometimes strip out some of these symbols resulting
                // in a linking failure.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                const _: () = {
                    #[used]
                    static I_AM_USED: unsafe extern "C" fn(
                        NonNull<VMContext>,
                        $( $pname : libcall!(@ty $param), )*
                    ) $( -> libcall!(@ty $result))? = $name;
                };
            )*
        };

        (@ty u32) => (u32);
        (@ty u64) => (u64);
        (@ty f32) => (f32);
        (@ty f64) => (f64);
        (@ty u8) => (u8);
        (@ty i8x16) => (i8x16);
        (@ty f32x4) => (f32x4);
        (@ty f64x2) => (f64x2);
        (@ty bool) => (bool);
        (@ty pointer) => (*mut u8);
        (@ty size) => (usize);
    }

    wasmtime_environ::foreach_builtin_function!(libcall);
}

/// Uses the `$store` provided to invoke the async closure `$f` and block on the
/// result.
///
/// This will internally multiplex on `$store.with_blocking(...)` vs simply
/// asserting the closure is ready, depending on whether the store's
/// `async_support` flag is set or not.
///
/// FIXME: ideally this would be a function, not a macro. If this were a
/// function, though, it would require placing a bound on the async closure
/// `$f` requiring that the returned future is itself `Send`. That's not
/// possible in Rust right now, unfortunately.
///
/// As a workaround this takes advantage of the fact that the compiler can
/// infer that the future returned by `$f` is indeed `Send` so long as we don't
/// try to name the type or place it behind a generic. In the future when we
/// can bound the return future of async functions with `Send` this macro
/// should be replaced with an equivalent function.
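///
/// A minimal usage sketch (the call-site shape mirrors real uses later in
/// this file; `some_async_operation` is a hypothetical helper):
///
/// ```ignore
/// let result = block_on!(store, async |store| {
///     // `.await` points here are only reachable when the store was
///     // configured with async support; otherwise the future must be
///     // immediately ready.
///     some_async_operation(store).await
/// })?;
/// ```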
macro_rules! block_on {
    ($store:expr, $f:expr) => {{
        let store: &mut StoreOpaque = $store;
        let closure = assert_async_fn_closure($f);
        if store.async_support() {
            #[cfg(feature = "async")]
            {
                store.with_blocking(|store, cx| cx.block_on(closure(store)))
            }
            #[cfg(not(feature = "async"))]
            {
                unreachable!()
            }
        } else {
            // Note that if `async_support` is disabled then it should not be
            // possible to introduce await points so the provided future should
            // always be ready.
            crate::error::Ok(vm::assert_ready(closure(store)))
        }
    }};
}

fn assert_async_fn_closure<F, R>(f: F) -> F
where
    F: AsyncFnOnce(&mut StoreOpaque) -> R,
{
    f
}

/// Implementation of `memory.grow` for locally defined memories.
fn memory_grow(
    store: &mut dyn VMStore,
    instance: InstanceId,
    delta: u64,
    memory_index: u32,
) -> Result<Option<AllocationSize>> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let instance = store.instance_mut(instance);
        let module = instance.env_module();
        let page_size_log2 = module.memories[module.memory_index(memory_index)].page_size_log2;

        let result = instance
            .memory_grow(limiter, memory_index, delta)
            .await?
            .map(|size_in_bytes| AllocationSize(size_in_bytes >> page_size_log2));

        Ok(result)
    })?
}

/// A helper structure to represent the return value of a memory or table growth
/// call.
///
/// This represents a byte- or element-based count of the size of an item on the
/// host. For example, for a memory this is how many bytes large the memory is,
/// and for a table it is how many elements large the table is. It's assumed
/// that the value here is never -1 or -2 as that would mean the entire host
/// address space is allocated, which is not possible.
struct AllocationSize(usize);

/// Special implementation for growth-related libcalls.
///
/// Here the optional return value means:
///
/// * `Some(val)` - the growth succeeded and the previous size of the item was
///   `val`.
/// * `None` - the growth failed.
///
/// The failure case returns -1 (or `usize::MAX` as an unsigned integer) and the
/// successful case returns the `val` itself. Note that -2 (`usize::MAX - 1`
/// when unsigned) is used as a sentinel to indicate an unwind, as no valid
/// allocation can be that large.
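///
/// A small sketch of the resulting ABI encoding, matching the `into_abi`
/// implementation below (shown here purely for illustration):
///
/// ```ignore
/// Some(AllocationSize(n)) => n as *mut u8          // n < usize::MAX - 1
/// None                    => usize::MAX as *mut u8 // -1: growth failed
/// // (usize::MAX - 1) as *mut u8 is reserved as the unwind sentinel (-2)
/// ```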
unsafe impl HostResultHasUnwindSentinel for Option<AllocationSize> {
    type Abi = *mut u8;
    const SENTINEL: *mut u8 = (usize::MAX - 1) as *mut u8;

    fn into_abi(self) -> *mut u8 {
        match self {
            Some(size) => {
                debug_assert!(size.0 < (usize::MAX - 1));
                size.0 as *mut u8
            }
            None => usize::MAX as *mut u8,
        }
    }
}

/// Implementation of `table.grow` for `funcref` tables.
unsafe fn table_grow_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: *mut u8,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = NonNull::new(init_value.cast::<VMFuncRef>()).map(SendSyncPtr::new);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Func,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_func(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.grow` for GC-reference tables.
#[cfg(feature = "gc")]
fn table_grow_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: u32,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = VMGcRef::from_raw_u32(init_value);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let (gc_store, mut instance) = store.optional_gc_store_and_instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::GcRef,
        ));

        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table
                    .grow_gc_ref(limiter, gc_store, delta, element.as_ref())
                    .await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.grow` for continuation tables.
#[cfg(feature = "stack-switching")]
unsafe fn table_grow_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    // The following two values together form the initial `Option<VMContObj>`.
    // A `None` value is indicated by the pointer being null.
    init_value_contref: *mut u8,
    init_value_revision: usize,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = unsafe { VMContObj::from_raw_parts(init_value_contref, init_value_revision) };
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Cont,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_cont(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.fill` for `funcref`s.
unsafe fn table_fill_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: *mut u8,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => {
            let val = NonNull::new(val.cast::<VMFuncRef>());
            table.fill_func(dst, val, len)?;
            Ok(())
        }
        TableElementType::GcRef => unreachable!(),
        TableElementType::Cont => unreachable!(),
    }
}

/// Implementation of `table.fill` for GC-reference tables.
#[cfg(feature = "gc")]
fn table_fill_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<()> {
    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => unreachable!(),
        TableElementType::GcRef => {
            let gc_ref = VMGcRef::from_raw_u32(val);
            table.fill_gc_ref(gc_store, dst, gc_ref.as_ref(), len)?;
            Ok(())
        }
        TableElementType::Cont => unreachable!(),
    }
}

/// Implementation of `table.fill` for continuation tables.
#[cfg(feature = "stack-switching")]
unsafe fn table_fill_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    value_contref: *mut u8,
    value_revision: usize,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Cont => {
            let contobj = unsafe { VMContObj::from_raw_parts(value_contref, value_revision) };
            table.fill_cont(dst, contobj, len)?;
            Ok(())
        }
        _ => panic!("wrong table-fill function called for this element type"),
    }
}

/// Implementation of `table.copy`.
fn table_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_table_index: u32,
    src_table_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let dst_table_index = TableIndex::from_u32(dst_table_index);
    let src_table_index = TableIndex::from_u32(src_table_index);
    let store = store.store_opaque_mut();
    let mut instance = store.instance_mut(instance);

    // Convert the two table indices relative to `instance` into two
    // defining instances and the defined table index within that instance.
    let (dst_def_index, dst_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(dst_table_index);
    let dst_instance_id = dst_instance.id();
    let (src_def_index, src_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(src_table_index);
    let src_instance_id = src_instance.id();

    let src_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), src_instance_id),
        src_def_index,
    );
    let dst_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), dst_instance_id),
        dst_def_index,
    );

    // SAFETY: this is only safe if the two tables have the same type, and
    // that was checked during Wasm validation.
    unsafe { crate::Table::copy_raw(store, &dst_table, dst, &src_table, src, len) }
}

/// Implementation of `table.init`.
fn table_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    elem_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<()> {
    let table_index = TableIndex::from_u32(table_index);
    let elem_index = ElemIndex::from_u32(elem_index);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        vm::Instance::table_init(
            store,
            limiter.as_mut(),
            instance,
            table_index,
            elem_index,
            dst,
            src,
            len,
        )
        .await
    })??;
    Ok(())
}

/// Implementation of `elem.drop`.
fn elem_drop(store: &mut dyn VMStore, instance: InstanceId, elem_index: u32) {
    let elem_index = ElemIndex::from_u32(elem_index);
    store.instance_mut(instance).elem_drop(elem_index)
}

/// Implementation of `memory.copy`.
fn memory_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_index: u32,
    dst: u64,
    src_index: u32,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let src_index = MemoryIndex::from_u32(src_index);
    let dst_index = MemoryIndex::from_u32(dst_index);
    store
        .instance_mut(instance)
        .memory_copy(dst_index, dst, src_index, src, len)
}

/// Implementation of `memory.fill` for locally defined memories.
fn memory_fill(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<(), Trap> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    #[expect(clippy::cast_possible_truncation, reason = "known to truncate here")]
    store
        .instance_mut(instance)
        .memory_fill(memory_index, dst, val as u8, len)
}

/// Implementation of `memory.init`.
fn memory_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    data_index: u32,
    dst: u64,
    src: u32,
    len: u32,
) -> Result<(), Trap> {
    let memory_index = MemoryIndex::from_u32(memory_index);
    let data_index = DataIndex::from_u32(data_index);
    store
        .instance_mut(instance)
        .memory_init(memory_index, data_index, dst, src, len)
}

/// Implementation of `ref.func`.
fn ref_func(store: &mut dyn VMStore, instance: InstanceId, func_index: u32) -> NonNull<u8> {
    let (instance, registry) = store.instance_and_module_registry_mut(instance);
    instance
        .get_func_ref(registry, FuncIndex::from_u32(func_index))
        .expect("ref_func: funcref should always be available for given func index")
        .cast()
}

/// Implementation of `data.drop`.
fn data_drop(store: &mut dyn VMStore, instance: InstanceId, data_index: u32) {
    let data_index = DataIndex::from_u32(data_index);
    store.instance_mut(instance).data_drop(data_index)
}

/// Returns a table entry after lazily initializing it.
fn table_get_lazy_init_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    index: u64,
) -> *mut u8 {
    let table_index = TableIndex::from_u32(table_index);
    let (instance, registry) = store.instance_and_module_registry_mut(instance);
    let table = instance.get_table_with_lazy_init(registry, table_index, core::iter::once(index));
    let elem = table
        .get_func(index)
        .expect("table access already bounds-checked");

    match elem {
        Some(ptr) => ptr.as_ptr().cast(),
        None => core::ptr::null_mut(),
    }
}

/// Drop a GC reference.
#[cfg(feature = "gc-drc")]
fn drop_gc_ref(store: &mut dyn VMStore, _instance: InstanceId, gc_ref: u32) {
    log::trace!("libcalls::drop_gc_ref({gc_ref:#x})");
    let gc_ref = VMGcRef::from_raw_u32(gc_ref).expect("non-null VMGcRef");
    store
        .store_opaque_mut()
        .unwrap_gc_store_mut()
        .drop_gc_ref(gc_ref);
}

/// Grow the GC heap.
#[cfg(feature = "gc-null")]
fn grow_gc_heap(store: &mut dyn VMStore, _instance: InstanceId, bytes_needed: u64) -> Result<()> {
    let orig_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        store.gc(limiter.as_mut(), None, Some(bytes_needed)).await;
    })?;

    // JIT code relies on the memory having grown by `bytes_needed` bytes if
    // this libcall returns successfully, so trap if we didn't grow that much.
    let new_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();
    if orig_len
        .checked_add(bytes_needed)
        .is_none_or(|expected_len| new_len < expected_len)
    {
        return Err(crate::Trap::AllocationTooLarge.into());
    }

    Ok(())
}

/// Allocate a raw, uninitialized GC object for Wasm code.
///
/// The Wasm code is responsible for initializing the object.
#[cfg(feature = "gc-drc")]
fn gc_alloc_raw(
    store: &mut dyn VMStore,
    instance: InstanceId,
    kind_and_reserved: u32,
    module_interned_type_index: u32,
    size: u32,
    align: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::vm::VMGcHeader;
    use core::alloc::Layout;
    use wasmtime_environ::{ModuleInternedTypeIndex, VMGcKind};

    let kind = VMGcKind::from_high_bits_of_u32(kind_and_reserved);
    log::trace!("gc_alloc_raw(kind={kind:?}, size={size}, align={align})");

    let module = store
        .instance(instance)
        .runtime_module()
        .expect("should never allocate GC types defined in a dummy module");

    let module_interned_type_index = ModuleInternedTypeIndex::from_u32(module_interned_type_index);
    let shared_type_index = module
        .signatures()
        .shared_type(module_interned_type_index)
        .expect("should have engine type index for module type index");

    let mut header = VMGcHeader::from_kind_and_index(kind, shared_type_index);
    header.set_reserved_u26(kind_and_reserved & VMGcKind::UNUSED_MASK);

    let size = usize::try_from(size).unwrap();
    let align = usize::try_from(align).unwrap();
    assert!(align.is_power_of_two());
    let layout = Layout::from_size_align(size, align).map_err(|e| {
        let err = Error::from(crate::Trap::AllocationTooLarge);
        err.context(e)
    })?;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let gc_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_raw(header, layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

/// Intern a `funcref` into the GC heap, returning its `FuncRefTableId`.
///
/// This libcall may not GC.
#[cfg(feature = "gc")]
unsafe fn intern_func_ref_for_gc_heap(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    func_ref: *mut u8,
) -> Result<u32> {
    use crate::{store::AutoAssertNoGc, vm::SendSyncPtr};
    use core::ptr::NonNull;

    let mut store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref = func_ref.cast::<VMFuncRef>();
    let func_ref = NonNull::new(func_ref).map(SendSyncPtr::new);

    let func_ref_id = unsafe {
        store
            .require_gc_store_mut()?
            .func_ref_table
            .intern(func_ref)
    };
    Ok(func_ref_id.into_raw())
}

/// Get the raw `VMFuncRef` pointer associated with a `FuncRefTableId` from an
/// earlier `intern_func_ref_for_gc_heap` call.
///
/// This libcall may not GC.
#[cfg(feature = "gc")]
fn get_interned_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func_ref_id: u32,
    module_interned_type_index: u32,
) -> *mut u8 {
    use super::FuncRefTableId;
    use crate::store::AutoAssertNoGc;
    use wasmtime_environ::{ModuleInternedTypeIndex, packed_option::ReservedValue};

    let store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref_id = FuncRefTableId::from_raw(func_ref_id);
    let module_interned_type_index = ModuleInternedTypeIndex::from_bits(module_interned_type_index);

    let func_ref = if module_interned_type_index.is_reserved_value() {
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_untyped(func_ref_id)
    } else {
        let types = store.engine().signatures();
        let engine_ty = store
            .instance(instance)
            .engine_type_index(module_interned_type_index);
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_typed(types, func_ref_id, engine_ty)
    };

    func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast())
}

/// Implementation of the `array.new_data` instruction.
#[cfg(feature = "gc")]
fn array_new_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let data_index = DataIndex::from_u32(data_index);
        let instance = store.instance(instance_id);

        // Calculate the byte-length of the data (as opposed to the
        // element-length of the array).
        let data_range = instance.wasm_data_range(data_index);
        let shared_ty = instance.engine_type_index(array_type_index);
        let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
        let one_elem_size = array_ty
            .element_type()
            .data_byte_size()
            .expect("Wasm validation ensures that this type has a defined byte size");
        let byte_len = len
            .checked_mul(one_elem_size)
            .and_then(|x| usize::try_from(x).ok())
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Get the data from the segment, checking bounds.
        let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
        instance
            .wasm_data(data_range.clone())
            .get(src..)
            .and_then(|d| d.get(..byte_len))
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Allocate the (uninitialized) array.
        let gc_layout = store
            .engine()
            .signatures()
            .layout(shared_ty)
            .expect("array types have GC layouts");
        let array_layout = gc_layout.unwrap_array();
        let array_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_uninit_array(shared_ty, len, &array_layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
        let gc_store = gc_store.unwrap();
        let data = &instance.wasm_data(data_range)[src..][..byte_len];

        // Copy the data into the array, initializing it.
        gc_store
            .gc_object_data(array_ref.as_gc_ref())
            .copy_from_slice(array_layout.base_size, data);

        // Return the array to Wasm!
        let raw = gc_store.expose_gc_ref_to_wasm(array_ref.into());
        Ok(raw)
    })?
}

/// Implementation of the `array.init_data` instruction.
#[cfg(feature = "gc")]
fn array_init_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    let instance = store.instance(instance_id);

    log::trace!(
        "array.init_data(array={array:#x}, dst={dst}, data_index={data_index:?}, src={src}, len={len})",
    );

    // Null check the array.
    let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
    let array = gc_ref
        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an array");

    let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds)?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds)?;

    // Bounds check the array.
    let array_len = array.len(store.store_opaque());
    let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Calculate the byte length from the array length.
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type has a defined byte size");
    let data_len = len
        .checked_mul(usize::try_from(one_elem_size).unwrap())
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Get the data from the segment, checking its bounds.
    let data_range = instance.wasm_data_range(data_index);
    instance
        .wasm_data(data_range.clone())
        .get(src..)
        .and_then(|d| d.get(..data_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Copy the data into the array.
    let dst_offset = u32::try_from(dst)
        .unwrap()
        .checked_mul(one_elem_size)
        .unwrap();

    let array_layout = store
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = array_layout.unwrap_array();

    let obj_offset = array_layout.base_size.checked_add(dst_offset).unwrap();

    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
    let gc_store = gc_store.unwrap();
    let data = &instance.wasm_data(data_range)[src..][..data_len];
    gc_store
        .gc_object_data(array.as_gc_ref())
        .copy_from_slice(obj_offset, data);

    Ok(())
}

/// Implementation of the `array.new_elem` instruction.
#[cfg(feature = "gc")]
fn array_new_elem(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::{
        ArrayRef, ArrayRefPre, ArrayType, Func, OpaqueRootScope, RootedGcRefImpl, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    // Convert indices to their typed forms.
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let elem_index = ElemIndex::from_u32(elem_index);
    let instance = store.instance(instance_id);

    let mut storage = None;
    let elements = instance.passive_element_segment(&mut storage, elem_index);

    let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let pre = ArrayRefPre::_new(store, array_ty);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let mut store = OpaqueRootScope::new(store);
        // Turn the elements into `Val`s.
        let mut vals = Vec::with_capacity(usize::try_from(elements.len()).unwrap());
        match elements {
            TableSegmentElements::Functions(fs) => {
                let store_id = store.id();
                let (mut instance, registry) = store.instance_and_module_registry_mut(instance_id);
                vals.extend(
                    fs.get(src..)
                        .and_then(|s| s.get(..len))
                        .ok_or_else(|| Trap::TableOutOfBounds)?
                        .iter()
                        .map(|f| {
                            let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                            let func = unsafe {
                                raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p))
                            };
                            Val::FuncRef(func)
                        }),
                );
            }
            TableSegmentElements::Expressions(xs) => {
                let xs = xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?;

                let mut const_context = ConstEvalContext::new(instance_id);
                let mut const_evaluator = ConstExprEvaluator::default();

                for x in xs.iter() {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
            }
        }

        let array = ArrayRef::_new_fixed_async(&mut store, limiter.as_mut(), &pre, &vals).await?;

        let mut store = AutoAssertNoGc::new(&mut store);
        let gc_ref = array.try_clone_gc_ref(&mut store)?;
        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

/// Implementation of the `array.init_elem` instruction.
#[cfg(feature = "gc")]
fn array_init_elem(
    store: &mut dyn VMStore,
    instance: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{
        ArrayRef, Func, OpaqueRootScope, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let mut store = OpaqueRootScope::new(store);

        // Convert the indices into their typed forms.
        let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let elem_index = ElemIndex::from_u32(elem_index);

        log::trace!(
            "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})",
        );

        // Convert the raw GC ref into a `Rooted<ArrayRef>`.
        let array = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
        let array = store.unwrap_gc_store_mut().clone_gc_ref(&array);
        let array = {
            let mut no_gc = AutoAssertNoGc::new(&mut store);
            ArrayRef::from_cloned_gc_ref(&mut no_gc, array)
        };

        // Bounds check the destination within the array.
        let array_len = array._len(&store)?;
        log::trace!("array_len = {array_len}");
        if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
            return Err(Trap::ArrayOutOfBounds.into());
        }

        // Get the passive element segment.
        let mut storage = None;
        let store_id = store.id();
        let (mut instance, registry) = store.instance_and_module_registry_mut(instance);
        let elements = instance.passive_element_segment(&mut storage, elem_index);

        // Convert array offsets into `usize`s.
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        // Turn the elements into `Val`s.
        let vals = match elements {
            TableSegmentElements::Functions(fs) => fs
                .get(src..)
                .and_then(|s| s.get(..len))
                .ok_or_else(|| Trap::TableOutOfBounds)?
                .iter()
                .map(|f| {
                    let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                    let func = unsafe { raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p)) };
                    Val::FuncRef(func)
                })
                .collect::<Vec<_>>(),
            TableSegmentElements::Expressions(xs) => {
                let mut const_context = ConstEvalContext::new(instance.id());
                let mut const_evaluator = ConstExprEvaluator::default();

                let mut vals = Vec::new();
                for x in xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?
                {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
                vals
            }
        };

        // Copy the values into the array.
        for (i, val) in vals.into_iter().enumerate() {
            let i = u32::try_from(i).unwrap();
            let j = dst.checked_add(i).unwrap();
            array._set(&mut store, j, val)?;
        }

        Ok(())
    })?
}

// TODO: Specialize this libcall for only non-GC array elements, so we never
// have to do GC barriers and their associated indirect calls through the `dyn
// GcHeap`. Instead, implement those copies inline in Wasm code. Then, use bulk
// `memcpy`-style APIs to do the actual copies here.
/// Implementation of the `array.copy` instruction.
#[cfg(feature = "gc")]
fn array_copy(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    dst_array: u32,
    dst: u32,
    src_array: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{ArrayRef, OpaqueRootScope, store::AutoAssertNoGc};

    log::trace!(
        "array.copy(dst_array={dst_array:#x}, dst_index={dst}, src_array={src_array:#x}, src_index={src}, len={len})",
    );

    let mut store = OpaqueRootScope::new(store.store_opaque_mut());
    let mut store = AutoAssertNoGc::new(&mut store);

    // Convert the raw GC refs into `Rooted<ArrayRef>`s.
    let dst_array = VMGcRef::from_raw_u32(dst_array).ok_or_else(|| Trap::NullReference)?;
    let dst_array = store.unwrap_gc_store_mut().clone_gc_ref(&dst_array);
    let dst_array = ArrayRef::from_cloned_gc_ref(&mut store, dst_array);
    let src_array = VMGcRef::from_raw_u32(src_array).ok_or_else(|| Trap::NullReference)?;
    let src_array = store.unwrap_gc_store_mut().clone_gc_ref(&src_array);
    let src_array = ArrayRef::from_cloned_gc_ref(&mut store, src_array);

    // Bounds check the destination array's elements.
    let dst_array_len = dst_array._len(&store)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > dst_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Bounds check the source array's elements.
    let src_array_len = src_array._len(&store)?;
    if src.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > src_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    let mut store = AutoAssertNoGc::new(&mut store);
    // If `src_array` and `dst_array` are the same array, then we are
    // potentially doing an overlapping copy, so copy elements in the order
    // that doesn't clobber source elements before they are copied. If they
    // are different arrays, the order doesn't matter, but we simply don't
    // bother checking.
    if src > dst {
        for i in 0..len {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    } else {
        for i in (0..len).rev() {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    }
    Ok(())
}

/// Implementation of the `ref.test`-style subtype check between two engine
/// type indices.
#[cfg(feature = "gc")]
fn is_subtype(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    actual_engine_type: u32,
    expected_engine_type: u32,
) -> u32 {
    use wasmtime_environ::VMSharedTypeIndex;

    let actual = VMSharedTypeIndex::from_u32(actual_engine_type);
    let expected = VMSharedTypeIndex::from_u32(expected_engine_type);

    let is_subtype: bool = store.engine().signatures().is_subtype(actual, expected);

    log::trace!("is_subtype(actual={actual:?}, expected={expected:?}) -> {is_subtype}");
    is_subtype as u32
}

/// Implementation of `memory.atomic.notify` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_notify(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    count: u32,
) -> Result<u32, Trap> {
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_notify(addr_index, count)
}

/// Implementation of `memory.atomic.wait32` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait32(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u32,
    timeout: u64,
) -> Result<u32, Trap> {
    // A negative (as i64) timeout means "wait forever"; otherwise the value
    // is a relative timeout in nanoseconds.
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait32(addr_index, expected, timeout)? as u32)
}

/// Implementation of `memory.atomic.wait64` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait64(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u64,
    timeout: u64,
) -> Result<u32, Trap> {
    // A negative (as i64) timeout means "wait forever"; otherwise the value
    // is a relative timeout in nanoseconds.
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait64(addr_index, expected, timeout)? as u32)
}

/// Hook for when an instance runs out of fuel.
fn out_of_gas(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    block_on!(store, async |store| {
        if !store.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if store.fuel_yield_interval.is_some() {
            crate::runtime::vm::Yield::new().await;
        }
        Ok(())
    })?
}

/// Hook for when an instance observes that the epoch has changed.
#[cfg(target_has_atomic = "64")]
fn new_epoch(store: &mut dyn VMStore, _instance: InstanceId) -> Result<NextEpoch> {
    use crate::UpdateDeadline;

    #[cfg(feature = "debug")]
    {
        store.block_on_debug_handler(crate::DebugEvent::EpochYield)?;
    }

    let update_deadline = store.new_epoch_updated_deadline()?;
    block_on!(store, async move |store| {
        let delta = match update_deadline {
            UpdateDeadline::Interrupt => return Err(Trap::Interrupt.into()),
            UpdateDeadline::Continue(delta) => delta,

            // Note that custom assertions for `async_support` are needed below
            // as otherwise, if these variants are used in a build with
            // `async_support` disabled, they'll trip the `assert_ready` part
            // of `block_on!` above. The assertion here provides a more direct
            // error message as to what's going on.
            #[cfg(feature = "async")]
            UpdateDeadline::Yield(delta) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::Yield` without enabling \
                     async support in the config"
                );
                crate::runtime::vm::Yield::new().await;
                delta
            }
            #[cfg(feature = "async")]
            UpdateDeadline::YieldCustom(delta, future) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::YieldCustom` without enabling \
                     async support in the config"
                );
                future.await;
                delta
            }
        };

        // Set a new deadline and return the new epoch deadline so
        // the Wasm code doesn't have to reload it.
        store.set_epoch_deadline(delta);
        Ok(NextEpoch(store.get_epoch_deadline()))
    })?
}

struct NextEpoch(u64);

unsafe impl HostResultHasUnwindSentinel for NextEpoch {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.0
    }
}

/// Hook for validating malloc using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_malloc(store: &mut dyn VMStore, instance: InstanceId, addr: u32, len: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.malloc(addr as usize, len as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(DoubleMalloc { addr, len }) => {
                bail!("Double malloc at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Malloc out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => unreachable!(),
        }
    }
    Ok(())
}

/// Hook for validating free using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_free(store: &mut dyn VMStore, instance: InstanceId, addr: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.free(addr as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(InvalidFree { addr }) => {
                bail!("Invalid free at addr {:#x}", addr)
            }
            _ => unreachable!(),
        }
    }
    Ok(())
}

/// Hook for validating loads using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_load(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.read(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidRead { addr, len }) => {
                bail!("Invalid load at addr {:#x} of size {}", addr, len);
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Load out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => unreachable!(),
        }
    }
    Ok(())
}

/// Hook for validating stores using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_store(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.write(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidWrite { addr, len }) => {
                bail!("Invalid store at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Store out of bounds at addr {:#x} of size {}", addr, len)
            }
            _ => unreachable!(),
        }
    }
    Ok(())
}

/// Hook for turning wmemcheck load/store validation off when entering a malloc function.
#[cfg(feature = "wmemcheck")]
fn malloc_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

/// Hook for turning wmemcheck load/store validation off when entering a free function.
#[cfg(feature = "wmemcheck")]
fn free_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

/// Hook for tracking Wasm stack updates using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn update_stack_pointer(_store: &mut dyn VMStore, _instance: InstanceId, _value: u32) {
    // TODO: stack-tracing has yet to be finalized. All memory below
    // the address of the top of the stack is marked as valid for
    // loads and stores.
    // if let Some(wmemcheck_state) = &mut instance.wmemcheck_state {
    //     instance.wmemcheck_state.update_stack_pointer(value as usize);
    // }
}

/// Hook for updating the wmemcheck_state memory state vector every time
/// `memory.grow` is called.
#[cfg(feature = "wmemcheck")]
fn update_mem_size(store: &mut dyn VMStore, instance: InstanceId, num_pages: u32) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        const KIB: usize = 1024;
        let num_bytes = num_pages as usize * 64 * KIB;
        wmemcheck_state.update_mem_size(num_bytes);
    }
}

fn floor_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_floor(val)
}

fn floor_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_floor(val)
}

fn ceil_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_ceil(val)
}

fn ceil_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_ceil(val)
}

fn trunc_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_trunc(val)
}

fn trunc_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_trunc(val)
}

fn nearest_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_nearest(val)
}

fn nearest_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_nearest(val)
}

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.swizzle` instruction when `pshufb` in SSSE3 is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, a: i8x16, b: i8x16) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let a = U { reg: a }.mem;
        let b = U { reg: b }.mem;

        // Use the `swizzle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 pshufb semantics, since Wasmtime uses
        // this to implement `i8x16.swizzle`.
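        //
        // Illustrative example of the difference: a lane index of 17 selects
        // 0 here, whereas `pshufb` (which zeroes a lane only when bit 7 of
        // the index byte is set, and otherwise uses the low 4 bits) would
        // select lane `17 % 16 == 1`.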
        let select = |arr: &[u8; 16], byte: u8| {
            if byte >= 16 { 0x00 } else { arr[byte as usize] }
        };

        U {
            mem: [
                select(&a, b[0]),
                select(&a, b[1]),
                select(&a, b[2]),
                select(&a, b[3]),
                select(&a, b[4]),
                select(&a, b[5]),
                select(&a, b[6]),
                select(&a, b[7]),
                select(&a, b[8]),
                select(&a, b[9]),
                select(&a, b[10]),
                select(&a, b[11]),
                select(&a, b[12]),
                select(&a, b[13]),
                select(&a, b[14]),
                select(&a, b[15]),
            ],
        }
        .reg
    }
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, _a: i8x16, _b: i8x16) -> i8x16 {
    // Never called: the compiler only emits this libcall on x86_64 (see
    // above); this stub merely provides a definition on other targets.
    unreachable!()
}

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.shuffle` instruction when `pshufb` in SSSE3 is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    a: i8x16,
    b: i8x16,
    c: i8x16,
) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let ab = [U { reg: a }.mem, U { reg: b }.mem];
        let c = U { reg: c }.mem;

        // Use the `shuffle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 pshufb semantics, since Wasmtime uses
        // this to implement `i8x16.shuffle`.
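        //
        // Illustrative example of the difference: index 20 selects lane 4 of
        // `b` and index 40 yields 0 here, whereas `pshufb` (zeroing only when
        // bit 7 of the index byte is set) would reduce both indices modulo 16.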
        let select = |arr: &[[u8; 16]; 2], byte: u8| {
            if byte >= 32 {
                0x00
            } else if byte >= 16 {
                arr[1][byte as usize - 16]
            } else {
                arr[0][byte as usize]
            }
        };

        U {
            mem: [
                select(&ab, c[0]),
                select(&ab, c[1]),
                select(&ab, c[2]),
                select(&ab, c[3]),
                select(&ab, c[4]),
                select(&ab, c[5]),
                select(&ab, c[6]),
                select(&ab, c[7]),
                select(&ab, c[8]),
                select(&ab, c[9]),
                select(&ab, c[10]),
                select(&ab, c[11]),
                select(&ab, c[12]),
                select(&ab, c[13]),
                select(&ab, c[14]),
                select(&ab, c[15]),
            ],
        }
        .reg
    }
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    _a: i8x16,
    _b: i8x16,
    _c: i8x16,
) -> i8x16 {
    // As with `i8x16_swizzle` above, this stub merely provides a definition
    // on targets where the compiler never emits this libcall.
    unreachable!()
}

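// Lane-wise fused multiply-add: computes `x[i] * y[i] + z[i]` for each of the
// four `f32` lanes, deferring to `wasmtime_math` for the exact rounding
// behavior. (Whether the compiler calls this libcall or emits a native FMA
// instruction is decided in the backend, not here.)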
fn fma_f32x4(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f32x4,
    y: f32x4,
    z: f32x4,
) -> f32x4 {
    union U {
        reg: f32x4,
        mem: [f32; 4],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[2], y[2], z[2]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[3], y[3], z[3]),
            ],
        }
        .reg
    }
}

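// As above, but over the two `f64` lanes of a `f64x2` vector.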
fn fma_f64x2(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f64x2,
    y: f64x2,
    z: f64x2,
) -> f64x2 {
    union U {
        reg: f64x2,
        mem: [f64; 2],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
            ],
        }
        .reg
    }
}

/// This intrinsic is just used to record trap information.
///
/// The `Infallible` "ok" type here means that this function never returns
/// success: it only ever returns an error, which hooks into the machinery
/// for handling `Result` values to record such trap information.
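///
/// A minimal sketch (illustrative, not code generated by Wasmtime) of how the
/// uninhabited ok-type reads at a call site:
///
/// ```ignore
/// let result: Result<Infallible, TrapReason> = trap(store, instance, code);
/// // `Ok(_)` is uninhabited, so the only possible outcome is `Err(..)`.
/// let reason = result.unwrap_err();
/// ```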
fn trap(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    code: u8,
) -> Result<Infallible, TrapReason> {
    Err(TrapReason::Wasm(
        wasmtime_environ::Trap::from_u8(code).unwrap(),
    ))
}

fn raise(store: &mut dyn VMStore, _instance: InstanceId) {
    // SAFETY: this is only called from compiled wasm so we know that wasm has
    // already been entered. It's a dynamic safety precondition that the trap
    // information has already been arranged to be present.
    unsafe { crate::runtime::vm::traphandlers::raise_preexisting_trap(store) }
}

// Builtins for continuations. These are thin wrappers around the
// respective definitions in stack_switching.rs.
#[cfg(feature = "stack-switching")]
fn cont_new(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> Result<Option<AllocationSize>> {
    let ans =
        crate::vm::stack_switching::cont_new(store, instance, func, param_count, result_count)?;
    // Wrap the pointer-sized result in `AllocationSize` so it can be returned
    // through the generic `HostResultHasUnwindSentinel` machinery.
    Ok(Some(AllocationSize(ans.cast::<u8>() as usize)))
}

#[cfg(feature = "gc")]
fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 {
    instance.as_u32()
}

#[cfg(feature = "gc")]
fn throw_ref(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    exnref: u32,
) -> Result<(), TrapReason> {
    let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?;
    let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref);
    let exnref = exnref
        .into_exnref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an exception object");
    store.set_pending_exception(exnref);
    Err(TrapReason::Exception)
}

fn breakpoint(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    #[cfg(feature = "debug")]
    {
        log::trace!("hit breakpoint");
        store.block_on_debug_handler(crate::DebugEvent::Breakpoint)?;
    }
    // Avoid an unused-variable warning in builds without the `debug` feature.
    let _ = store;
    Ok(())
}