wasmtime/runtime/vm/
libcalls.rs

//! Runtime library calls.
//!
//! Note that Wasm compilers may sometimes perform these inline rather than
//! calling them, particularly when CPUs have special instructions which compute
//! them directly.
//!
//! These functions are called by compiled Wasm code, and therefore must take
//! certain care about some things:
//!
//! * They must only contain basic, raw i32/i64/f32/f64/pointer parameters that
//!   are safe to pass across the system ABI.
//!
//! * If any nested function propagates an `Err(trap)` out to the library
//!   function frame, we need to raise it. This involves some nasty and quite
//!   unsafe code under the covers! Notably, after raising the trap, drops
//!   **will not** be run for local variables! This can lead to things like
//!   leaking `InstanceHandle`s which leads to never deallocating JIT code,
//!   instances, and modules if we are not careful!
//!
//! * The libcall must be entered via a Wasm-to-libcall trampoline that saves
//!   the last Wasm FP and PC for stack walking purposes. (For more details, see
//!   `crates/wasmtime/src/runtime/vm/backtrace.rs`.)
//!
//! To make it easier to correctly handle all these things, **all** libcalls
//! must be defined via the `libcall!` helper macro! See its doc comments below
//! for an example, or just look at the rest of the file.
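//!
//! As an illustrative sketch (the real invocation goes through
//! `wasmtime_environ::foreach_builtin_function!` at the bottom of the `raw`
//! module below), a builtin declaration accepted by the macro looks like:
//!
//! ```ignore
//! libcall! {
//!     // `vmctx` always comes first; remaining parameters and the result use
//!     // the macro's type shorthands (`u32`, `u64`, `pointer`, `size`, ...).
//!     memory_grow(vmctx: vmctx, delta: u64, index: u32) -> pointer;
//! }
//! ```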
//!
//! ## Dealing with `externref`s
//!
//! When receiving a raw `*mut u8` that is actually a `VMExternRef` reference,
//! convert it into a proper `VMExternRef` with `VMExternRef::clone_from_raw` as
//! soon as possible. Any GC before the raw pointer is converted into a
//! reference can potentially collect the referenced object, which could lead to
//! use after free.
//!
//! Avoid this by eagerly converting into a proper `VMExternRef`! (Unfortunately
//! there is no macro to help us automatically get this correct, so stay
//! vigilant!)
//!
//! ```ignore
//! pub unsafe extern "C" fn my_libcall_takes_ref(raw_extern_ref: *mut u8) {
//!     // Before `clone_from_raw`, `raw_extern_ref` is potentially unrooted,
//!     // and doing GC here could lead to use after free!
//!
//!     let my_extern_ref = if raw_extern_ref.is_null() {
//!         None
//!     } else {
//!         Some(VMExternRef::clone_from_raw(raw_extern_ref))
//!     };
//!
//!     // Now that we did `clone_from_raw`, it is safe to do a GC (or do
//!     // anything else that might transitively GC, like call back into
//!     // Wasm!)
//! }
//! ```

#[cfg(feature = "stack-switching")]
use super::stack_switching::VMContObj;
use crate::prelude::*;
use crate::runtime::store::{InstanceId, StoreInstanceId, StoreOpaque};
#[cfg(feature = "gc")]
use crate::runtime::vm::VMGcRef;
use crate::runtime::vm::table::TableElementType;
use crate::runtime::vm::vmcontext::VMFuncRef;
use crate::runtime::vm::{
    self, HostResultHasUnwindSentinel, SendSyncPtr, TrapReason, VMStore, f32x4, f64x2, i8x16,
};
use core::convert::Infallible;
use core::ptr::NonNull;
#[cfg(feature = "threads")]
use core::time::Duration;
use wasmtime_environ::{
    DataIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, FuncIndex, MemoryIndex,
    TableIndex, Trap,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::AccessError::{
    DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds,
};

/// Raw functions which are actually called from compiled code.
///
/// Invocation of a builtin currently looks like:
///
/// * A wasm function calls a cranelift-compiled trampoline that's generated
///   once-per-builtin.
/// * The cranelift-compiled trampoline performs any necessary actions to exit
///   wasm, such as dealing with fp/pc/etc.
/// * The cranelift-compiled trampoline loads a function pointer from an array
///   stored in `VMContext`. That function pointer is defined in this module.
/// * This module runs, handling things like `catch_unwind` and `Result` and
///   such.
/// * This module delegates to the outer module (this file) which has the actual
///   implementation.
///
/// For more information on converting from host-defined values to Cranelift ABI
/// values see the `catch_unwind_and_record_trap` function.
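///
/// As a rough, illustrative sketch (using the `memory_grow` builtin and
/// eliding the `cfg` handling), the entrypoint that `libcall!` generates
/// expands to something like:
///
/// ```ignore
/// pub unsafe extern "C" fn memory_grow(
///     vmctx: NonNull<VMContext>,
///     delta: u64,
///     index: u32,
/// ) -> *mut u8 {
///     // Exit wasm, catch unwinds, and translate the `Result` into the raw
///     // ABI value expected by compiled code.
///     Instance::enter_host_from_wasm(vmctx, |store, instance| {
///         super::memory_grow(store, instance, delta, index)
///     })
/// }
/// ```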
pub mod raw {
    use crate::runtime::vm::{Instance, VMContext, f32x4, f64x2, i8x16};
    use core::ptr::NonNull;

    macro_rules! libcall {
        (
            $(
                $( #[cfg($attr:meta)] )?
                $name:ident( vmctx: vmctx $(, $pname:ident: $param:ident )* ) $(-> $result:ident)?;
            )*
        ) => {
            $(
                // This is the direct entrypoint from the compiled module which
                // still has the raw signature.
                //
                // This will delegate to the actual implementation in the outer
                // module and automatically perform `catch_unwind` along with
                // conversion of the return value in the face of traps.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                #[allow(unused_variables, reason = "macro-generated")]
                pub unsafe extern "C" fn $name(
                    vmctx: NonNull<VMContext>,
                    $( $pname : libcall!(@ty $param), )*
                ) $(-> libcall!(@ty $result))? {
                    $(#[cfg($attr)])?
                    unsafe {
                        Instance::enter_host_from_wasm(vmctx, |store, instance| {
                            super::$name(store, instance, $($pname),*)
                        })
                    }
                    $(
                        #[cfg(not($attr))]
                        {
                            let _ = vmctx;
                            unreachable!();
                        }
                    )?
                }

                // This works around a `rustc` bug where compiling with LTO
                // will sometimes strip out some of these symbols resulting
                // in a linking failure.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                const _: () = {
                    #[used]
                    static I_AM_USED: unsafe extern "C" fn(
                        NonNull<VMContext>,
                        $( $pname : libcall!(@ty $param), )*
                    ) $( -> libcall!(@ty $result))? = $name;
                };
            )*
        };

        (@ty u32) => (u32);
        (@ty u64) => (u64);
        (@ty f32) => (f32);
        (@ty f64) => (f64);
        (@ty u8) => (u8);
        (@ty i8x16) => (i8x16);
        (@ty f32x4) => (f32x4);
        (@ty f64x2) => (f64x2);
        (@ty bool) => (bool);
        (@ty pointer) => (*mut u8);
        (@ty size) => (usize);
    }

    wasmtime_environ::foreach_builtin_function!(libcall);
}

/// Uses the `$store` provided to invoke the async closure `$f` and block on the
/// result.
///
/// This will internally multiplex between `$store.with_blocking(...)` and
/// simply asserting the closure is ready, depending on whether the store's
/// `async_support` flag is set.
///
/// FIXME: ideally this would be a function, not a macro. If this were a
/// function, though, it would require placing a bound on the async closure `$f`
/// where the returned future is itself `Send`. That's not possible in Rust
/// right now, unfortunately.
///
/// As a workaround this takes advantage of the fact that we can assume that the
/// compiler can infer that the future returned by `$f` is indeed `Send` so long
/// as we don't try to name the type or place it behind a generic. In the future
/// when we can bound the return future of async functions with `Send` this
/// macro should be replaced with an equivalent function.
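///
/// A sketch of typical usage, mirroring the libcalls below (`instance_op` is a
/// hypothetical stand-in for any async store operation):
///
/// ```ignore
/// let (mut limiter, store) = store.resource_limiter_and_store_opaque();
/// block_on!(store, async |store| {
///     // `.await` on store-related futures here; without `async_support`
///     // these futures must already be ready.
///     instance_op(store, limiter.as_mut()).await
/// })?
/// ```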
macro_rules! block_on {
    ($store:expr, $f:expr) => {{
        let store: &mut StoreOpaque = $store;
        let closure = assert_async_fn_closure($f);
        if store.async_support() {
            #[cfg(feature = "async")]
            {
                store.with_blocking(|store, cx| cx.block_on(closure(store)))
            }
            #[cfg(not(feature = "async"))]
            {
                unreachable!()
            }
        } else {
            // Note that if `async_support` is disabled then it should not be
            // possible to introduce await points so the provided future should
            // always be ready.
            anyhow::Ok(vm::assert_ready(closure(store)))
        }
    }};
}

fn assert_async_fn_closure<F, R>(f: F) -> F
where
    F: AsyncFnOnce(&mut StoreOpaque) -> R,
{
    f
}

fn memory_grow(
    store: &mut dyn VMStore,
    instance: InstanceId,
    delta: u64,
    memory_index: u32,
) -> Result<Option<AllocationSize>> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let instance = store.instance_mut(instance);
        let module = instance.env_module();
        let page_size_log2 = module.memories[module.memory_index(memory_index)].page_size_log2;

        let result = instance
            .memory_grow(limiter, memory_index, delta)
            .await?
            .map(|size_in_bytes| AllocationSize(size_in_bytes >> page_size_log2));

        Ok(result)
    })?
}

/// A helper structure to represent the return value of a memory or table growth
/// call.
///
/// This represents a byte- or element-based count of the size of an item on the
/// host: for a memory it's how many bytes large the memory is, and for a table
/// it's how many elements large it is. It's assumed that the value here is
/// never -1 or -2 as that would mean the entire host address space is
/// allocated, which is not possible.
struct AllocationSize(usize);

/// Special implementation for growth-related libcalls.
///
/// Here the optional return value means:
///
/// * `Some(val)` - the growth succeeded and the previous size of the item was
///   `val`.
/// * `None` - the growth failed.
///
/// The failure case returns -1 (or `usize::MAX` as an unsigned integer) and the
/// successful case returns the `val` itself. Note that -2 (`usize::MAX - 1`
/// when unsigned) is used as a sentinel to indicate an unwind, as no valid
/// allocation can be that large.
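///
/// An illustrative sketch of the resulting ABI encoding:
///
/// ```ignore
/// Some(AllocationSize(3)).into_abi(); // 3 as *mut u8: success, old size 3
/// None.into_abi();                    // usize::MAX as *mut u8: growth failed
/// // usize::MAX - 1 is reserved as the unwind sentinel (`SENTINEL` below).
/// ```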
unsafe impl HostResultHasUnwindSentinel for Option<AllocationSize> {
    type Abi = *mut u8;
    const SENTINEL: *mut u8 = (usize::MAX - 1) as *mut u8;

    fn into_abi(self) -> *mut u8 {
        match self {
            Some(size) => {
                debug_assert!(size.0 < (usize::MAX - 1));
                size.0 as *mut u8
            }
            None => usize::MAX as *mut u8,
        }
    }
}

/// Implementation of `table.grow` for `funcref` tables.
unsafe fn table_grow_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: *mut u8,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = NonNull::new(init_value.cast::<VMFuncRef>()).map(SendSyncPtr::new);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Func,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_func(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.grow` for GC-reference tables.
#[cfg(feature = "gc")]
fn table_grow_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: u32,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = VMGcRef::from_raw_u32(init_value);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let (gc_store, mut instance) = store.optional_gc_store_and_instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::GcRef,
        ));

        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table
                    .grow_gc_ref(limiter, gc_store, delta, element.as_ref())
                    .await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

#[cfg(feature = "stack-switching")]
unsafe fn table_grow_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    // The following two values together form the initial Option<VMContObj>.
    // A None value is indicated by the pointer being null.
    init_value_contref: *mut u8,
    init_value_revision: usize,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = unsafe { VMContObj::from_raw_parts(init_value_contref, init_value_revision) };
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Cont,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_cont(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.fill` for `funcref`s.
unsafe fn table_fill_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: *mut u8,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => {
            let val = NonNull::new(val.cast::<VMFuncRef>());
            table.fill_func(dst, val, len)?;
            Ok(())
        }
        TableElementType::GcRef => unreachable!(),
        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "gc")]
fn table_fill_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<()> {
    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => unreachable!(),
        TableElementType::GcRef => {
            let gc_ref = VMGcRef::from_raw_u32(val);
            table.fill_gc_ref(gc_store, dst, gc_ref.as_ref(), len)?;
            Ok(())
        }
        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "stack-switching")]
unsafe fn table_fill_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    value_contref: *mut u8,
    value_revision: usize,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Cont => {
            let contobj = unsafe { VMContObj::from_raw_parts(value_contref, value_revision) };
            table.fill_cont(dst, contobj, len)?;
            Ok(())
        }
        _ => panic!("Wrong table filling function"),
    }
}

// Implementation of `table.copy`.
fn table_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_table_index: u32,
    src_table_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let dst_table_index = TableIndex::from_u32(dst_table_index);
    let src_table_index = TableIndex::from_u32(src_table_index);
    let store = store.store_opaque_mut();
    let mut instance = store.instance_mut(instance);

    // Convert the two table indices relative to `instance` into their
    // defining instances and the defined table index within each.
    let (dst_def_index, dst_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(dst_table_index);
    let dst_instance_id = dst_instance.id();
    let (src_def_index, src_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(src_table_index);
    let src_instance_id = src_instance.id();

    let src_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), src_instance_id),
        src_def_index,
    );
    let dst_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), dst_instance_id),
        dst_def_index,
    );

    // SAFETY: this is only safe if the two tables have the same type, and that
    // was validated during wasm-validation time.
    unsafe { crate::Table::copy_raw(store, &dst_table, dst, &src_table, src, len) }
}

// Implementation of `table.init`.
fn table_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    elem_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<()> {
    let table_index = TableIndex::from_u32(table_index);
    let elem_index = ElemIndex::from_u32(elem_index);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        vm::Instance::table_init(
            store,
            limiter.as_mut(),
            instance,
            table_index,
            elem_index,
            dst,
            src,
            len,
        )
        .await
    })??;
    Ok(())
}

// Implementation of `elem.drop`.
fn elem_drop(store: &mut dyn VMStore, instance: InstanceId, elem_index: u32) {
    let elem_index = ElemIndex::from_u32(elem_index);
    store.instance_mut(instance).elem_drop(elem_index)
}

// Implementation of `memory.copy`.
fn memory_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_index: u32,
    dst: u64,
    src_index: u32,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let src_index = MemoryIndex::from_u32(src_index);
    let dst_index = MemoryIndex::from_u32(dst_index);
    store
        .instance_mut(instance)
        .memory_copy(dst_index, dst, src_index, src, len)
}

// Implementation of `memory.fill` for locally defined memories.
fn memory_fill(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<(), Trap> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    #[expect(clippy::cast_possible_truncation, reason = "known to truncate here")]
    store
        .instance_mut(instance)
        .memory_fill(memory_index, dst, val as u8, len)
}

// Implementation of `memory.init`.
fn memory_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    data_index: u32,
    dst: u64,
    src: u32,
    len: u32,
) -> Result<(), Trap> {
    let memory_index = MemoryIndex::from_u32(memory_index);
    let data_index = DataIndex::from_u32(data_index);
    store
        .instance_mut(instance)
        .memory_init(memory_index, data_index, dst, src, len)
}

// Implementation of `ref.func`.
fn ref_func(store: &mut dyn VMStore, instance: InstanceId, func_index: u32) -> NonNull<u8> {
    store
        .instance_mut(instance)
        .get_func_ref(FuncIndex::from_u32(func_index))
        .expect("ref_func: funcref should always be available for given func index")
        .cast()
}

// Implementation of `data.drop`.
fn data_drop(store: &mut dyn VMStore, instance: InstanceId, data_index: u32) {
    let data_index = DataIndex::from_u32(data_index);
    store.instance_mut(instance).data_drop(data_index)
}

// Returns a table entry after lazily initializing it.
fn table_get_lazy_init_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    index: u64,
) -> *mut u8 {
    let table_index = TableIndex::from_u32(table_index);
    let table = store
        .instance_mut(instance)
        .get_table_with_lazy_init(table_index, core::iter::once(index));
    let elem = table
        .get_func(index)
        .expect("table access already bounds-checked");

    match elem {
        Some(ptr) => ptr.as_ptr().cast(),
        None => core::ptr::null_mut(),
    }
}

/// Drop a GC reference.
#[cfg(feature = "gc-drc")]
fn drop_gc_ref(store: &mut dyn VMStore, _instance: InstanceId, gc_ref: u32) {
    log::trace!("libcalls::drop_gc_ref({gc_ref:#x})");
    let gc_ref = VMGcRef::from_raw_u32(gc_ref).expect("non-null VMGcRef");
    store
        .store_opaque_mut()
        .unwrap_gc_store_mut()
        .drop_gc_ref(gc_ref);
}

/// Grow the GC heap.
#[cfg(feature = "gc-null")]
fn grow_gc_heap(store: &mut dyn VMStore, _instance: InstanceId, bytes_needed: u64) -> Result<()> {
    let orig_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        store.gc(limiter.as_mut(), None, Some(bytes_needed)).await;
    })?;

    // JIT code relies on the memory having grown by `bytes_needed` bytes if
    // this libcall returns successfully, so trap if we didn't grow that much.
    let new_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();
    if orig_len
        .checked_add(bytes_needed)
        .is_none_or(|expected_len| new_len < expected_len)
    {
        return Err(crate::Trap::AllocationTooLarge.into());
    }

    Ok(())
}

/// Allocate a raw, uninitialized GC object for Wasm code.
///
/// The Wasm code is responsible for initializing the object.
#[cfg(feature = "gc-drc")]
fn gc_alloc_raw(
    store: &mut dyn VMStore,
    instance: InstanceId,
    kind_and_reserved: u32,
    module_interned_type_index: u32,
    size: u32,
    align: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::vm::VMGcHeader;
    use core::alloc::Layout;
    use wasmtime_environ::{ModuleInternedTypeIndex, VMGcKind};

    let kind = VMGcKind::from_high_bits_of_u32(kind_and_reserved);
    log::trace!("gc_alloc_raw(kind={kind:?}, size={size}, align={align})");

    let module = store
        .instance(instance)
        .runtime_module()
        .expect("should never allocate GC types defined in a dummy module");

    let module_interned_type_index = ModuleInternedTypeIndex::from_u32(module_interned_type_index);
    let shared_type_index = module
        .signatures()
        .shared_type(module_interned_type_index)
        .expect("should have engine type index for module type index");

    let mut header = VMGcHeader::from_kind_and_index(kind, shared_type_index);
    header.set_reserved_u26(kind_and_reserved & VMGcKind::UNUSED_MASK);

    let size = usize::try_from(size).unwrap();
    let align = usize::try_from(align).unwrap();
    assert!(align.is_power_of_two());
    let layout = Layout::from_size_align(size, align).map_err(|e| {
        let err = Error::from(crate::Trap::AllocationTooLarge);
        err.context(e)
    })?;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let gc_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_raw(header, layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

// Intern a `funcref` into the GC heap, returning its `FuncRefTableId`.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
unsafe fn intern_func_ref_for_gc_heap(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    func_ref: *mut u8,
) -> Result<u32> {
    use crate::{store::AutoAssertNoGc, vm::SendSyncPtr};
    use core::ptr::NonNull;

    let mut store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref = func_ref.cast::<VMFuncRef>();
    let func_ref = NonNull::new(func_ref).map(SendSyncPtr::new);

    let func_ref_id = unsafe {
        store
            .require_gc_store_mut()?
            .func_ref_table
            .intern(func_ref)
    };
    Ok(func_ref_id.into_raw())
}

// Get the raw `VMFuncRef` pointer associated with a `FuncRefTableId` from an
// earlier `intern_func_ref_for_gc_heap` call.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
fn get_interned_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func_ref_id: u32,
    module_interned_type_index: u32,
) -> *mut u8 {
    use super::FuncRefTableId;
    use crate::store::AutoAssertNoGc;
    use wasmtime_environ::{ModuleInternedTypeIndex, packed_option::ReservedValue};

    let store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref_id = FuncRefTableId::from_raw(func_ref_id);
    let module_interned_type_index = ModuleInternedTypeIndex::from_bits(module_interned_type_index);

    let func_ref = if module_interned_type_index.is_reserved_value() {
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_untyped(func_ref_id)
    } else {
        let types = store.engine().signatures();
        let engine_ty = store
            .instance(instance)
            .engine_type_index(module_interned_type_index);
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_typed(types, func_ref_id, engine_ty)
    };

    func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast())
}

/// Implementation of the `array.new_data` instruction.
#[cfg(feature = "gc")]
fn array_new_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let data_index = DataIndex::from_u32(data_index);
        let instance = store.instance(instance_id);

        // Calculate the byte-length of the data (as opposed to the element-length
        // of the array).
        let data_range = instance.wasm_data_range(data_index);
        let shared_ty = instance.engine_type_index(array_type_index);
        let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
        let one_elem_size = array_ty
            .element_type()
            .data_byte_size()
            .expect("Wasm validation ensures that this type has a defined byte size");
        let byte_len = len
            .checked_mul(one_elem_size)
            .and_then(|x| usize::try_from(x).ok())
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Get the data from the segment, checking bounds.
        let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
        instance
            .wasm_data(data_range.clone())
            .get(src..)
            .and_then(|d| d.get(..byte_len))
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Allocate the (uninitialized) array.
        let gc_layout = store
            .engine()
            .signatures()
            .layout(shared_ty)
            .expect("array types have GC layouts");
        let array_layout = gc_layout.unwrap_array();
        let array_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_uninit_array(shared_ty, len, &array_layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
        let gc_store = gc_store.unwrap();
        let data = &instance.wasm_data(data_range)[src..][..byte_len];

        // Copy the data into the array, initializing it.
        gc_store
            .gc_object_data(array_ref.as_gc_ref())
            .copy_from_slice(array_layout.base_size, data);

        // Return the array to Wasm!
        let raw = gc_store.expose_gc_ref_to_wasm(array_ref.into());
        Ok(raw)
    })?
}

/// Implementation of the `array.init_data` instruction.
#[cfg(feature = "gc")]
fn array_init_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    let instance = store.instance(instance_id);

    log::trace!(
        "array.init_data(array={array:#x}, dst={dst}, data_index={data_index:?}, src={src}, len={len})",
    );

    // Null check the array.
    let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
    let array = gc_ref
        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an array");

    let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds)?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds)?;

    // Bounds check the array.
    let array_len = array.len(store.store_opaque());
    let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Calculate the byte length from the array length.
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type has a defined byte size");
    let data_len = len
        .checked_mul(usize::try_from(one_elem_size).unwrap())
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Get the data from the segment, checking its bounds.
    let data_range = instance.wasm_data_range(data_index);
    instance
        .wasm_data(data_range.clone())
        .get(src..)
        .and_then(|d| d.get(..data_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Copy the data into the array.

    let dst_offset = u32::try_from(dst)
        .unwrap()
        .checked_mul(one_elem_size)
        .unwrap();

    let array_layout = store
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = array_layout.unwrap_array();

    let obj_offset = array_layout.base_size.checked_add(dst_offset).unwrap();

    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
    let gc_store = gc_store.unwrap();
    let data = &instance.wasm_data(data_range)[src..][..data_len];
    gc_store
        .gc_object_data(array.as_gc_ref())
        .copy_from_slice(obj_offset, data);

    Ok(())
}

#[cfg(feature = "gc")]
fn array_new_elem(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::{
        ArrayRef, ArrayRefPre, ArrayType, Func, OpaqueRootScope, RootedGcRefImpl, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    // Convert indices to their typed forms.
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let elem_index = ElemIndex::from_u32(elem_index);
    let instance = store.instance(instance_id);

    let mut storage = None;
    let elements = instance.passive_element_segment(&mut storage, elem_index);

    let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let pre = ArrayRefPre::_new(store, array_ty);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let mut store = OpaqueRootScope::new(store);
        // Turn the elements into `Val`s.
        let mut vals = Vec::with_capacity(usize::try_from(elements.len()).unwrap());
        match elements {
            TableSegmentElements::Functions(fs) => {
                let store_id = store.id();
                let mut instance = store.instance_mut(instance_id);
                vals.extend(
                    fs.get(src..)
                        .and_then(|s| s.get(..len))
                        .ok_or_else(|| Trap::TableOutOfBounds)?
                        .iter()
                        .map(|f| {
                            let raw_func_ref = instance.as_mut().get_func_ref(*f);
                            let func = unsafe {
                                raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p))
                            };
                            Val::FuncRef(func)
                        }),
                );
            }
            TableSegmentElements::Expressions(xs) => {
                let xs = xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?;

                let mut const_context = ConstEvalContext::new(instance_id);
                let mut const_evaluator = ConstExprEvaluator::default();

                for x in xs.iter() {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
            }
        }

        let array = ArrayRef::_new_fixed_async(&mut store, limiter.as_mut(), &pre, &vals).await?;

        let mut store = AutoAssertNoGc::new(&mut store);
        let gc_ref = array.try_clone_gc_ref(&mut store)?;
        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

#[cfg(feature = "gc")]
fn array_init_elem(
    store: &mut dyn VMStore,
    instance: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{
        ArrayRef, Func, OpaqueRootScope, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let mut store = OpaqueRootScope::new(store);

        // Convert the indices into their typed forms.
        let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let elem_index = ElemIndex::from_u32(elem_index);

        log::trace!(
            "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})",
        );

        // Convert the raw GC ref into a `Rooted<ArrayRef>`.
        let array = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
        let array = store.unwrap_gc_store_mut().clone_gc_ref(&array);
        let array = {
            let mut no_gc = AutoAssertNoGc::new(&mut store);
            ArrayRef::from_cloned_gc_ref(&mut no_gc, array)
        };

        // Bounds check the destination within the array.
        let array_len = array._len(&store)?;
        log::trace!("array_len = {array_len}");
        if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
            return Err(Trap::ArrayOutOfBounds.into());
        }

        // Get the passive element segment.
        let mut storage = None;
        let store_id = store.id();
        let mut instance = store.instance_mut(instance);
        let elements = instance.passive_element_segment(&mut storage, elem_index);

        // Convert array offsets into `usize`s.
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        // Turn the elements into `Val`s.
        let vals = match elements {
            TableSegmentElements::Functions(fs) => fs
                .get(src..)
                .and_then(|s| s.get(..len))
                .ok_or_else(|| Trap::TableOutOfBounds)?
                .iter()
                .map(|f| {
                    let raw_func_ref = instance.as_mut().get_func_ref(*f);
                    let func = unsafe { raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p)) };
                    Val::FuncRef(func)
                })
                .collect::<Vec<_>>(),
            TableSegmentElements::Expressions(xs) => {
                let mut const_context = ConstEvalContext::new(instance.id());
                let mut const_evaluator = ConstExprEvaluator::default();

                let mut vals = Vec::new();
                for x in xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?
                {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
                vals
            }
        };

        // Copy the values into the array.
        for (i, val) in vals.into_iter().enumerate() {
            let i = u32::try_from(i).unwrap();
            let j = dst.checked_add(i).unwrap();
            array._set(&mut store, j, val)?;
        }

        Ok(())
    })?
}

// TODO: Specialize this libcall for only non-GC array elements, so we never
// have to do GC barriers and their associated indirect calls through the `dyn
// GcHeap`. Instead, implement those copies inline in Wasm code. Then, use bulk
// `memcpy`-style APIs to do the actual copies here.
#[cfg(feature = "gc")]
fn array_copy(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    dst_array: u32,
    dst: u32,
    src_array: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{ArrayRef, OpaqueRootScope, store::AutoAssertNoGc};

    log::trace!(
        "array.copy(dst_array={dst_array:#x}, dst_index={dst}, src_array={src_array:#x}, src_index={src}, len={len})",
    );

    let mut store = OpaqueRootScope::new(store.store_opaque_mut());
    let mut store = AutoAssertNoGc::new(&mut store);

    // Convert the raw GC refs into `Rooted<ArrayRef>`s.
    let dst_array = VMGcRef::from_raw_u32(dst_array).ok_or_else(|| Trap::NullReference)?;
    let dst_array = store.unwrap_gc_store_mut().clone_gc_ref(&dst_array);
    let dst_array = ArrayRef::from_cloned_gc_ref(&mut store, dst_array);
    let src_array = VMGcRef::from_raw_u32(src_array).ok_or_else(|| Trap::NullReference)?;
    let src_array = store.unwrap_gc_store_mut().clone_gc_ref(&src_array);
    let src_array = ArrayRef::from_cloned_gc_ref(&mut store, src_array);

    // Bounds check the destination array's elements.
    let dst_array_len = dst_array._len(&store)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > dst_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Bounds check the source array's elements.
    let src_array_len = src_array._len(&store)?;
    if src.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > src_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    let mut store = AutoAssertNoGc::new(&mut store);
    // If `src_array` and `dst_array` are the same array, then we are
    // potentially doing an overlapping copy, so make sure to copy elements in
    // an order that doesn't clobber the source elements before they are
    // copied. If they are different arrays, either order is fine; we simply
    // don't bother checking whether the two refer to the same array.
    if src > dst {
        for i in 0..len {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    } else {
        for i in (0..len).rev() {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    }
    Ok(())
}

#[cfg(feature = "gc")]
fn is_subtype(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    actual_engine_type: u32,
    expected_engine_type: u32,
) -> u32 {
    use wasmtime_environ::VMSharedTypeIndex;

    let actual = VMSharedTypeIndex::from_u32(actual_engine_type);
    let expected = VMSharedTypeIndex::from_u32(expected_engine_type);

    let is_subtype: bool = store.engine().signatures().is_subtype(actual, expected);

    log::trace!("is_subtype(actual={actual:?}, expected={expected:?}) -> {is_subtype}");
    is_subtype as u32
}

// Implementation of `memory.atomic.notify` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_notify(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    count: u32,
) -> Result<u32, Trap> {
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_notify(addr_index, count)
}

// Implementation of `memory.atomic.wait32` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait32(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u32,
    timeout: u64,
) -> Result<u32, Trap> {
    // A timeout that is negative when reinterpreted as a signed value means
    // "wait indefinitely", i.e. no timeout.
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait32(addr_index, expected, timeout)? as u32)
}

// Implementation of `memory.atomic.wait64` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait64(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u64,
    timeout: u64,
) -> Result<u32, Trap> {
    // See above: a negative timeout means "wait indefinitely".
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait64(addr_index, expected, timeout)? as u32)
}

// Hook for when an instance runs out of fuel.
fn out_of_gas(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    block_on!(store, async |store| {
        if !store.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if store.fuel_yield_interval.is_some() {
            crate::runtime::vm::Yield::new().await;
        }
        Ok(())
    })?
}

// Hook for when an instance observes that the epoch has changed.
#[cfg(target_has_atomic = "64")]
fn new_epoch(store: &mut dyn VMStore, _instance: InstanceId) -> Result<NextEpoch> {
    use crate::UpdateDeadline;

    let update_deadline = store.new_epoch_updated_deadline()?;
    block_on!(store, async move |store| {
        let delta = match update_deadline {
            UpdateDeadline::Interrupt => return Err(Trap::Interrupt.into()),
            UpdateDeadline::Continue(delta) => delta,

            // Note that custom assertions for `async_support` are needed below
            // as otherwise, if these are used in an `async_support`-disabled
            // build, it'll trip the `assert_ready` part of `block_on!` above.
            // The assertion here provides a more direct error message as to
            // what's going on.
            #[cfg(feature = "async")]
            UpdateDeadline::Yield(delta) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::Yield` without enabling \
                     async support in the config"
                );
                crate::runtime::vm::Yield::new().await;
                delta
            }
            #[cfg(feature = "async")]
            UpdateDeadline::YieldCustom(delta, future) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::YieldCustom` without enabling \
                     async support in the config"
                );
                future.await;
                delta
            }
        };

        // Set a new deadline and return the new epoch deadline so
        // the Wasm code doesn't have to reload it.
        store.set_epoch_deadline(delta);
        Ok(NextEpoch(store.get_epoch_deadline()))
    })?
}

struct NextEpoch(u64);

unsafe impl HostResultHasUnwindSentinel for NextEpoch {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.0
    }
}

// Hook for validating malloc using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_malloc(store: &mut dyn VMStore, instance: InstanceId, addr: u32, len: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.malloc(addr as usize, len as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(DoubleMalloc { addr, len }) => {
                bail!("Double malloc at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Malloc out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

// Hook for validating free using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_free(store: &mut dyn VMStore, instance: InstanceId, addr: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.free(addr as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(InvalidFree { addr }) => {
                bail!("Invalid free at addr {:#x}", addr)
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

// Hook for validating load using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_load(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.read(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidRead { addr, len }) => {
                bail!("Invalid load at addr {:#x} of size {}", addr, len);
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Load out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

// Hook for validating store using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_store(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.write(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidWrite { addr, len }) => {
                bail!("Invalid store at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Store out of bounds at addr {:#x} of size {}", addr, len)
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

// Hook for turning wmemcheck load/store validation off when entering a malloc function.
#[cfg(feature = "wmemcheck")]
fn malloc_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

// Hook for turning wmemcheck load/store validation off when entering a free function.
#[cfg(feature = "wmemcheck")]
fn free_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

// Hook for tracking wasm stack updates using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn update_stack_pointer(_store: &mut dyn VMStore, _instance: InstanceId, _value: u32) {
    // TODO: stack-tracing has yet to be finalized. All memory below
    // the address of the top of the stack is marked as valid for
    // loads and stores.
    // if let Some(wmemcheck_state) = &mut instance.wmemcheck_state {
    //     instance.wmemcheck_state.update_stack_pointer(value as usize);
    // }
}

// Hook updating wmemcheck_state memory state vector every time memory.grow is called.
#[cfg(feature = "wmemcheck")]
fn update_mem_size(store: &mut dyn VMStore, instance: InstanceId, num_pages: u32) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        const KIB: usize = 1024;
        let num_bytes = num_pages as usize * 64 * KIB;
        wmemcheck_state.update_mem_size(num_bytes);
    }
}

1448fn floor_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
1449    wasmtime_math::WasmFloat::wasm_floor(val)
1450}
1451
1452fn floor_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
1453    wasmtime_math::WasmFloat::wasm_floor(val)
1454}
1455
1456fn ceil_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
1457    wasmtime_math::WasmFloat::wasm_ceil(val)
1458}
1459
1460fn ceil_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
1461    wasmtime_math::WasmFloat::wasm_ceil(val)
1462}
1463
1464fn trunc_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
1465    wasmtime_math::WasmFloat::wasm_trunc(val)
1466}
1467
1468fn trunc_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
1469    wasmtime_math::WasmFloat::wasm_trunc(val)
1470}
1471
1472fn nearest_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
1473    wasmtime_math::WasmFloat::wasm_nearest(val)
1474}
1475
1476fn nearest_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
1477    wasmtime_math::WasmFloat::wasm_nearest(val)
1478}
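
// Note on `nearest`: the Wasm `fN.nearest` instructions round ties to even,
// unlike Rust's `f32::round`/`f64::round`, which round ties away from zero.
// A sketch of the difference (illustrative values, assuming `wasm_nearest`
// implements the Wasm semantics):
//
// ```ignore
// assert_eq!(2.5f32.round(), 3.0); // std rounds ties away from zero
// // nearest_f32(store, instance, 2.5) => 2.0 (ties to even)
// ```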

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.swizzle` instruction when SSSE3's `pshufb` is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, a: i8x16, b: i8x16) -> i8x16 {
    // Reinterpret the opaque SIMD register type as a plain byte array so
    // that individual lanes can be indexed.
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let a = U { reg: a }.mem;
        let b = U { reg: b }.mem;

        // Use the `swizzle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 `pshufb` semantics, since Wasmtime uses
        // this to implement `i8x16.swizzle`.
        let select = |arr: &[u8; 16], byte: u8| {
            if byte >= 16 { 0x00 } else { arr[byte as usize] }
        };
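
        // For example (illustrative lane values): with
        // `a = [10, 11, ..., 25]`, `select(&a, 3)` yields 13, while
        // `select(&a, 16)` or `select(&a, 255)` yields 0.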

        U {
            mem: [
                select(&a, b[0]),
                select(&a, b[1]),
                select(&a, b[2]),
                select(&a, b[3]),
                select(&a, b[4]),
                select(&a, b[5]),
                select(&a, b[6]),
                select(&a, b[7]),
                select(&a, b[8]),
                select(&a, b[9]),
                select(&a, b[10]),
                select(&a, b[11]),
                select(&a, b[12]),
                select(&a, b[13]),
                select(&a, b[14]),
                select(&a, b[15]),
            ],
        }
        .reg
    }
}

// Stub for all other targets: the compiler only emits this libcall on x86_64
// (see above), but a definition is presumably still needed to populate the
// libcall table, so it panics if ever reached.
#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, _a: i8x16, _b: i8x16) -> i8x16 {
    unreachable!()
}

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.shuffle` instruction when SSSE3's `pshufb` is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    a: i8x16,
    b: i8x16,
    c: i8x16,
) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        // Treat `a` and `b` as a single 32-lane table indexed by the bytes
        // of `c`.
        let ab = [U { reg: a }.mem, U { reg: b }.mem];
        let c = U { reg: c }.mem;

        // Use the `shuffle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 `pshufb` semantics, since Wasmtime uses
        // this to implement `i8x16.shuffle`.
        let select = |arr: &[[u8; 16]; 2], byte: u8| {
            if byte >= 32 {
                0x00
            } else if byte >= 16 {
                arr[1][byte as usize - 16]
            } else {
                arr[0][byte as usize]
            }
        };
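
        // For example (illustrative lane indices): `select(&ab, 4)` reads
        // `a`'s lane 4, `select(&ab, 20)` reads `b`'s lane 4, and any index
        // >= 32 yields 0.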

        U {
            mem: [
                select(&ab, c[0]),
                select(&ab, c[1]),
                select(&ab, c[2]),
                select(&ab, c[3]),
                select(&ab, c[4]),
                select(&ab, c[5]),
                select(&ab, c[6]),
                select(&ab, c[7]),
                select(&ab, c[8]),
                select(&ab, c[9]),
                select(&ab, c[10]),
                select(&ab, c[11]),
                select(&ab, c[12]),
                select(&ab, c[13]),
                select(&ab, c[14]),
                select(&ab, c[15]),
            ],
        }
        .reg
    }
}

// Stub for all other targets, analogous to the `i8x16_swizzle` stub above.
#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    _a: i8x16,
    _b: i8x16,
    _c: i8x16,
) -> i8x16 {
    unreachable!()
}

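// Lane-wise fused multiply-add over SIMD vectors: each output lane is
// `wasm_mul_add(x, y, z)`, i.e. `x * y + z`. A sketch of the semantics
// (illustrative values only):
//
// ```ignore
// // x = [1.0, 2.0, 3.0, 4.0], y = [2.0; 4], z = [0.5; 4]
// // fma_f32x4(store, instance, x, y, z) => [2.5, 4.5, 6.5, 8.5]
// ```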
fn fma_f32x4(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f32x4,
    y: f32x4,
    z: f32x4,
) -> f32x4 {
    union U {
        reg: f32x4,
        mem: [f32; 4],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[2], y[2], z[2]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[3], y[3], z[3]),
            ],
        }
        .reg
    }
}

fn fma_f64x2(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f64x2,
    y: f64x2,
    z: f64x2,
) -> f64x2 {
    union U {
        reg: f64x2,
        mem: [f64; 2],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
            ],
        }
        .reg
    }
}

/// This intrinsic is just used to record trap information.
///
/// The `Infallible` "ok" type here means that this never returns success; it
/// only ever returns an error, and it hooks into the machinery that handles
/// `Result` values to record such trap information.
fn trap(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    code: u8,
) -> Result<Infallible, TrapReason> {
    Err(TrapReason::Wasm(
        wasmtime_environ::Trap::from_u8(code).unwrap(),
    ))
}
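
// A sketch of the intended use (hypothetical call, assuming the `u8` is a
// `Trap` discriminant that `from_u8` round-trips):
//
// ```ignore
// // trap(store, instance, Trap::IntegerDivisionByZero as u8)
// //     => Err(TrapReason::Wasm(Trap::IntegerDivisionByZero))
// ```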

fn raise(store: &mut dyn VMStore, _instance: InstanceId) {
    // SAFETY: this is only called from compiled Wasm, so we know that Wasm
    // has already been entered. It's a dynamic safety precondition that the
    // trap information has already been arranged to be present.
    unsafe { crate::runtime::vm::traphandlers::raise_preexisting_trap(store) }
}

// Builtins for continuations. These are thin wrappers around the respective
// definitions in stack_switching.rs.
#[cfg(feature = "stack-switching")]
fn cont_new(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> Result<Option<AllocationSize>> {
    let ans =
        crate::vm::stack_switching::cont_new(store, instance, func, param_count, result_count)?;
    // Hand the new continuation object back to compiled code encoded as a
    // pointer-sized value via `AllocationSize`.
    Ok(Some(AllocationSize(ans.cast::<u8>() as usize)))
}

#[cfg(feature = "gc")]
fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 {
    instance.as_u32()
}

#[cfg(feature = "gc")]
fn throw_ref(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    exnref: u32,
) -> Result<(), TrapReason> {
    // A null reference traps immediately rather than becoming a pending
    // exception.
    let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?;
    // Clone the GC ref out of the raw `u32` the caller passed and downcast
    // it to an exception object.
    let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref);
    let exnref = exnref
        .into_exnref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an exception object");
    // Record the exception on the store, then unwind with an exceptional
    // trap reason so the exception-handling machinery can pick it up.
    store.set_pending_exception(exnref);
    Err(TrapReason::Exception)
}