wasmtime/runtime/vm/libcalls.rs

//! Runtime library calls.
//!
//! Note that Wasm compilers may sometimes perform these inline rather than
//! calling them, particularly when CPUs have special instructions which compute
//! them directly.
//!
//! These functions are called by compiled Wasm code, and therefore must take
//! certain care about some things:
//!
//! * They must only contain basic, raw i32/i64/f32/f64/pointer parameters that
//!   are safe to pass across the system ABI.
//!
//! * If any nested function propagates an `Err(trap)` out to the library
//!   function frame, we need to raise it. This involves some nasty and quite
//!   unsafe code under the covers! Notably, after raising the trap, drops
//!   **will not** be run for local variables! This can lead to things like
//!   leaking `InstanceHandle`s which leads to never deallocating JIT code,
//!   instances, and modules if we are not careful!
//!
//! * The libcall must be entered via a Wasm-to-libcall trampoline that saves
//!   the last Wasm FP and PC for stack walking purposes. (For more details, see
//!   `crates/wasmtime/src/runtime/vm/backtrace.rs`.)
//!
//! To make it easier to correctly handle all these things, **all** libcalls
//! must be defined via the `libcall!` helper macro! See its doc comments below
//! for an example, or just look at the rest of the file.
//!
//! ## Dealing with `externref`s
//!
//! When receiving a raw `*mut u8` that is actually a `VMExternRef` reference,
//! convert it into a proper `VMExternRef` with `VMExternRef::clone_from_raw` as
//! soon as possible. Any GC that runs before the raw pointer is converted into
//! a reference can potentially collect the referenced object, which could lead
//! to a use after free.
//!
//! Avoid this by eagerly converting into a proper `VMExternRef`! (Unfortunately
//! there is no macro to help us automatically get this correct, so stay
//! vigilant!)
//!
//! ```ignore
//! pub unsafe extern "C" fn my_libcall_takes_ref(raw_extern_ref: *mut u8) {
//!     // Before `clone_from_raw`, `raw_extern_ref` is potentially unrooted,
//!     // and doing GC here could lead to use after free!
//!
//!     let my_extern_ref = if raw_extern_ref.is_null() {
//!         None
//!     } else {
//!         Some(VMExternRef::clone_from_raw(raw_extern_ref))
//!     };
//!
//!     // Now that we did `clone_from_raw`, it is safe to do a GC (or do
//!     // anything else that might transitively GC, like call back into
//!     // Wasm!)
//! }
//! ```

#[cfg(feature = "stack-switching")]
use super::stack_switching::VMContObj;
use crate::prelude::*;
use crate::runtime::store::{Asyncness, InstanceId, StoreInstanceId, StoreOpaque};
#[cfg(feature = "gc")]
use crate::runtime::vm::VMGcRef;
use crate::runtime::vm::table::TableElementType;
use crate::runtime::vm::vmcontext::VMFuncRef;
use crate::runtime::vm::{
    self, HostResultHasUnwindSentinel, SendSyncPtr, TrapReason, VMStore, f32x4, f64x2, i8x16,
};
use core::convert::Infallible;
use core::ptr::NonNull;
#[cfg(feature = "threads")]
use core::time::Duration;
use wasmtime_core::math::WasmFloat;
use wasmtime_environ::{
    DataIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, FuncIndex, MemoryIndex,
    TableIndex, Trap,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::AccessError::{
    DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds,
};

/// Raw functions which are actually called from compiled code.
///
/// Invocation of a builtin currently looks like:
///
/// * A wasm function calls a cranelift-compiled trampoline that's generated
///   once-per-builtin.
/// * The cranelift-compiled trampoline performs any necessary actions to exit
///   wasm, such as dealing with fp/pc/etc.
/// * The cranelift-compiled trampoline loads a function pointer from an array
///   stored in `VMContext`. That function pointer is defined in this module.
/// * This module runs, handling things like `catch_unwind` and `Result` and
///   such.
/// * This module delegates to the outer module (this file) which has the actual
///   implementation.
///
/// For more information on converting from host-defined values to Cranelift ABI
/// values see the `catch_unwind_and_record_trap` function.
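///
/// As a sketch, a builtin declared as `builtin(vmctx: vmctx, delta: u64,
/// index: u32) -> pointer` (a hypothetical name here; the real set comes from
/// `foreach_builtin_function!` below) expands to roughly:
///
/// ```ignore
/// pub unsafe extern "C" fn builtin(
///     vmctx: NonNull<VMContext>,
///     delta: u64,
///     index: u32,
/// ) -> *mut u8 {
///     unsafe {
///         Instance::enter_host_from_wasm(vmctx, |store, instance| {
///             super::builtin(store, instance, delta, index)
///         })
///     }
/// }
/// ```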
pub mod raw {
    use crate::runtime::vm::{Instance, VMContext, f32x4, f64x2, i8x16};
    use core::ptr::NonNull;

    macro_rules! libcall {
        (
            $(
                $( #[cfg($attr:meta)] )?
                $name:ident( vmctx: vmctx $(, $pname:ident: $param:ident )* ) $(-> $result:ident)?;
            )*
        ) => {
            $(
                // This is the direct entrypoint from the compiled module which
                // still has the raw signature.
                //
                // This will delegate to the actual implementation in the outer
                // module and automatically perform `catch_unwind` along with
                // conversion of the return value in the face of traps.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                #[allow(unused_variables, reason = "macro-generated")]
                #[allow(unreachable_code, reason = "some types uninhabited on some platforms")]
                pub unsafe extern "C" fn $name(
                    vmctx: NonNull<VMContext>,
                    $( $pname : libcall!(@ty $param), )*
                ) $(-> libcall!(@ty $result))? {
                    $(#[cfg($attr)])?
                    unsafe {
                        Instance::enter_host_from_wasm(vmctx, |store, instance| {
                            super::$name(store, instance, $($pname),*)
                        })
                    }
                    $(
                        #[cfg(not($attr))]
                        {
                            let _ = vmctx;
                            unreachable!();
                        }
                    )?
                }

                // This works around a `rustc` bug where compiling with LTO
                // will sometimes strip out some of these symbols resulting
                // in a linking failure.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                const _: () = {
                    #[used]
                    static I_AM_USED: unsafe extern "C" fn(
                        NonNull<VMContext>,
                        $( $pname : libcall!(@ty $param), )*
                    ) $( -> libcall!(@ty $result))? = $name;
                };
            )*
        };

        (@ty u32) => (u32);
        (@ty u64) => (u64);
        (@ty f32) => (f32);
        (@ty f64) => (f64);
        (@ty u8) => (u8);
        (@ty i8x16) => (i8x16);
        (@ty f32x4) => (f32x4);
        (@ty f64x2) => (f64x2);
        (@ty bool) => (bool);
        (@ty pointer) => (*mut u8);
        (@ty size) => (usize);
    }

    wasmtime_environ::foreach_builtin_function!(libcall);
}

/// Uses the `$store` provided to invoke the async closure `$f` and block on the
/// result.
///
/// This will internally multiplex on `$store.with_blocking(...)` vs simply
/// asserting the closure is ready, depending on whether the store's
/// `can_block` flag is set.
///
/// FIXME: ideally this would be a function, not a macro. If this were a
/// function, though, it would require placing a bound on the async closure
/// `$f` requiring that the returned future is itself `Send`. That's not
/// possible in Rust right now, unfortunately.
///
/// As a workaround this takes advantage of the fact that we can assume that the
/// compiler can infer that the future returned by `$f` is indeed `Send` so long
/// as we don't try to name the type or place it behind a generic. In the future
/// when we can bound the return future of async functions with `Send` this
/// macro should be replaced with an equivalent function.
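///
/// A usage sketch, mirroring the growth libcalls below:
///
/// ```ignore
/// let (mut limiter, store) = store.resource_limiter_and_store_opaque();
/// block_on!(store, async |store, asyncness| {
///     // `.await` on store-related futures here...
///     Ok(())
/// })?
/// ```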
macro_rules! block_on {
    ($store:expr, $f:expr) => {{
        let store: &mut StoreOpaque = $store;
        let closure = assert_async_fn_closure($f);

        if store.can_block() {
            // If the store can block then that means it's on a fiber. We can
            // forward to `block_on` and everything should be fine and dandy.
            #[cfg(feature = "async")]
            {
                store.with_blocking(|store, cx| cx.block_on(closure(store, Asyncness::Yes)))
            }
            #[cfg(not(feature = "async"))]
            {
                unreachable!()
            }
        } else {
            // If the store cannot block, it's not on a fiber. That means that
            // we get at most one poll of `closure(store)` here. Typically this
            // means that nothing async is configured in the store and one poll
            // should be all we need. There are niche cases where one poll is
            // not sufficient though, for example:
            //
            // * Store is created.
            // * Wasm is called.
            // * Wasm calls host.
            // * Host configures an async resource limiter, returns back to
            //   wasm.
            // * Wasm grows memory.
            // * Limiter wants to block asynchronously.
            //
            // Technically there's nothing wrong with this, but it means that
            // we're in wasm and one poll is not enough here. Given the niche
            // nature of this scenario, and given that it's not really expected
            // to work, failures in `closure` are translated to a trap. This
            // trap is only expected to show up in niche-ish scenarios like the
            // above, not for actual blocking work, as that would otherwise be
            // too surprising.
            vm::one_poll(closure(store, Asyncness::No)).ok_or_else(|| {
                crate::format_err!(
                    "

A synchronously called wasm function invoked an async-defined libcall which
failed to complete synchronously and is thus raising a trap. It's expected
that this indicates that the store was configured to do async things after the
original synchronous entrypoint to wasm was called. That's generally not
supported in Wasmtime and an async entrypoint should be used instead. If you're
seeing this message in error please file an issue on Wasmtime.

"
                )
            })
        }
    }};
}

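/// Identity helper that `block_on!` uses to pin its closure argument to the
/// `AsyncFnOnce(&mut StoreOpaque, Asyncness)` shape for type inference.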
fn assert_async_fn_closure<F, R>(f: F) -> F
where
    F: AsyncFnOnce(&mut StoreOpaque, Asyncness) -> R,
{
    f
}

fn memory_grow(
    store: &mut dyn VMStore,
    instance: InstanceId,
    delta: u64,
    memory_index: u32,
) -> Result<Option<AllocationSize>> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let instance = store.instance_mut(instance);
        let module = instance.env_module();
        let page_size_log2 = module.memories[module.memory_index(memory_index)].page_size_log2;

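        // The memory grows in bytes; shift by the page size to report a page
        // count back to the guest.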
        let result = instance
            .memory_grow(limiter, memory_index, delta)
            .await?
            .map(|size_in_bytes| AllocationSize(size_in_bytes >> page_size_log2));

        Ok(result)
    })?
}

/// A helper structure to represent the return value of a memory or table growth
/// call.
///
/// This represents a byte- or element-based count of the size of an item on the
/// host. For example, for a memory this is how many bytes large the memory is,
/// and for a table it is how many elements large it is. It's assumed that the
/// value here is never -1 or -2 as that would mean the entire host address
/// space is allocated, which is not possible.
struct AllocationSize(usize);

/// Special implementation for growth-related libcalls.
///
/// Here the optional return value means:
///
/// * `Some(val)` - the growth succeeded and the previous size of the item was
///   `val`.
/// * `None` - the growth failed.
///
/// The failure case returns -1 (or `usize::MAX` as an unsigned integer) and the
/// successful case returns the `val` itself. Note that -2 (`usize::MAX - 1`
/// when unsigned) is used as a sentinel to indicate an unwind, as no valid
/// allocation can be that large.
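///
/// A sketch of the resulting encoding:
///
/// ```ignore
/// Some(AllocationSize(n)) => n as *mut u8          // success
/// None                    => usize::MAX as *mut u8 // failure (-1)
/// // usize::MAX - 1 (-2) is reserved as the unwind sentinel.
/// ```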
unsafe impl HostResultHasUnwindSentinel for Option<AllocationSize> {
    type Abi = *mut u8;
    const SENTINEL: *mut u8 = (usize::MAX - 1) as *mut u8;

    fn into_abi(self) -> *mut u8 {
        match self {
            Some(size) => {
                debug_assert!(size.0 < (usize::MAX - 1));
                size.0 as *mut u8
            }
            None => usize::MAX as *mut u8,
        }
    }
}

/// Implementation of `table.grow` for `funcref` tables.
unsafe fn table_grow_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: *mut u8,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = NonNull::new(init_value.cast::<VMFuncRef>()).map(SendSyncPtr::new);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Func,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_func(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.grow` for GC-reference tables.
#[cfg(feature = "gc")]
fn table_grow_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: u32,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = VMGcRef::from_raw_u32(init_value);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let (gc_store, mut instance) = store.optional_gc_store_and_instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::GcRef,
        ));

        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table
                    .grow_gc_ref(limiter, gc_store, delta, element.as_ref())
                    .await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

#[cfg(feature = "stack-switching")]
unsafe fn table_grow_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    // The following two values together form the initial Option<VMContObj>.
    // A None value is indicated by the pointer being null.
    init_value_contref: *mut u8,
    init_value_revision: usize,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = unsafe { VMContObj::from_raw_parts(init_value_contref, init_value_revision) };
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Cont,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_cont(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.fill` for `funcref`s.
unsafe fn table_fill_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: *mut u8,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => {
            let val = NonNull::new(val.cast::<VMFuncRef>());
            table.fill_func(dst, val, len)?;
            Ok(())
        }
        TableElementType::GcRef => unreachable!(),
        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "gc")]
fn table_fill_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<()> {
    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => unreachable!(),
        TableElementType::GcRef => {
            let gc_ref = VMGcRef::from_raw_u32(val);
            table.fill_gc_ref(gc_store, dst, gc_ref.as_ref(), len)?;
            Ok(())
        }

        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "stack-switching")]
unsafe fn table_fill_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    value_contref: *mut u8,
    value_revision: usize,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Cont => {
            let contobj = unsafe { VMContObj::from_raw_parts(value_contref, value_revision) };
            table.fill_cont(dst, contobj, len)?;
            Ok(())
        }
        _ => panic!("Wrong table filling function"),
    }
}

// Implementation of `table.copy`.
fn table_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_table_index: u32,
    src_table_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let dst_table_index = TableIndex::from_u32(dst_table_index);
    let src_table_index = TableIndex::from_u32(src_table_index);
    let store = store.store_opaque_mut();
    let mut instance = store.instance_mut(instance);

    // Convert the two table indices relative to `instance` into two
    // defining instances and the defined table index within that instance.
    let (dst_def_index, dst_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(dst_table_index);
    let dst_instance_id = dst_instance.id();
    let (src_def_index, src_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(src_table_index);
    let src_instance_id = src_instance.id();

    let src_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), src_instance_id),
        src_def_index,
    );
    let dst_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), dst_instance_id),
        dst_def_index,
    );

    // SAFETY: this is only safe if the two tables have the same type, and that
    // was validated during wasm validation.
    unsafe { crate::Table::copy_raw(store, &dst_table, dst, &src_table, src, len) }
}
511
512// Implementation of `table.init`.
513fn table_init(
514    store: &mut dyn VMStore,
515    instance: InstanceId,
516    table_index: u32,
517    elem_index: u32,
518    dst: u64,
519    src: u64,
520    len: u64,
521) -> Result<()> {
522    let table_index = TableIndex::from_u32(table_index);
523    let elem_index = ElemIndex::from_u32(elem_index);
524
525    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
526    block_on!(store, async |store, asyncness| {
527        vm::Instance::table_init(
528            store,
529            limiter.as_mut(),
530            asyncness,
531            instance,
532            table_index,
533            elem_index,
534            dst,
535            src,
536            len,
537        )
538        .await
539    })??;
540    Ok(())
541}
542
543// Implementation of `elem.drop`.
544fn elem_drop(store: &mut dyn VMStore, instance: InstanceId, elem_index: u32) {
545    let elem_index = ElemIndex::from_u32(elem_index);
546    store.instance_mut(instance).elem_drop(elem_index)
547}
548
549// Implementation of `memory.copy`.
550fn memory_copy(
551    store: &mut dyn VMStore,
552    instance: InstanceId,
553    dst_index: u32,
554    dst: u64,
555    src_index: u32,
556    src: u64,
557    len: u64,
558) -> Result<(), Trap> {
559    let src_index = MemoryIndex::from_u32(src_index);
560    let dst_index = MemoryIndex::from_u32(dst_index);
561    store
562        .instance_mut(instance)
563        .memory_copy(dst_index, dst, src_index, src, len)
564}
565
566// Implementation of `memory.fill` for locally defined memories.
567fn memory_fill(
568    store: &mut dyn VMStore,
569    instance: InstanceId,
570    memory_index: u32,
571    dst: u64,
572    val: u32,
573    len: u64,
574) -> Result<(), Trap> {
575    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
576    #[expect(clippy::cast_possible_truncation, reason = "known to truncate here")]
577    store
578        .instance_mut(instance)
579        .memory_fill(memory_index, dst, val as u8, len)
580}
581
582// Implementation of `memory.init`.
583fn memory_init(
584    store: &mut dyn VMStore,
585    instance: InstanceId,
586    memory_index: u32,
587    data_index: u32,
588    dst: u64,
589    src: u32,
590    len: u32,
591) -> Result<(), Trap> {
592    let memory_index = MemoryIndex::from_u32(memory_index);
593    let data_index = DataIndex::from_u32(data_index);
594    store
595        .instance_mut(instance)
596        .memory_init(memory_index, data_index, dst, src, len)
597}
598
599// Implementation of `ref.func`.
600fn ref_func(store: &mut dyn VMStore, instance: InstanceId, func_index: u32) -> NonNull<u8> {
601    let (instance, registry) = store.instance_and_module_registry_mut(instance);
602    instance
603        .get_func_ref(registry, FuncIndex::from_u32(func_index))
604        .expect("ref_func: funcref should always be available for given func index")
605        .cast()
606}
607
608// Implementation of `data.drop`.
609fn data_drop(store: &mut dyn VMStore, instance: InstanceId, data_index: u32) {
610    let data_index = DataIndex::from_u32(data_index);
611    store.instance_mut(instance).data_drop(data_index)
612}
613
// Returns a table entry after lazily initializing it.
fn table_get_lazy_init_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    index: u64,
) -> *mut u8 {
    let table_index = TableIndex::from_u32(table_index);
    let (instance, registry) = store.instance_and_module_registry_mut(instance);
    let table = instance.get_table_with_lazy_init(registry, table_index, core::iter::once(index));
    let elem = table
        .get_func(index)
        .expect("table access already bounds-checked");

    match elem {
        Some(ptr) => ptr.as_ptr().cast(),
        None => core::ptr::null_mut(),
    }
}

/// Drop a GC reference.
#[cfg(feature = "gc-drc")]
fn drop_gc_ref(store: &mut dyn VMStore, _instance: InstanceId, gc_ref: u32) {
    log::trace!("libcalls::drop_gc_ref({gc_ref:#x})");
    let gc_ref = VMGcRef::from_raw_u32(gc_ref).expect("non-null VMGcRef");
    store
        .store_opaque_mut()
        .unwrap_gc_store_mut()
        .drop_gc_ref(gc_ref);
}

/// Grow the GC heap.
#[cfg(feature = "gc-null")]
fn grow_gc_heap(store: &mut dyn VMStore, _instance: InstanceId, bytes_needed: u64) -> Result<()> {
    let orig_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        store
            .gc(limiter.as_mut(), None, Some(bytes_needed), asyncness)
            .await;
    })?;

    // JIT code relies on the memory having grown by `bytes_needed` bytes if
    // this libcall returns successfully, so trap if we didn't grow that much.
    let new_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();
    if orig_len
        .checked_add(bytes_needed)
        .is_none_or(|expected_len| new_len < expected_len)
    {
        return Err(crate::Trap::AllocationTooLarge.into());
    }

    Ok(())
}

/// Allocate a raw, uninitialized GC object for Wasm code.
///
/// The Wasm code is responsible for initializing the object.
#[cfg(feature = "gc-drc")]
fn gc_alloc_raw(
    store: &mut dyn VMStore,
    instance: InstanceId,
    kind_and_reserved: u32,
    module_interned_type_index: u32,
    size: u32,
    align: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::vm::VMGcHeader;
    use core::alloc::Layout;
    use wasmtime_environ::{ModuleInternedTypeIndex, VMGcKind};

    let kind = VMGcKind::from_high_bits_of_u32(kind_and_reserved);
    log::trace!("gc_alloc_raw(kind={kind:?}, size={size}, align={align})");

    let module = store
        .instance(instance)
        .runtime_module()
        .expect("should never allocate GC types defined in a dummy module");

    let module_interned_type_index = ModuleInternedTypeIndex::from_u32(module_interned_type_index);
    let shared_type_index = module
        .signatures()
        .shared_type(module_interned_type_index)
        .expect("should have engine type index for module type index");

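    // The high bits of `kind_and_reserved` carry the `VMGcKind`; the low bits
    // are stashed in the header's reserved field.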
    let mut header = VMGcHeader::from_kind_and_index(kind, shared_type_index);
    header.set_reserved_u26(kind_and_reserved & VMGcKind::UNUSED_MASK);

    let size = usize::try_from(size).unwrap();
    let align = usize::try_from(align).unwrap();
    assert!(align.is_power_of_two());
    let layout = Layout::from_size_align(size, align).map_err(|e| {
        let err = Error::from(crate::Trap::AllocationTooLarge);
        err.context(e)
    })?;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let gc_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), asyncness, |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_raw(header, layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

// Intern a `funcref` into the GC heap, returning its `FuncRefTableId`.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
unsafe fn intern_func_ref_for_gc_heap(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    func_ref: *mut u8,
) -> Result<u32> {
    use crate::{store::AutoAssertNoGc, vm::SendSyncPtr};
    use core::ptr::NonNull;

    let mut store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref = func_ref.cast::<VMFuncRef>();
    let func_ref = NonNull::new(func_ref).map(SendSyncPtr::new);

    let func_ref_id = unsafe {
        store
            .require_gc_store_mut()?
            .func_ref_table
            .intern(func_ref)
    };
    Ok(func_ref_id.into_raw())
}

// Get the raw `VMFuncRef` pointer associated with a `FuncRefTableId` from an
// earlier `intern_func_ref_for_gc_heap` call.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
fn get_interned_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func_ref_id: u32,
    module_interned_type_index: u32,
) -> *mut u8 {
    use super::FuncRefTableId;
    use crate::store::AutoAssertNoGc;
    use wasmtime_environ::{ModuleInternedTypeIndex, packed_option::ReservedValue};

    let store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref_id = FuncRefTableId::from_raw(func_ref_id);
    let module_interned_type_index = ModuleInternedTypeIndex::from_bits(module_interned_type_index);

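    // A reserved type index means no type check is required here, so the
    // funcref can be fetched untyped; otherwise look it up by its engine type.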
    let func_ref = if module_interned_type_index.is_reserved_value() {
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_untyped(func_ref_id)
    } else {
        let types = store.engine().signatures();
        let engine_ty = store
            .instance(instance)
            .engine_type_index(module_interned_type_index);
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_typed(types, func_ref_id, engine_ty)
    };

    func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast())
}

/// Implementation of the `array.new_data` instruction.
#[cfg(feature = "gc")]
fn array_new_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let data_index = DataIndex::from_u32(data_index);
        let instance = store.instance(instance_id);

        // Calculate the byte-length of the data (as opposed to the element-length
        // of the array).
        let data_range = instance.wasm_data_range(data_index);
        let shared_ty = instance.engine_type_index(array_type_index);
        let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
        let one_elem_size = array_ty
            .element_type()
            .data_byte_size()
            .expect("Wasm validation ensures that this type has a defined byte size");
        let byte_len = len
            .checked_mul(one_elem_size)
            .and_then(|x| usize::try_from(x).ok())
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Get the data from the segment, checking bounds.
        let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
        instance
            .wasm_data(data_range.clone())
            .get(src..)
            .and_then(|d| d.get(..byte_len))
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Allocate the (uninitialized) array.
        let gc_layout = store
            .engine()
            .signatures()
            .layout(shared_ty)
            .expect("array types have GC layouts");
        let array_layout = gc_layout.unwrap_array();
        let array_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), asyncness, |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_uninit_array(shared_ty, len, &array_layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
        let gc_store = gc_store.unwrap();
        let data = &instance.wasm_data(data_range)[src..][..byte_len];

        // Copy the data into the array, initializing it.
        gc_store
            .gc_object_data(array_ref.as_gc_ref())
            .copy_from_slice(array_layout.base_size, data);

        // Return the array to Wasm!
        let raw = gc_store.expose_gc_ref_to_wasm(array_ref.into());
        Ok(raw)
    })?
}

/// Implementation of the `array.init_data` instruction.
#[cfg(feature = "gc")]
fn array_init_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    let instance = store.instance(instance_id);

    log::trace!(
        "array.init_data(array={array:#x}, dst={dst}, data_index={data_index:?}, src={src}, len={len})",
    );

    // Null check the array.
    let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
    let array = gc_ref
        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an array");

    let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds)?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds)?;

    // Bounds check the array.
    let array_len = array.len(store.store_opaque());
    let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Calculate the byte length from the array length.
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type has a defined byte size");
    let data_len = len
        .checked_mul(usize::try_from(one_elem_size).unwrap())
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Get the data from the segment, checking its bounds.
    let data_range = instance.wasm_data_range(data_index);
    instance
        .wasm_data(data_range.clone())
        .get(src..)
        .and_then(|d| d.get(..data_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Copy the data into the array.

    let dst_offset = u32::try_from(dst)
        .unwrap()
        .checked_mul(one_elem_size)
        .unwrap();

    let array_layout = store
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = array_layout.unwrap_array();

    let obj_offset = array_layout.base_size.checked_add(dst_offset).unwrap();

    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
    let gc_store = gc_store.unwrap();
    let data = &instance.wasm_data(data_range)[src..][..data_len];
    gc_store
        .gc_object_data(array.as_gc_ref())
        .copy_from_slice(obj_offset, data);

    Ok(())
}

#[cfg(feature = "gc")]
fn array_new_elem(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::{
        ArrayRef, ArrayRefPre, ArrayType, Func, OpaqueRootScope, RootedGcRefImpl, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    // Convert indices to their typed forms.
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let elem_index = ElemIndex::from_u32(elem_index);
    let instance = store.instance(instance_id);

    let mut storage = None;
    let elements = instance.passive_element_segment(&mut storage, elem_index);

    let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let pre = ArrayRefPre::_new(store, array_ty);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let mut store = OpaqueRootScope::new(store);
        // Turn the elements into `Val`s.
        let mut vals = Vec::with_capacity(usize::try_from(elements.len()).unwrap());
        match elements {
            TableSegmentElements::Functions(fs) => {
                let store_id = store.id();
                let (mut instance, registry) = store.instance_and_module_registry_mut(instance_id);
                vals.extend(
                    fs.get(src..)
                        .and_then(|s| s.get(..len))
                        .ok_or_else(|| Trap::TableOutOfBounds)?
                        .iter()
                        .map(|f| {
                            let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                            let func = unsafe {
                                raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p))
                            };
                            Val::FuncRef(func)
                        }),
                );
            }
            TableSegmentElements::Expressions(xs) => {
                let xs = xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?;

                let mut const_context = ConstEvalContext::new(instance_id, asyncness);
                let mut const_evaluator = ConstExprEvaluator::default();

                for x in xs.iter() {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
            }
        }

        let array =
            ArrayRef::_new_fixed_async(&mut store, limiter.as_mut(), &pre, &vals, asyncness)
                .await?;

        let mut store = AutoAssertNoGc::new(&mut store);
        let gc_ref = array.try_clone_gc_ref(&mut store)?;
        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

#[cfg(feature = "gc")]
fn array_init_elem(
    store: &mut dyn VMStore,
    instance: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{
        ArrayRef, Func, OpaqueRootScope, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let mut store = OpaqueRootScope::new(store);

        // Convert the indices into their typed forms.
        let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let elem_index = ElemIndex::from_u32(elem_index);

        log::trace!(
            "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})",
        );

        // Convert the raw GC ref into a `Rooted<ArrayRef>`.
        let array = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
        let array = store.unwrap_gc_store_mut().clone_gc_ref(&array);
        let array = {
            let mut no_gc = AutoAssertNoGc::new(&mut store);
            ArrayRef::from_cloned_gc_ref(&mut no_gc, array)
        };

        // Bounds check the destination within the array.
        let array_len = array._len(&store)?;
        log::trace!("array_len = {array_len}");
        if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
            return Err(Trap::ArrayOutOfBounds.into());
        }

        // Get the passive element segment.
        let mut storage = None;
        let store_id = store.id();
        let (mut instance, registry) = store.instance_and_module_registry_mut(instance);
        let elements = instance.passive_element_segment(&mut storage, elem_index);

        // Convert array offsets into `usize`s.
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        // Turn the elements into `Val`s.
        let vals = match elements {
            TableSegmentElements::Functions(fs) => fs
                .get(src..)
                .and_then(|s| s.get(..len))
                .ok_or_else(|| Trap::TableOutOfBounds)?
                .iter()
                .map(|f| {
                    let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                    let func = unsafe { raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p)) };
                    Val::FuncRef(func)
                })
                .collect::<Vec<_>>(),
            TableSegmentElements::Expressions(xs) => {
                let mut const_context = ConstEvalContext::new(instance.id(), asyncness);
                let mut const_evaluator = ConstExprEvaluator::default();

                let mut vals = Vec::new();
                for x in xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?
                {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
                vals
            }
        };

        // Copy the values into the array.
        for (i, val) in vals.into_iter().enumerate() {
            let i = u32::try_from(i).unwrap();
            let j = dst.checked_add(i).unwrap();
            array._set(&mut store, j, val)?;
        }

        Ok(())
    })?
}

// TODO: Specialize this libcall for only non-GC array elements, so we never
// have to do GC barriers and their associated indirect calls through the `dyn
// GcHeap`. Instead, implement those copies inline in Wasm code. Then, use bulk
// `memcpy`-style APIs to do the actual copies here.
#[cfg(feature = "gc")]
fn array_copy(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    dst_array: u32,
    dst: u32,
    src_array: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{ArrayRef, OpaqueRootScope, store::AutoAssertNoGc};

    log::trace!(
        "array.copy(dst_array={dst_array:#x}, dst_index={dst}, src_array={src_array:#x}, src_index={src}, len={len})",
    );

    let mut store = OpaqueRootScope::new(store.store_opaque_mut());
    let mut store = AutoAssertNoGc::new(&mut store);

    // Convert the raw GC refs into `Rooted<ArrayRef>`s.
    let dst_array = VMGcRef::from_raw_u32(dst_array).ok_or_else(|| Trap::NullReference)?;
    let dst_array = store.unwrap_gc_store_mut().clone_gc_ref(&dst_array);
    let dst_array = ArrayRef::from_cloned_gc_ref(&mut store, dst_array);
    let src_array = VMGcRef::from_raw_u32(src_array).ok_or_else(|| Trap::NullReference)?;
    let src_array = store.unwrap_gc_store_mut().clone_gc_ref(&src_array);
    let src_array = ArrayRef::from_cloned_gc_ref(&mut store, src_array);

    // Bounds check the destination array's elements.
    let dst_array_len = dst_array._len(&store)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > dst_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Bounds check the source array's elements.
    let src_array_len = src_array._len(&store)?;
    if src.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > src_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    let mut store = AutoAssertNoGc::new(&mut store);
    // If `src_array` and `dst_array` are the same array, then we are
    // potentially doing an overlapping copy, so make sure to copy elements in
    // the order that doesn't clobber the source elements before they are
    // copied. If they are different arrays, the order doesn't matter, but we
    // simply don't bother checking.
    if src > dst {
        for i in 0..len {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    } else {
        for i in (0..len).rev() {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    }
    Ok(())
}

#[cfg(feature = "gc")]
fn is_subtype(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    actual_engine_type: u32,
    expected_engine_type: u32,
) -> u32 {
    use wasmtime_environ::VMSharedTypeIndex;

    let actual = VMSharedTypeIndex::from_u32(actual_engine_type);
    let expected = VMSharedTypeIndex::from_u32(expected_engine_type);

    let is_subtype: bool = store.engine().signatures().is_subtype(actual, expected);

    log::trace!("is_subtype(actual={actual:?}, expected={expected:?}) -> {is_subtype}",);
    is_subtype as u32
}

// Implementation of `memory.atomic.notify` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_notify(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    count: u32,
) -> Result<u32, Trap> {
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_notify(addr_index, count)
}

// Implementation of `memory.atomic.wait32` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait32(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u32,
    timeout: u64,
) -> Result<u32, Trap> {
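    // A negative timeout (when reinterpreted as `i64`) means "wait
    // indefinitely"; otherwise the value is a relative timeout in nanoseconds.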
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait32(addr_index, expected, timeout)? as u32)
}

// Implementation of `memory.atomic.wait64` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait64(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u64,
    timeout: u64,
) -> Result<u32, Trap> {
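    // As with `memory_atomic_wait32` above, a negative timeout means "wait
    // indefinitely"; otherwise it's a relative timeout in nanoseconds.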
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait64(addr_index, expected, timeout)? as u32)
}

// Hook for when an instance runs out of fuel.
fn out_of_gas(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    block_on!(store, async |store, _| {
        if !store.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if store.fuel_yield_interval.is_some() {
            crate::runtime::vm::Yield::new().await;
        }
        Ok(())
    })?
}

// Hook for when an instance observes that the epoch has changed.
#[cfg(target_has_atomic = "64")]
fn new_epoch(store: &mut dyn VMStore, _instance: InstanceId) -> Result<NextEpoch> {
    use crate::UpdateDeadline;

    #[cfg(feature = "debug")]
    {
        store.block_on_debug_handler(crate::DebugEvent::EpochYield)?;
    }

    let update_deadline = store.new_epoch_updated_deadline()?;
    block_on!(store, async move |store, asyncness| {
        #[cfg(not(feature = "async"))]
        let _ = asyncness;

        let delta = match update_deadline {
            UpdateDeadline::Interrupt => return Err(Trap::Interrupt.into()),
            UpdateDeadline::Continue(delta) => delta,

            // Note that custom errors are used here to avoid tripping up on the
            // `block_on!` message that otherwise assumes
            // async-configuration-after-the-fact.
            #[cfg(feature = "async")]
            UpdateDeadline::Yield(delta) => {
                if asyncness != Asyncness::Yes {
                    bail!(
                        "cannot use `UpdateDeadline::Yield` without using \
                         an async wasm entrypoint",
                    );
                }
                crate::runtime::vm::Yield::new().await;
                delta
            }
            #[cfg(feature = "async")]
            UpdateDeadline::YieldCustom(delta, future) => {
                if asyncness != Asyncness::Yes {
                    bail!(
                        "cannot use `UpdateDeadline::YieldCustom` without using \
                         an async wasm entrypoint",
                    );
                }
                future.await;
                delta
            }
        };

        // Set a new deadline and return the new epoch deadline so
        // the Wasm code doesn't have to reload it.
        store.set_epoch_deadline(delta);
        Ok(NextEpoch(store.get_epoch_deadline()))
    })?
}

struct NextEpoch(u64);

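// The all-ones value is reserved as the unwind sentinel in this libcall's
// return ABI, mirroring the approach used for `AllocationSize` above.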
unsafe impl HostResultHasUnwindSentinel for NextEpoch {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.0
    }
}

// Hook for validating malloc using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_malloc(store: &mut dyn VMStore, instance: InstanceId, addr: u32, len: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.malloc(addr as usize, len as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(DoubleMalloc { addr, len }) => {
                bail!("Double malloc at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Malloc out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

// Hook for validating free using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_free(store: &mut dyn VMStore, instance: InstanceId, addr: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.free(addr as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(InvalidFree { addr }) => {
                bail!("Invalid free at addr {:#x}", addr)
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

// Hook for validating load using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_load(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.read(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidRead { addr, len }) => {
                bail!("Invalid load at addr {:#x} of size {}", addr, len);
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Load out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

// Hook for validating store using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_store(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.write(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidWrite { addr, len }) => {
                bail!("Invalid store at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Store out of bounds at addr {:#x} of size {}", addr, len)
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

1454
1455// Hook for turning wmemcheck load/store validation off when entering a malloc function.
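// Checking is suspended so that the allocator's own bookkeeping loads and
// stores are not themselves reported as invalid; it is turned back on by
// `check_malloc`/`check_free` once the call returns.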
#[cfg(feature = "wmemcheck")]
fn malloc_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

// Hook for turning wmemcheck load/store validation off when entering a free function.
#[cfg(feature = "wmemcheck")]
fn free_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

// Hook for tracking wasm stack updates using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn update_stack_pointer(_store: &mut dyn VMStore, _instance: InstanceId, _value: u32) {
    // TODO: stack tracing has yet to be finalized. For now, all memory below
    // the address of the top of the stack is marked as valid for loads and
    // stores.
    // if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
    //     wmemcheck_state.update_stack_pointer(value as usize);
    // }
}

// Hook for updating the wmemcheck_state memory state vector every time
// memory.grow is called.
#[cfg(feature = "wmemcheck")]
fn update_mem_size(store: &mut dyn VMStore, instance: InstanceId, num_pages: u32) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        const KIB: usize = 1024;
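        // A Wasm page is 64 KiB, so e.g. `num_pages = 2` corresponds to
        // 131_072 bytes.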
        let num_bytes = num_pages as usize * 64 * KIB;
        wmemcheck_state.update_mem_size(num_bytes);
    }
}

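// Scalar rounding libcalls: each defers to the corresponding `WasmFloat`
// helper, which implements Wasm's required rounding semantics (including NaN
// handling).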
fn floor_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_floor()
}

fn floor_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_floor()
}

fn ceil_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_ceil()
}

fn ceil_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_ceil()
}

fn trunc_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_trunc()
}

fn trunc_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_trunc()
}

fn nearest_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_nearest()
}

fn nearest_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_nearest()
}

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.swizzle` instruction when `pshufb` in SSSE3 is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, a: i8x16, b: i8x16) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let a = U { reg: a }.mem;
        let b = U { reg: b }.mem;

        // Use the `swizzle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 pshufb semantics, since Wasmtime uses
        // this to implement `i8x16.swizzle`.
        let select = |arr: &[u8; 16], byte: u8| {
            if byte >= 16 { 0x00 } else { arr[byte as usize] }
        };
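        // For example, `select(&a, 3)` yields `a[3]`, while `select(&a, 0x80)`
        // yields 0 because the index is out of bounds.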

        U {
            mem: [
                select(&a, b[0]),
                select(&a, b[1]),
                select(&a, b[2]),
                select(&a, b[3]),
                select(&a, b[4]),
                select(&a, b[5]),
                select(&a, b[6]),
                select(&a, b[7]),
                select(&a, b[8]),
                select(&a, b[9]),
                select(&a, b[10]),
                select(&a, b[11]),
                select(&a, b[12]),
                select(&a, b[13]),
                select(&a, b[14]),
                select(&a, b[15]),
            ],
        }
        .reg
    }
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, _a: i8x16, _b: i8x16) -> i8x16 {
    unreachable!()
}

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.shuffle` instruction when `pshufb` in SSSE3 is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    a: i8x16,
    b: i8x16,
    c: i8x16,
) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let ab = [U { reg: a }.mem, U { reg: b }.mem];
        let c = U { reg: c }.mem;

        // Use the `shuffle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 pshufb semantics, since Wasmtime uses
        // this to implement `i8x16.shuffle`.
        let select = |arr: &[[u8; 16]; 2], byte: u8| {
            if byte >= 32 {
                0x00
            } else if byte >= 16 {
                arr[1][byte as usize - 16]
            } else {
                arr[0][byte as usize]
            }
        };
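        // For example, index 3 selects `a[3]`, index 19 selects `b[3]`, and
        // any index of 32 or more yields 0.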

        U {
            mem: [
                select(&ab, c[0]),
                select(&ab, c[1]),
                select(&ab, c[2]),
                select(&ab, c[3]),
                select(&ab, c[4]),
                select(&ab, c[5]),
                select(&ab, c[6]),
                select(&ab, c[7]),
                select(&ab, c[8]),
                select(&ab, c[9]),
                select(&ab, c[10]),
                select(&ab, c[11]),
                select(&ab, c[12]),
                select(&ab, c[13]),
                select(&ab, c[14]),
                select(&ab, c[15]),
            ],
        }
        .reg
    }
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    _a: i8x16,
    _b: i8x16,
    _c: i8x16,
) -> i8x16 {
    unreachable!()
}

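// Lane-wise fused multiply-add over `f32x4`: each result lane is
// `x * y + z` computed via `wasm_mul_add`.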
fn fma_f32x4(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f32x4,
    y: f32x4,
    z: f32x4,
) -> f32x4 {
    union U {
        reg: f32x4,
        mem: [f32; 4],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                x[0].wasm_mul_add(y[0], z[0]),
                x[1].wasm_mul_add(y[1], z[1]),
                x[2].wasm_mul_add(y[2], z[2]),
                x[3].wasm_mul_add(y[3], z[3]),
            ],
        }
        .reg
    }
}

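// As above, but computing `x * y + z` for each of the two `f64` lanes.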
fn fma_f64x2(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f64x2,
    y: f64x2,
    z: f64x2,
) -> f64x2 {
    union U {
        reg: f64x2,
        mem: [f64; 2],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [x[0].wasm_mul_add(y[0], z[0]), x[1].wasm_mul_add(y[1], z[1])],
        }
        .reg
    }
}

/// This intrinsic is just used to record trap information.
///
/// The `Infallible` "ok" type here means that this libcall never returns
/// success, only an error; returning that error hooks into the machinery for
/// handling `Result` values so that the trap information gets recorded.
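/// The `code` argument is the `u8` encoding of a `wasmtime_environ::Trap`
/// variant, recovered below via `Trap::from_u8`.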
fn trap(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    code: u8,
) -> Result<Infallible, TrapReason> {
    Err(TrapReason::Wasm(
        wasmtime_environ::Trap::from_u8(code).unwrap(),
    ))
}

fn raise(store: &mut dyn VMStore, _instance: InstanceId) {
    // SAFETY: this is only called from compiled wasm so we know that wasm has
    // already been entered. It's a dynamic safety precondition that the trap
    // information has already been arranged to be present.
    unsafe { crate::runtime::vm::traphandlers::raise_preexisting_trap(store) }
}

// Builtins for continuations. These are thin wrappers around the
// respective definitions in stack_switching.rs.
#[cfg(feature = "stack-switching")]
fn cont_new(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> Result<Option<AllocationSize>> {
    let ans =
        crate::vm::stack_switching::cont_new(store, instance, func, param_count, result_count)?;
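    // Report success back to compiled code as the continuation's raw address
    // wrapped in `AllocationSize`.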
    Ok(Some(AllocationSize(ans.cast::<u8>() as usize)))
}

#[cfg(feature = "gc")]
fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 {
    instance.as_u32()
}

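// Implements the `throw_ref` instruction: installs the given exception object
// as the store's pending exception and then unwinds with
// `TrapReason::Exception`.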
#[cfg(feature = "gc")]
fn throw_ref(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    exnref: u32,
) -> Result<(), TrapReason> {
    let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?;
    let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref);
    let exnref = exnref
        .into_exnref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an exception object");
    store.set_pending_exception(exnref);
    Err(TrapReason::Exception)
}

fn breakpoint(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    #[cfg(feature = "debug")]
    {
        log::trace!("hit breakpoint");
        store.block_on_debug_handler(crate::DebugEvent::Breakpoint)?;
    }
    // Avoid an unused-argument warning in builds without the `debug` feature.
    let _ = store;
    Ok(())
}