wasmtime/runtime/vm/libcalls.rs

//! Runtime library calls.
//!
//! Note that Wasm compilers may sometimes perform these inline rather than
//! calling them, particularly when CPUs have special instructions which compute
//! them directly.
//!
//! These functions are called by compiled Wasm code, and therefore must take
//! certain care about some things:
//!
//! * They must only contain basic, raw i32/i64/f32/f64/pointer parameters that
//!   are safe to pass across the system ABI.
//!
//! * If any nested function propagates an `Err(trap)` out to the library
//!   function frame, we need to raise it. This involves some nasty and quite
//!   unsafe code under the covers! Notably, after raising the trap, drops
//!   **will not** be run for local variables! This can lead to things like
//!   leaking `InstanceHandle`s which leads to never deallocating JIT code,
//!   instances, and modules if we are not careful!
//!
//! * The libcall must be entered via a Wasm-to-libcall trampoline that saves
//!   the last Wasm FP and PC for stack walking purposes. (For more details, see
//!   `crates/wasmtime/src/runtime/vm/backtrace.rs`.)
//!
//! To make it easier to correctly handle all these things, **all** libcalls
//! must be defined via the `libcall!` helper macro! See its doc comments below
//! for an example, or just look at the rest of the file.
//!
//! ## Dealing with `externref`s
//!
//! When receiving a raw `*mut u8` that is actually a `VMExternRef` reference,
//! convert it into a proper `VMExternRef` with `VMExternRef::clone_from_raw` as
//! soon as possible. Any GC before the raw pointer is converted into a
//! reference can potentially collect the referenced object, which could lead
//! to use after free.
//!
//! Avoid this by eagerly converting into a proper `VMExternRef`! (Unfortunately
//! there is no macro to help us automatically get this correct, so stay
//! vigilant!)
//!
//! ```ignore
//! pub unsafe extern "C" fn my_libcall_takes_ref(raw_extern_ref: *mut u8) {
//!     // Before `clone_from_raw`, `raw_extern_ref` is potentially unrooted,
//!     // and doing GC here could lead to use after free!
//!
//!     let my_extern_ref = if raw_extern_ref.is_null() {
//!         None
//!     } else {
//!         Some(VMExternRef::clone_from_raw(raw_extern_ref))
//!     };
//!
//!     // Now that we did `clone_from_raw`, it is safe to do a GC (or do
//!     // anything else that might transitively GC, like call back into
//!     // Wasm!)
//! }
//! ```

#[cfg(feature = "stack-switching")]
use super::stack_switching::VMContObj;
use crate::prelude::*;
use crate::runtime::store::{Asyncness, InstanceId, StoreInstanceId, StoreOpaque};
#[cfg(feature = "gc")]
use crate::runtime::vm::VMGcRef;
use crate::runtime::vm::table::TableElementType;
use crate::runtime::vm::vmcontext::VMFuncRef;
use crate::runtime::vm::{
    self, HostResultHasUnwindSentinel, SendSyncPtr, TrapReason, VMStore, f32x4, f64x2, i8x16,
};
use core::convert::Infallible;
use core::ptr::NonNull;
#[cfg(feature = "threads")]
use core::time::Duration;
use wasmtime_core::math::WasmFloat;
use wasmtime_environ::{
    DataIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, FuncIndex, MemoryIndex,
    TableIndex, Trap,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::AccessError::{
    DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds,
};

/// Raw functions which are actually called from compiled code.
///
/// Invocation of a builtin currently looks like:
///
/// * A wasm function calls a cranelift-compiled trampoline that's generated
///   once-per-builtin.
/// * The cranelift-compiled trampoline performs any necessary actions to exit
///   wasm, such as dealing with fp/pc/etc.
/// * The cranelift-compiled trampoline loads a function pointer from an array
///   stored in `VMContext`. That function pointer is defined in this module.
/// * This module runs, handling things like `catch_unwind` and `Result` and
///   such.
/// * This module delegates to the outer module (this file) which has the actual
///   implementation.
///
/// For more information on converting from host-defined values to Cranelift ABI
/// values see the `catch_unwind_and_record_trap` function.
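///
/// As an illustrative sketch of that flow (pseudocode with hypothetical names
/// like `builtin_functions` and `BUILTIN_INDEX`, not the actual generated
/// code):
///
/// ```ignore
/// // wasm-to-libcall trampoline, generated by Cranelift once per builtin:
/// save_last_wasm_fp_and_pc();                       // enables stack walking
/// let libcall = vmctx.builtin_functions[BUILTIN_INDEX];
/// let raw_ret = libcall(vmctx, raw_args...);        // one of `raw::*` below
/// // the `raw::*` entrypoint ran the real implementation under
/// // `catch_unwind` and encoded any trap/unwind into `raw_ret`.
/// ```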
pub mod raw {
    use crate::runtime::vm::{Instance, VMContext, f32x4, f64x2, i8x16};
    use core::ptr::NonNull;

    macro_rules! libcall {
        (
            $(
                $( #[cfg($attr:meta)] )?
                $name:ident( vmctx: vmctx $(, $pname:ident: $param:ident )* ) $(-> $result:ident)?;
            )*
        ) => {
            $(
                // This is the direct entrypoint from the compiled module which
                // still has the raw signature.
                //
                // This will delegate to the outer module for the actual
                // implementation and automatically perform `catch_unwind` along
                // with conversion of the return value in the face of traps.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                #[allow(unused_variables, reason = "macro-generated")]
                #[allow(unreachable_code, reason = "some types uninhabited on some platforms")]
                pub unsafe extern "C" fn $name(
                    vmctx: NonNull<VMContext>,
                    $( $pname : libcall!(@ty $param), )*
                ) $(-> libcall!(@ty $result))? {
                    $(#[cfg($attr)])?
                    unsafe {
                        Instance::enter_host_from_wasm(vmctx, |store, instance| {
                            super::$name(store, instance, $($pname),*)
                        })
                    }
                    $(
                        #[cfg(not($attr))]
                        {
                            let _ = vmctx;
                            unreachable!();
                        }
                    )?
                }

                // This works around a `rustc` bug where compiling with LTO
                // will sometimes strip out some of these symbols resulting
                // in a linking failure.
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                const _: () = {
                    #[used]
                    static I_AM_USED: unsafe extern "C" fn(
                        NonNull<VMContext>,
                        $( $pname : libcall!(@ty $param), )*
                    ) $( -> libcall!(@ty $result))? = $name;
                };
            )*
        };

        (@ty u32) => (u32);
        (@ty u64) => (u64);
        (@ty f32) => (f32);
        (@ty f64) => (f64);
        (@ty u8) => (u8);
        (@ty i8x16) => (i8x16);
        (@ty f32x4) => (f32x4);
        (@ty f64x2) => (f64x2);
        (@ty bool) => (bool);
        (@ty pointer) => (*mut u8);
        (@ty size) => (usize);
    }

    wasmtime_environ::foreach_builtin_function!(libcall);
}

/// Uses the `$store` provided to invoke the async closure `$f` and block on the
/// result.
///
/// This will internally multiplex on `$store.with_blocking(...)` vs simply
/// asserting the closure is ready depending on whether a store's
/// `can_block` flag is set or not.
///
/// FIXME: ideally this would be a function, not a macro. If this were a
/// function, though, it would require placing a bound on the async closure `$f`
/// where the returned future is itself `Send`. That's not possible in Rust
/// right now, unfortunately.
///
/// As a workaround this takes advantage of the fact that we can assume that the
/// compiler can infer that the future returned by `$f` is indeed `Send` so long
/// as we don't try to name the type or place it behind a generic. In the future
/// when we can bound the return future of async functions with `Send` this
/// macro should be replaced with an equivalent function.
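///
/// A usage sketch mirroring the call sites later in this file (e.g.
/// `memory_grow`):
///
/// ```ignore
/// let (mut limiter, store) = store.resource_limiter_and_store_opaque();
/// block_on!(store, async |store, _asyncness| {
///     // ... possibly-async work with exclusive access to `store` ...
///     Ok(())
/// })?
/// ```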
macro_rules! block_on {
    ($store:expr, $f:expr) => {{
        let store: &mut StoreOpaque = $store;
        let closure = assert_async_fn_closure($f);

        if store.can_block() {
            // If the store can block then that means it's on a fiber. We can
            // forward to `block_on` and everything should be fine and dandy.
            #[cfg(feature = "async")]
            {
                store.with_blocking(|store, cx| cx.block_on(closure(store, Asyncness::Yes)))
            }
            #[cfg(not(feature = "async"))]
            {
                unreachable!()
            }
        } else {
            // If the store cannot block it's not on a fiber. That means that we get
            // at most one poll of `closure(store)` here. In the typical case
            // what this means is that nothing async is configured in the store
            // and one poll should be all we need. There are niche cases where
            // one poll is not sufficient though, for example:
            //
            // * Store is created.
            // * Wasm is called.
            // * Wasm calls host.
            // * Host configures an async resource limiter, returns back to
            //   wasm.
            // * Wasm grows memory.
            // * Limiter wants to block asynchronously.
            //
            // Technically there's nothing wrong with this, but it means that
            // we're in wasm and one poll is not enough here. Given the niche
            // nature of this scenario and how it's not really expected to work
            // this translates failures in `closure` to a trap. This trap is
            // only expected to show up in niche-ish scenarios, not for actual
            // blocking work, as that would otherwise be too surprising.
            vm::one_poll(closure(store, Asyncness::No)).ok_or_else(|| {
                crate::format_err!(
                    "

A synchronously called wasm function invoked an async-defined libcall which
failed to complete synchronously and is thus raising a trap. It's expected
that this indicates that the store was configured to do async things after the
original synchronous entrypoint to wasm was called. That's generally not
supported in Wasmtime and an async entrypoint should be used instead. If you're
seeing this message in error please file an issue on Wasmtime.

"
                )
            })
        }
    }};
}

fn assert_async_fn_closure<F, R>(f: F) -> F
where
    F: AsyncFnOnce(&mut StoreOpaque, Asyncness) -> R,
{
    f
}

fn memory_grow(
    store: &mut dyn VMStore,
    instance: InstanceId,
    delta: u64,
    memory_index: u32,
) -> Result<Option<AllocationSize>> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let instance = store.instance_mut(instance);
        let module = instance.env_module();
        let page_size_log2 = module.memories[module.memory_index(memory_index)].page_size_log2;

        let result = instance
            .memory_grow(limiter, memory_index, delta)
            .await?
            .map(|size_in_bytes| AllocationSize(size_in_bytes >> page_size_log2));

        Ok(result)
    })?
}

/// A helper structure to represent the return value of a memory or table growth
/// call.
///
/// This represents a byte- or element-based count of the size of an item on the
/// host. For example, for a memory this is the size of the memory in bytes, and
/// for a table it is the number of elements. It's assumed that the value here
/// is never -1 or -2, as that would mean the entire host address space is
/// allocated, which is not possible.
struct AllocationSize(usize);

/// Special implementation for growth-related libcalls.
///
/// Here the optional return value means:
///
/// * `Some(val)` - the growth succeeded and the previous size of the item was
///   `val`.
/// * `None` - the growth failed.
///
/// The failure case returns -1 (or `usize::MAX` as an unsigned integer) and the
/// successful case returns the `val` itself. Note that -2 (`usize::MAX - 1`
/// when unsigned) is used as a sentinel to indicate an unwind, as no valid
/// allocation can be that large.
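///
/// Illustratively (assuming a 64-bit host):
///
/// ```ignore
/// assert_eq!(Some(AllocationSize(5)).into_abi() as usize, 5);
/// assert_eq!(None::<AllocationSize>.into_abi() as usize, usize::MAX); // -1
/// // `usize::MAX - 1` (-2) is never produced by `into_abi`; it's reserved
/// // as the unwind sentinel.
/// ```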
unsafe impl HostResultHasUnwindSentinel for Option<AllocationSize> {
    type Abi = *mut u8;
    const SENTINEL: *mut u8 = (usize::MAX - 1) as *mut u8;

    fn into_abi(self) -> *mut u8 {
        match self {
            Some(size) => {
                debug_assert!(size.0 < (usize::MAX - 1));
                size.0 as *mut u8
            }
            None => usize::MAX as *mut u8,
        }
    }
}

/// Implementation of `table.grow` for `funcref` tables.
unsafe fn table_grow_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: *mut u8,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = NonNull::new(init_value.cast::<VMFuncRef>()).map(SendSyncPtr::new);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Func,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_func(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.grow` for GC-reference tables.
#[cfg(feature = "gc")]
fn table_grow_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: u32,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = VMGcRef::from_raw_u32(init_value);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let (gc_store, mut instance) = store.optional_gc_store_and_instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::GcRef,
        ));

        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table
                    .grow_gc_ref(limiter, gc_store, delta, element.as_ref())
                    .await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

#[cfg(feature = "stack-switching")]
unsafe fn table_grow_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    // The following two values together form the initial Option<VMContObj>.
    // A None value is indicated by the pointer being null.
    init_value_contref: *mut u8,
    init_value_revision: usize,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = unsafe { VMContObj::from_raw_parts(init_value_contref, init_value_revision) };
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store, _| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Cont,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_cont(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

/// Implementation of `table.fill` for `funcref`s.
unsafe fn table_fill_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: *mut u8,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => {
            let val = NonNull::new(val.cast::<VMFuncRef>());
            table.fill_func(dst, val, len)?;
            Ok(())
        }
        TableElementType::GcRef => unreachable!(),
        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "gc")]
fn table_fill_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<()> {
    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => unreachable!(),
        TableElementType::GcRef => {
            let gc_ref = VMGcRef::from_raw_u32(val);
            table.fill_gc_ref(gc_store, dst, gc_ref.as_ref(), len)?;
            Ok(())
        }

        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "stack-switching")]
unsafe fn table_fill_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    value_contref: *mut u8,
    value_revision: usize,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Cont => {
            let contobj = unsafe { VMContObj::from_raw_parts(value_contref, value_revision) };
            table.fill_cont(dst, contobj, len)?;
            Ok(())
        }
        _ => panic!("Wrong table filling function"),
    }
}

// Implementation of `table.copy`.
fn table_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_table_index: u32,
    src_table_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let dst_table_index = TableIndex::from_u32(dst_table_index);
    let src_table_index = TableIndex::from_u32(src_table_index);
    let store = store.store_opaque_mut();
    let mut instance = store.instance_mut(instance);

    // Convert the two table indices relative to `instance` into two
    // defining instances and the defined table index within that instance.
    let (dst_def_index, dst_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(dst_table_index);
    let dst_instance_id = dst_instance.id();
    let (src_def_index, src_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(src_table_index);
    let src_instance_id = src_instance.id();

    let src_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), src_instance_id),
        src_def_index,
    );
    let dst_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), dst_instance_id),
        dst_def_index,
    );

    // SAFETY: this is only safe if the two tables have the same type, and that
    // was validated during wasm-validation time.
    unsafe { crate::Table::copy_raw(store, &dst_table, dst, &src_table, src, len) }
}

// Implementation of `table.init`.
fn table_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    elem_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<()> {
    let table_index = TableIndex::from_u32(table_index);
    let elem_index = ElemIndex::from_u32(elem_index);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        vm::Instance::table_init(
            store,
            limiter.as_mut(),
            asyncness,
            instance,
            table_index,
            elem_index,
            dst,
            src,
            len,
        )
        .await
    })??;
    Ok(())
}

// Implementation of `elem.drop`.
fn elem_drop(store: &mut dyn VMStore, instance: InstanceId, elem_index: u32) -> Result<()> {
    let elem_index = ElemIndex::from_u32(elem_index);
    store.instance_mut(instance).elem_drop(elem_index)?;
    Ok(())
}

// Implementation of `memory.copy`.
fn memory_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_index: u32,
    dst: u64,
    src_index: u32,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let src_index = MemoryIndex::from_u32(src_index);
    let dst_index = MemoryIndex::from_u32(dst_index);
    store
        .instance_mut(instance)
        .memory_copy(dst_index, dst, src_index, src, len)
}

// Implementation of `memory.fill` for locally defined memories.
fn memory_fill(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<(), Trap> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    #[expect(clippy::cast_possible_truncation, reason = "known to truncate here")]
    store
        .instance_mut(instance)
        .memory_fill(memory_index, dst, val as u8, len)
}

// Implementation of `memory.init`.
fn memory_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    data_index: u32,
    dst: u64,
    src: u32,
    len: u32,
) -> Result<(), Trap> {
    let memory_index = MemoryIndex::from_u32(memory_index);
    let data_index = DataIndex::from_u32(data_index);
    store
        .instance_mut(instance)
        .memory_init(memory_index, data_index, dst, src, len)
}

// Implementation of `ref.func`.
fn ref_func(store: &mut dyn VMStore, instance: InstanceId, func_index: u32) -> NonNull<u8> {
    let (instance, registry) = store.instance_and_module_registry_mut(instance);
    instance
        .get_func_ref(registry, FuncIndex::from_u32(func_index))
        .expect("ref_func: funcref should always be available for given func index")
        .cast()
}

// Implementation of `data.drop`.
fn data_drop(store: &mut dyn VMStore, instance: InstanceId, data_index: u32) -> Result<()> {
    let data_index = DataIndex::from_u32(data_index);
    store.instance_mut(instance).data_drop(data_index)?;
    Ok(())
}

// Returns a table entry after lazily initializing it.
fn table_get_lazy_init_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    index: u64,
) -> *mut u8 {
    let table_index = TableIndex::from_u32(table_index);
    let (instance, registry) = store.instance_and_module_registry_mut(instance);
    let table = instance.get_table_with_lazy_init(registry, table_index, core::iter::once(index));
    let elem = table
        .get_func(index)
        .expect("table access already bounds-checked");

    match elem {
        Some(ptr) => ptr.as_ptr().cast(),
        None => core::ptr::null_mut(),
    }
}

/// Drop a GC reference.
#[cfg(feature = "gc-drc")]
fn drop_gc_ref(store: &mut dyn VMStore, _instance: InstanceId, gc_ref: u32) {
    log::trace!("libcalls::drop_gc_ref({gc_ref:#x})");
    let gc_ref = VMGcRef::from_raw_u32(gc_ref).expect("non-null VMGcRef");
    store
        .store_opaque_mut()
        .unwrap_gc_store_mut()
        .drop_gc_ref(gc_ref);
}

/// Grow the GC heap.
#[cfg(feature = "gc-null")]
fn grow_gc_heap(store: &mut dyn VMStore, _instance: InstanceId, bytes_needed: u64) -> Result<()> {
    let orig_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        store
            .gc(limiter.as_mut(), None, Some(bytes_needed), asyncness)
            .await;
    })?;

    // JIT code relies on the memory having grown by `bytes_needed` bytes if
    // this libcall returns successfully, so trap if we didn't grow that much.
    let new_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();
    if orig_len
        .checked_add(bytes_needed)
        .is_none_or(|expected_len| new_len < expected_len)
    {
        return Err(crate::Trap::AllocationTooLarge.into());
    }

    Ok(())
}

/// Allocate a raw, uninitialized GC object for Wasm code.
///
/// The Wasm code is responsible for initializing the object.
#[cfg(feature = "gc-drc")]
fn gc_alloc_raw(
    store: &mut dyn VMStore,
    instance: InstanceId,
    kind_and_reserved: u32,
    module_interned_type_index: u32,
    size: u32,
    align: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::vm::VMGcHeader;
    use core::alloc::Layout;
    use wasmtime_environ::{ModuleInternedTypeIndex, VMGcKind};

    let kind = VMGcKind::from_high_bits_of_u32(kind_and_reserved);
    log::trace!("gc_alloc_raw(kind={kind:?}, size={size}, align={align})");

    let module = store
        .instance(instance)
        .runtime_module()
        .expect("should never allocate GC types defined in a dummy module");

    let module_interned_type_index = ModuleInternedTypeIndex::from_u32(module_interned_type_index);
    let shared_type_index = module
        .signatures()
        .shared_type(module_interned_type_index)
        .expect("should have engine type index for module type index");

    let mut header = VMGcHeader::from_kind_and_index(kind, shared_type_index);
    header.set_reserved_u26(kind_and_reserved & VMGcKind::UNUSED_MASK);

    let size = usize::try_from(size).unwrap();
    let align = usize::try_from(align).unwrap();
    assert!(align.is_power_of_two());
    let layout = Layout::from_size_align(size, align).map_err(|e| {
        let err = Error::from(crate::Trap::AllocationTooLarge);
        err.context(e)
    })?;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let gc_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), asyncness, |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_raw(header, layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

// Intern a `funcref` into the GC heap, returning its `FuncRefTableId`.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
unsafe fn intern_func_ref_for_gc_heap(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    func_ref: *mut u8,
) -> Result<u32> {
    use crate::{store::AutoAssertNoGc, vm::SendSyncPtr};
    use core::ptr::NonNull;

    let mut store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref = func_ref.cast::<VMFuncRef>();
    let func_ref = NonNull::new(func_ref).map(SendSyncPtr::new);

    let func_ref_id = unsafe {
        store
            .require_gc_store_mut()?
            .func_ref_table
            .intern(func_ref)
    };
    Ok(func_ref_id.into_raw())
}

// Get the raw `VMFuncRef` pointer associated with a `FuncRefTableId` from an
// earlier `intern_func_ref_for_gc_heap` call.
//
// This libcall may not GC.
#[cfg(feature = "gc")]
fn get_interned_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func_ref_id: u32,
    module_interned_type_index: u32,
) -> *mut u8 {
    use super::FuncRefTableId;
    use crate::store::AutoAssertNoGc;
    use wasmtime_environ::{ModuleInternedTypeIndex, packed_option::ReservedValue};

    let store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref_id = FuncRefTableId::from_raw(func_ref_id);
    let module_interned_type_index = ModuleInternedTypeIndex::from_bits(module_interned_type_index);

    let func_ref = if module_interned_type_index.is_reserved_value() {
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_untyped(func_ref_id)
    } else {
        let types = store.engine().signatures();
        let engine_ty = store
            .instance(instance)
            .engine_type_index(module_interned_type_index);
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_typed(types, func_ref_id, engine_ty)
    };

    func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast())
}

/// Implementation of the `array.new_data` instruction.
#[cfg(feature = "gc")]
fn array_new_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let data_index = DataIndex::from_u32(data_index);
        let instance = store.instance(instance_id);

        // Calculate the byte-length of the data (as opposed to the element-length
        // of the array).
        let data_range = instance.wasm_data_range(data_index);
        let shared_ty = instance.engine_type_index(array_type_index);
        let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
        let one_elem_size = array_ty
            .element_type()
            .data_byte_size()
            .expect("Wasm validation ensures that this type has a defined byte size");
        let byte_len = len
            .checked_mul(one_elem_size)
            .and_then(|x| usize::try_from(x).ok())
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Get the data from the segment, checking bounds.
        let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
        instance
            .wasm_data(data_range.clone())
            .get(src..)
            .and_then(|d| d.get(..byte_len))
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        // Allocate the (uninitialized) array.
        let gc_layout = store
            .engine()
            .signatures()
            .layout(shared_ty)
            .expect("array types have GC layouts");
        let array_layout = gc_layout.unwrap_array();
        let array_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), asyncness, |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_uninit_array(shared_ty, len, &array_layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
        let gc_store = gc_store.unwrap();
        let data = &instance.wasm_data(data_range)[src..][..byte_len];

        // Copy the data into the array, initializing it.
        gc_store
            .gc_object_data(array_ref.as_gc_ref())
            .copy_from_slice(array_layout.base_size, data);

        // Return the array to Wasm!
        let raw = gc_store.expose_gc_ref_to_wasm(array_ref.into());
        Ok(raw)
    })?
}

/// Implementation of the `array.init_data` instruction.
#[cfg(feature = "gc")]
fn array_init_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    let instance = store.instance(instance_id);

    log::trace!(
        "array.init_data(array={array:#x}, dst={dst}, data_index={data_index:?}, src={src}, len={len})",
    );

    // Null check the array.
    let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
    let array = gc_ref
        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an array");

    let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds)?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds)?;

    // Bounds check the array.
    let array_len = array.len(store.store_opaque());
    let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Calculate the byte length from the array length.
    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type has a defined byte size");
    let data_len = len
        .checked_mul(usize::try_from(one_elem_size).unwrap())
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Get the data from the segment, checking its bounds.
    let data_range = instance.wasm_data_range(data_index);
    instance
        .wasm_data(data_range.clone())
        .get(src..)
        .and_then(|d| d.get(..data_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    // Copy the data into the array.

    let dst_offset = u32::try_from(dst)
        .unwrap()
        .checked_mul(one_elem_size)
        .unwrap();

    let array_layout = store
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = array_layout.unwrap_array();

    let obj_offset = array_layout.base_size.checked_add(dst_offset).unwrap();

    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
    let gc_store = gc_store.unwrap();
    let data = &instance.wasm_data(data_range)[src..][..data_len];
    gc_store
        .gc_object_data(array.as_gc_ref())
        .copy_from_slice(obj_offset, data);

    Ok(())
}

#[cfg(feature = "gc")]
fn array_new_elem(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::{
        ArrayRef, ArrayRefPre, ArrayType, Func, OpaqueRootScope, RootedGcRefImpl, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    // Convert indices to their typed forms.
    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let elem_index = ElemIndex::from_u32(elem_index);
    let instance = store.instance(instance_id);

    let mut storage = None;
    let elements = instance.passive_element_segment(&mut storage, elem_index);

    let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let pre = ArrayRefPre::_new(store, array_ty);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let mut store = OpaqueRootScope::new(store);
        // Turn the elements into `Val`s.
        let mut vals = Vec::with_capacity(usize::try_from(elements.len()).unwrap());
        match elements {
            TableSegmentElements::Functions(fs) => {
                let store_id = store.id();
                let (mut instance, registry) = store.instance_and_module_registry_mut(instance_id);
                vals.extend(
                    fs.get(src..)
                        .and_then(|s| s.get(..len))
                        .ok_or_else(|| Trap::TableOutOfBounds)?
                        .iter()
                        .map(|f| {
                            let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                            let func = unsafe {
                                raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p))
                            };
                            Val::FuncRef(func)
                        }),
                );
            }
            TableSegmentElements::Expressions(xs) => {
                let xs = xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?;

                let mut const_context = ConstEvalContext::new(instance_id, asyncness);
                let mut const_evaluator = ConstExprEvaluator::default();

                for x in xs.iter() {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
            }
        }

        let array =
            ArrayRef::_new_fixed_async(&mut store, limiter.as_mut(), &pre, &vals, asyncness)
                .await?;

        let mut store = AutoAssertNoGc::new(&mut store);
        let gc_ref = array.try_clone_gc_ref(&mut store)?;
        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

#[cfg(feature = "gc")]
fn array_init_elem(
    store: &mut dyn VMStore,
    instance: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{
        ArrayRef, Func, OpaqueRootScope, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store, asyncness| {
        let mut store = OpaqueRootScope::new(store);

        // Convert the indices into their typed forms.
        let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let elem_index = ElemIndex::from_u32(elem_index);

        log::trace!(
            "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})",
        );

        // Convert the raw GC ref into a `Rooted<ArrayRef>`.
        let array = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
        let array = store.unwrap_gc_store_mut().clone_gc_ref(&array);
        let array = {
            let mut no_gc = AutoAssertNoGc::new(&mut store);
            ArrayRef::from_cloned_gc_ref(&mut no_gc, array)
        };

        // Bounds check the destination within the array.
        let array_len = array._len(&store)?;
        log::trace!("array_len = {array_len}");
        if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
            return Err(Trap::ArrayOutOfBounds.into());
        }

        // Get the passive element segment.
        let mut storage = None;
        let store_id = store.id();
        let (mut instance, registry) = store.instance_and_module_registry_mut(instance);
        let elements = instance.passive_element_segment(&mut storage, elem_index);

        // Convert array offsets into `usize`s.
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        // Turn the elements into `Val`s.
        let vals = match elements {
            TableSegmentElements::Functions(fs) => fs
                .get(src..)
                .and_then(|s| s.get(..len))
                .ok_or_else(|| Trap::TableOutOfBounds)?
                .iter()
                .map(|f| {
                    let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                    let func = unsafe { raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p)) };
                    Val::FuncRef(func)
                })
                .collect::<Vec<_>>(),
            TableSegmentElements::Expressions(xs) => {
                let mut const_context = ConstEvalContext::new(instance.id(), asyncness);
                let mut const_evaluator = ConstExprEvaluator::default();

                let mut vals = Vec::new();
                for x in xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?
                {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
                vals
            }
        };

        // Copy the values into the array.
        for (i, val) in vals.into_iter().enumerate() {
            let i = u32::try_from(i).unwrap();
            let j = dst.checked_add(i).unwrap();
            array._set(&mut store, j, val)?;
        }

        Ok(())
    })?
}

// TODO: Specialize this libcall for only non-GC array elements, so we never
// have to do GC barriers and their associated indirect calls through the `dyn
// GcHeap`. Instead, implement those copies inline in Wasm code. Then, use bulk
// `memcpy`-style APIs to do the actual copies here.
#[cfg(feature = "gc")]
fn array_copy(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    dst_array: u32,
    dst: u32,
    src_array: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{ArrayRef, OpaqueRootScope, store::AutoAssertNoGc};

    log::trace!(
        "array.copy(dst_array={dst_array:#x}, dst_index={dst}, src_array={src_array:#x}, src_index={src}, len={len})",
    );

    let mut store = OpaqueRootScope::new(store.store_opaque_mut());
    let mut store = AutoAssertNoGc::new(&mut store);

    // Convert the raw GC refs into `Rooted<ArrayRef>`s.
    let dst_array = VMGcRef::from_raw_u32(dst_array).ok_or_else(|| Trap::NullReference)?;
    let dst_array = store.unwrap_gc_store_mut().clone_gc_ref(&dst_array);
    let dst_array = ArrayRef::from_cloned_gc_ref(&mut store, dst_array);
    let src_array = VMGcRef::from_raw_u32(src_array).ok_or_else(|| Trap::NullReference)?;
    let src_array = store.unwrap_gc_store_mut().clone_gc_ref(&src_array);
    let src_array = ArrayRef::from_cloned_gc_ref(&mut store, src_array);

    // Bounds check the destination array's elements.
    let dst_array_len = dst_array._len(&store)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > dst_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    // Bounds check the source array's elements.
    let src_array_len = src_array._len(&store)?;
    if src.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > src_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    let mut store = AutoAssertNoGc::new(&mut store);
    // If `src_array` and `dst_array` are the same array, then we are
    // potentially doing an overlapping copy, so make sure to copy elements in
    // the order that doesn't clobber the source elements before they are
    // copied. If they are different arrays, the order doesn't matter, but we
    // simply don't bother checking.
    if src > dst {
        for i in 0..len {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    } else {
        for i in (0..len).rev() {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    }
    Ok(())
}

#[cfg(feature = "gc")]
fn is_subtype(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    actual_engine_type: u32,
    expected_engine_type: u32,
) -> u32 {
    use wasmtime_environ::VMSharedTypeIndex;

    let actual = VMSharedTypeIndex::from_u32(actual_engine_type);
    let expected = VMSharedTypeIndex::from_u32(expected_engine_type);

    let is_subtype: bool = store.engine().signatures().is_subtype(actual, expected);

    log::trace!("is_subtype(actual={actual:?}, expected={expected:?}) -> {is_subtype}",);
    is_subtype as u32
}

// Implementation of `memory.atomic.notify` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_notify(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    count: u32,
) -> Result<u32, Trap> {
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_notify(addr_index, count)
}

// Implementation of `memory.atomic.wait32` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait32(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u32,
    timeout: u64,
) -> Result<u32, Trap> {
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait32(addr_index, expected, timeout)? as u32)
}

// Implementation of `memory.atomic.wait64` for locally defined memories.
#[cfg(feature = "threads")]
fn memory_atomic_wait64(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u64,
    timeout: u64,
) -> Result<u32, Trap> {
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait64(addr_index, expected, timeout)? as u32)
}

// Hook for when an instance runs out of fuel.
fn out_of_gas(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    block_on!(store, async |store, _| {
        if !store.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if store.fuel_yield_interval.is_some() {
            crate::runtime::vm::Yield::new().await;
        }
        Ok(())
    })?
}

// Hook for when an instance observes that the epoch has changed.
#[cfg(target_has_atomic = "64")]
fn new_epoch(store: &mut dyn VMStore, _instance: InstanceId) -> Result<NextEpoch> {
    use crate::UpdateDeadline;

    #[cfg(feature = "debug")]
    {
        store.block_on_debug_handler(crate::DebugEvent::EpochYield)?;
    }

    let update_deadline = store.new_epoch_updated_deadline()?;
    block_on!(store, async move |store, asyncness| {
        #[cfg(not(feature = "async"))]
        let _ = asyncness;

        let delta = match update_deadline {
            UpdateDeadline::Interrupt => return Err(Trap::Interrupt.into()),
            UpdateDeadline::Continue(delta) => delta,

            // Note that custom errors are used here to avoid tripping up on the
            // `block_on!` message that otherwise assumes
            // async-configuration-after-the-fact.
            #[cfg(feature = "async")]
            UpdateDeadline::Yield(delta) => {
                if asyncness != Asyncness::Yes {
                    bail!(
                        "cannot use `UpdateDeadline::Yield` without using \
                         an async wasm entrypoint",
                    );
                }
                crate::runtime::vm::Yield::new().await;
                delta
            }
            #[cfg(feature = "async")]
            UpdateDeadline::YieldCustom(delta, future) => {
                if asyncness != Asyncness::Yes {
                    bail!(
                        "cannot use `UpdateDeadline::YieldCustom` without using \
                         an async wasm entrypoint",
                    );
                }
                future.await;
                delta
            }
        };

        // Set a new deadline and return the new epoch deadline so
        // the Wasm code doesn't have to reload it.
        store.set_epoch_deadline(delta);
        Ok(NextEpoch(store.get_epoch_deadline()))
    })?
}

struct NextEpoch(u64);

unsafe impl HostResultHasUnwindSentinel for NextEpoch {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.0
    }
}
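// Note: like `Option<AllocationSize>` above, `NextEpoch` crosses back into
// compiled code via a sentinel-based encoding: `u64::MAX` signals an unwind,
// and any other value is the new epoch deadline, returned in-band so wasm
// code doesn't have to reload it.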
1357
1358// Hook for validating malloc using wmemcheck_state.
1359#[cfg(feature = "wmemcheck")]
1360fn check_malloc(store: &mut dyn VMStore, instance: InstanceId, addr: u32, len: u32) -> Result<()> {
1361    let instance = store.instance_mut(instance);
1362    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
1363        let result = wmemcheck_state.malloc(addr as usize, len as usize);
1364        wmemcheck_state.memcheck_on();
1365        match result {
1366            Ok(()) => {}
1367            Err(DoubleMalloc { addr, len }) => {
1368                bail!("Double malloc at addr {:#x} of size {}", addr, len)
1369            }
1370            Err(OutOfBounds { addr, len }) => {
1371                bail!("Malloc out of bounds at addr {:#x} of size {}", addr, len);
1372            }
1373            _ => {
1374                panic!("unreachable")
1375            }
1376        }
1377    }
1378    Ok(())
1379}

// Hook for validating free using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_free(store: &mut dyn VMStore, instance: InstanceId, addr: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.free(addr as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(InvalidFree { addr }) => {
                bail!("Invalid free at addr {:#x}", addr);
            }
            _ => unreachable!(),
        }
    }
    Ok(())
}

// Hook for validating load using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_load(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.read(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidRead { addr, len }) => {
                bail!("Invalid load at addr {:#x} of size {}", addr, len);
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Load out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => unreachable!(),
        }
    }
    Ok(())
}

// Hook for validating store using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn check_store(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.write(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidWrite { addr, len }) => {
                bail!("Invalid store at addr {:#x} of size {}", addr, len);
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Store out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => unreachable!(),
        }
    }
    Ok(())
}

// Hook for turning wmemcheck load/store validation off when entering a malloc function.
#[cfg(feature = "wmemcheck")]
fn malloc_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

// Hook for turning wmemcheck load/store validation off when entering a free function.
#[cfg(feature = "wmemcheck")]
fn free_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

// Hook for tracking wasm stack updates using wmemcheck_state.
#[cfg(feature = "wmemcheck")]
fn update_stack_pointer(_store: &mut dyn VMStore, _instance: InstanceId, _value: u32) {
    // TODO: stack tracing has yet to be finalized. All memory below the
    // address of the top of the stack is marked as valid for loads and
    // stores.
    // if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
    //     wmemcheck_state.update_stack_pointer(value as usize);
    // }
}

// Hook for updating the wmemcheck_state memory state vector every time
// memory.grow is called.
#[cfg(feature = "wmemcheck")]
fn update_mem_size(store: &mut dyn VMStore, instance: InstanceId, num_pages: u32) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        const KIB: usize = 1024;
        let num_bytes = num_pages as usize * 64 * KIB;
        wmemcheck_state.update_mem_size(num_bytes);
    }
}
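
// For example, with `num_pages = 2` the hook reports 2 * 64 KiB = 131072
// bytes to `wmemcheck_state`:
//
// ```ignore
// update_mem_size(store, instance, 2); // wmemcheck_state.update_mem_size(131072)
// ```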

fn floor_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_floor()
}

fn floor_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_floor()
}

fn ceil_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_ceil()
}

fn ceil_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_ceil()
}

fn trunc_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_trunc()
}

fn trunc_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_trunc()
}

fn nearest_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    val.wasm_nearest()
}

fn nearest_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    val.wasm_nearest()
}
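
// A note on `nearest`: Wasm requires round-to-nearest with ties going to the
// even integer, unlike Rust's `f64::round`, which rounds ties away from zero.
// A small illustration:
//
// ```ignore
// assert_eq!(2.5_f64.wasm_nearest(), 2.0); // tie rounds to even
// assert_eq!(3.5_f64.wasm_nearest(), 4.0);
// assert_eq!(2.5_f64.round(), 3.0);        // std rounds ties away from zero
// ```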

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.swizzle` instruction when `pshufb` in SSSE3 is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, a: i8x16, b: i8x16) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let a = U { reg: a }.mem;
        let b = U { reg: b }.mem;

        // Use the `swizzle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 pshufb semantics, since Wasmtime uses
        // this to implement `i8x16.swizzle`.
        let select = |arr: &[u8; 16], byte: u8| {
            if byte >= 16 { 0x00 } else { arr[byte as usize] }
        };

        U {
            mem: [
                select(&a, b[0]),
                select(&a, b[1]),
                select(&a, b[2]),
                select(&a, b[3]),
                select(&a, b[4]),
                select(&a, b[5]),
                select(&a, b[6]),
                select(&a, b[7]),
                select(&a, b[8]),
                select(&a, b[9]),
                select(&a, b[10]),
                select(&a, b[11]),
                select(&a, b[12]),
                select(&a, b[13]),
                select(&a, b[14]),
                select(&a, b[15]),
            ],
        }
        .reg
    }
}
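
// A worked example of the semantics above, where each lane of `b` selects a
// lane of `a` and any index of 16 or more yields 0:
//
// ```ignore
// // a = [100, 101, ..., 115]
// // b = [0, 15, 16, 255, ...]
// // i8x16_swizzle(store, instance, a, b) = [100, 115, 0, 0, ...]
// ```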

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, _a: i8x16, _b: i8x16) -> i8x16 {
    unreachable!()
}

// This intrinsic is only used on x86_64 platforms as an implementation of
// the `i8x16.shuffle` instruction when `pshufb` in SSSE3 is not available.
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    a: i8x16,
    b: i8x16,
    c: i8x16,
) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let ab = [U { reg: a }.mem, U { reg: b }.mem];
        let c = U { reg: c }.mem;

        // Use the `shuffle` semantics of returning 0 on any out-of-bounds
        // index, rather than the x86 pshufb semantics, since Wasmtime uses
        // this to implement `i8x16.shuffle`.
        let select = |arr: &[[u8; 16]; 2], byte: u8| {
            if byte >= 32 {
                0x00
            } else if byte >= 16 {
                arr[1][byte as usize - 16]
            } else {
                arr[0][byte as usize]
            }
        };

        U {
            mem: [
                select(&ab, c[0]),
                select(&ab, c[1]),
                select(&ab, c[2]),
                select(&ab, c[3]),
                select(&ab, c[4]),
                select(&ab, c[5]),
                select(&ab, c[6]),
                select(&ab, c[7]),
                select(&ab, c[8]),
                select(&ab, c[9]),
                select(&ab, c[10]),
                select(&ab, c[11]),
                select(&ab, c[12]),
                select(&ab, c[13]),
                select(&ab, c[14]),
                select(&ab, c[15]),
            ],
        }
        .reg
    }
}
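
// A worked example of the semantics above: lanes 0..=15 of `c` select from
// `a`, lanes 16..=31 select from `b`, and anything 32 or larger yields 0:
//
// ```ignore
// // a = [0, 1, ..., 15], b = [16, 17, ..., 31]
// // c = [0, 16, 31, 32, ...]
// // i8x16_shuffle(store, instance, a, b, c) = [0, 16, 31, 0, ...]
// ```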

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    _a: i8x16,
    _b: i8x16,
    _c: i8x16,
) -> i8x16 {
    unreachable!()
}

fn fma_f32x4(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f32x4,
    y: f32x4,
    z: f32x4,
) -> f32x4 {
    union U {
        reg: f32x4,
        mem: [f32; 4],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                x[0].wasm_mul_add(y[0], z[0]),
                x[1].wasm_mul_add(y[1], z[1]),
                x[2].wasm_mul_add(y[2], z[2]),
                x[3].wasm_mul_add(y[3], z[3]),
            ],
        }
        .reg
    }
}

fn fma_f64x2(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f64x2,
    y: f64x2,
    z: f64x2,
) -> f64x2 {
    union U {
        reg: f64x2,
        mem: [f64; 2],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [x[0].wasm_mul_add(y[0], z[0]), x[1].wasm_mul_add(y[1], z[1])],
        }
        .reg
    }
}
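
// A note on `wasm_mul_add`: it computes `x * y + z`. A *fused* multiply-add
// does this with a single rounding step, which can differ from rounding the
// intermediate product first. A sketch of the difference using std's
// guaranteed-fused `f64::mul_add` (illustrative only; the exact fusion
// behavior of `wasm_mul_add` is defined by `wasmtime_core::math`):
//
// ```ignore
// let x = 134217729.0_f64; // 2^27 + 1; x * x needs more than 53 mantissa bits
// let unfused = x * x - x * x;        // 0.0: the product is rounded first
// let fused = x.mul_add(x, -(x * x)); // 1.0: recovers the rounding error
// ```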

/// This intrinsic is just used to record trap information.
///
/// The `Infallible` "ok" type here means that this never returns success; it
/// only ever returns an error. This hooks into the machinery for handling
/// `Result` values so that the trap information gets recorded.
fn trap(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    code: u8,
) -> Result<Infallible, TrapReason> {
    Err(TrapReason::Wasm(
        wasmtime_environ::Trap::from_u8(code).unwrap(),
    ))
}
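
// Because the "ok" type is `Infallible`, callers can statically prove that the
// success path is dead (a sketch; `handle_trap` is hypothetical):
//
// ```ignore
// match trap(store, instance, code) {
//     Ok(never) => match never {}, // no values inhabit `Infallible`
//     Err(reason) => handle_trap(reason),
// }
// ```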

fn raise(store: &mut dyn VMStore, _instance: InstanceId) {
    // SAFETY: this is only called from compiled wasm so we know that wasm has
    // already been entered. It's a dynamic safety precondition that the trap
    // information has already been arranged to be present.
    unsafe { crate::runtime::vm::traphandlers::raise_preexisting_trap(store) }
}

// Builtins for continuations. These are thin wrappers around the
// respective definitions in stack_switching.rs.
#[cfg(feature = "stack-switching")]
fn cont_new(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> Result<Option<AllocationSize>> {
    let ans =
        crate::vm::stack_switching::cont_new(store, instance, func, param_count, result_count)?;
    Ok(Some(AllocationSize(ans.cast::<u8>() as usize)))
}

#[cfg(feature = "gc")]
fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 {
    instance.as_u32()
}

#[cfg(feature = "gc")]
fn throw_ref(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    exnref: u32,
) -> Result<(), TrapReason> {
    let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?;
    let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref);
    let exnref = exnref
        .into_exnref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an exception object");
    store.set_pending_exception(exnref);
    Err(TrapReason::Exception)
}

fn breakpoint(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    #[cfg(feature = "debug")]
    {
        log::trace!("hit breakpoint");
        store.block_on_debug_handler(crate::DebugEvent::Breakpoint)?;
    }
    // Avoid an unused-argument warning in builds without the `debug` feature.
    let _ = store;
    Ok(())
}