#[cfg(feature = "stack-switching")]
use super::stack_switching::VMContObj;
use crate::prelude::*;
use crate::runtime::store::{InstanceId, StoreInstanceId, StoreOpaque};
#[cfg(feature = "gc")]
use crate::runtime::vm::VMGcRef;
use crate::runtime::vm::table::TableElementType;
use crate::runtime::vm::vmcontext::VMFuncRef;
use crate::runtime::vm::{
    self, HostResultHasUnwindSentinel, SendSyncPtr, TrapReason, VMStore, f32x4, f64x2, i8x16,
};
use core::convert::Infallible;
use core::ptr::NonNull;
#[cfg(feature = "threads")]
use core::time::Duration;
use wasmtime_environ::{
    DataIndex, DefinedMemoryIndex, DefinedTableIndex, ElemIndex, FuncIndex, MemoryIndex,
    TableIndex, Trap,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::AccessError::{
    DoubleMalloc, InvalidFree, InvalidRead, InvalidWrite, OutOfBounds,
};

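/// Raw entrypoints for the libcalls in this module: one `unsafe extern "C"`
/// shim per builtin, generated by the `libcall!` macro below. Compiled Wasm
/// code calls these symbols directly; each shim transitions from Wasm into
/// the host via `Instance::enter_host_from_wasm` and then forwards to the
/// same-named safe function in the parent module.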
pub mod raw {
    use crate::runtime::vm::{Instance, VMContext, f32x4, f64x2, i8x16};
    use core::ptr::NonNull;

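    // Expands to one `pub unsafe extern "C"` function per builtin declared by
    // `wasmtime_environ::foreach_builtin_function!`, plus a `#[used]` static
    // per function to keep its symbol alive. Builtins gated behind a `#[cfg]`
    // attribute compile to `unreachable!()` when the feature is disabled.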
    macro_rules! libcall {
        (
            $(
                $( #[cfg($attr:meta)] )?
                $name:ident( vmctx: vmctx $(, $pname:ident: $param:ident )* ) $(-> $result:ident)?;
            )*
        ) => {
            $(
                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                #[allow(unused_variables, reason = "macro-generated")]
                pub unsafe extern "C" fn $name(
                    vmctx: NonNull<VMContext>,
                    $( $pname : libcall!(@ty $param), )*
                ) $(-> libcall!(@ty $result))? {
                    $(#[cfg($attr)])?
                    unsafe {
                        Instance::enter_host_from_wasm(vmctx, |store, instance| {
                            super::$name(store, instance, $($pname),*)
                        })
                    }
                    $(
                        #[cfg(not($attr))]
                        {
                            let _ = vmctx;
                            unreachable!();
                        }
                    )?
                }

                #[allow(improper_ctypes_definitions, reason = "__m128i known not FFI-safe")]
                const _: () = {
                    #[used]
                    static I_AM_USED: unsafe extern "C" fn(
                        NonNull<VMContext>,
                        $( $pname : libcall!(@ty $param), )*
                    ) $( -> libcall!(@ty $result))? = $name;
                };
            )*
        };

        (@ty u32) => (u32);
        (@ty u64) => (u64);
        (@ty f32) => (f32);
        (@ty f64) => (f64);
        (@ty u8) => (u8);
        (@ty i8x16) => (i8x16);
        (@ty f32x4) => (f32x4);
        (@ty f64x2) => (f64x2);
        (@ty bool) => (bool);
        (@ty pointer) => (*mut u8);
        (@ty size) => (usize);
    }

    wasmtime_environ::foreach_builtin_function!(libcall);
}

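// Runs an async closure to completion on behalf of a synchronous libcall. If
// the store has async support enabled, this blocks on the future via the
// store's executor; otherwise the future must already be ready, which
// `vm::assert_ready` asserts.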
macro_rules! block_on {
    ($store:expr, $f:expr) => {{
        let store: &mut StoreOpaque = $store;
        let closure = assert_async_fn_closure($f);
        if store.async_support() {
            #[cfg(feature = "async")]
            {
                store.with_blocking(|store, cx| cx.block_on(closure(store)))
            }
            #[cfg(not(feature = "async"))]
            {
                unreachable!()
            }
        } else {
            anyhow::Ok(vm::assert_ready(closure(store)))
        }
    }};
}

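// Identity helper that pins down the type of the closure handed to
// `block_on!`, coercing it to an `AsyncFnOnce` over `&mut StoreOpaque` so
// type inference succeeds at each macro use site.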
fn assert_async_fn_closure<F, R>(f: F) -> F
where
    F: AsyncFnOnce(&mut StoreOpaque) -> R,
{
    f
}

fn memory_grow(
    store: &mut dyn VMStore,
    instance: InstanceId,
    delta: u64,
    memory_index: u32,
) -> Result<Option<AllocationSize>> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let instance = store.instance_mut(instance);
        let module = instance.env_module();
        let page_size_log2 = module.memories[module.memory_index(memory_index)].page_size_log2;

        let result = instance
            .memory_grow(limiter, memory_index, delta)
            .await?
            .map(|size_in_bytes| AllocationSize(size_in_bytes >> page_size_log2));

        Ok(result)
    })?
}

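/// The result of a successful `memory.grow` or `table.grow` libcall: the
/// previous size of the memory (in pages) or table (in elements).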
struct AllocationSize(usize);

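/// ABI encoding of `Option<AllocationSize>` returned to Wasm: a successful
/// grow returns the old size directly (debug-asserted to be below
/// `usize::MAX - 1`), a failed grow returns `usize::MAX` (the `-1` Wasm
/// expects), and `usize::MAX - 1` is reserved as the sentinel meaning the
/// host is unwinding with a trap or error.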
unsafe impl HostResultHasUnwindSentinel for Option<AllocationSize> {
    type Abi = *mut u8;
    const SENTINEL: *mut u8 = (usize::MAX - 1) as *mut u8;

    fn into_abi(self) -> *mut u8 {
        match self {
            Some(size) => {
                debug_assert!(size.0 < (usize::MAX - 1));
                size.0 as *mut u8
            }
            None => usize::MAX as *mut u8,
        }
    }
}

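/// Implementation of `table.grow` for `funcref` tables.
///
/// # Safety
///
/// `init_value` must be a valid `*mut VMFuncRef`, or null, for the duration
/// of this call.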
unsafe fn table_grow_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: *mut u8,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = NonNull::new(init_value.cast::<VMFuncRef>()).map(SendSyncPtr::new);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Func,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_func(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

#[cfg(feature = "gc")]
fn table_grow_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value: u32,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = VMGcRef::from_raw_u32(init_value);
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let (gc_store, mut instance) = store.optional_gc_store_and_instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::GcRef,
        ));

        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table
                    .grow_gc_ref(limiter, gc_store, delta, element.as_ref())
                    .await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

#[cfg(feature = "stack-switching")]
unsafe fn table_grow_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    defined_table_index: u32,
    delta: u64,
    init_value_contref: *mut u8,
    init_value_revision: usize,
) -> Result<Option<AllocationSize>> {
    let defined_table_index = DefinedTableIndex::from_u32(defined_table_index);
    let element = unsafe { VMContObj::from_raw_parts(init_value_contref, init_value_revision) };
    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    let limiter = limiter.as_mut();
    block_on!(store, async |store| {
        let mut instance = store.instance_mut(instance);
        let table_index = instance.env_module().table_index(defined_table_index);
        debug_assert!(matches!(
            instance.as_mut().table_element_type(table_index),
            TableElementType::Cont,
        ));
        let result = instance
            .defined_table_grow(defined_table_index, async |table| unsafe {
                table.grow_cont(limiter, delta, element).await
            })
            .await?
            .map(AllocationSize);
        Ok(result)
    })?
}

unsafe fn table_fill_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: *mut u8,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => {
            let val = NonNull::new(val.cast::<VMFuncRef>());
            table.fill_func(dst, val, len)?;
            Ok(())
        }
        TableElementType::GcRef => unreachable!(),
        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "gc")]
fn table_fill_gc_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<()> {
    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Func => unreachable!(),
        TableElementType::GcRef => {
            let gc_ref = VMGcRef::from_raw_u32(val);
            table.fill_gc_ref(gc_store, dst, gc_ref.as_ref(), len)?;
            Ok(())
        }
        TableElementType::Cont => unreachable!(),
    }
}

#[cfg(feature = "stack-switching")]
unsafe fn table_fill_cont_obj(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    dst: u64,
    value_contref: *mut u8,
    value_revision: usize,
    len: u64,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    let table_index = DefinedTableIndex::from_u32(table_index);
    let table = instance.get_defined_table(table_index);
    match table.element_type() {
        TableElementType::Cont => {
            let contobj = unsafe { VMContObj::from_raw_parts(value_contref, value_revision) };
            table.fill_cont(dst, contobj, len)?;
            Ok(())
        }
        _ => panic!("wrong table fill function for this element type"),
    }
}

fn table_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_table_index: u32,
    src_table_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let dst_table_index = TableIndex::from_u32(dst_table_index);
    let src_table_index = TableIndex::from_u32(src_table_index);
    let store = store.store_opaque_mut();
    let mut instance = store.instance_mut(instance);

    let (dst_def_index, dst_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(dst_table_index);
    let dst_instance_id = dst_instance.id();
    let (src_def_index, src_instance) = instance
        .as_mut()
        .defined_table_index_and_instance(src_table_index);
    let src_instance_id = src_instance.id();

    let src_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), src_instance_id),
        src_def_index,
    );
    let dst_table = crate::Table::from_raw(
        StoreInstanceId::new(store.id(), dst_instance_id),
        dst_def_index,
    );

    unsafe { crate::Table::copy_raw(store, &dst_table, dst, &src_table, src, len) }
}

fn table_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    elem_index: u32,
    dst: u64,
    src: u64,
    len: u64,
) -> Result<()> {
    let table_index = TableIndex::from_u32(table_index);
    let elem_index = ElemIndex::from_u32(elem_index);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        vm::Instance::table_init(
            store,
            limiter.as_mut(),
            instance,
            table_index,
            elem_index,
            dst,
            src,
            len,
        )
        .await
    })??;
    Ok(())
}

fn elem_drop(store: &mut dyn VMStore, instance: InstanceId, elem_index: u32) {
    let elem_index = ElemIndex::from_u32(elem_index);
    store.instance_mut(instance).elem_drop(elem_index)
}

fn memory_copy(
    store: &mut dyn VMStore,
    instance: InstanceId,
    dst_index: u32,
    dst: u64,
    src_index: u32,
    src: u64,
    len: u64,
) -> Result<(), Trap> {
    let src_index = MemoryIndex::from_u32(src_index);
    let dst_index = MemoryIndex::from_u32(dst_index);
    store
        .instance_mut(instance)
        .memory_copy(dst_index, dst, src_index, src, len)
}

fn memory_fill(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    dst: u64,
    val: u32,
    len: u64,
) -> Result<(), Trap> {
    let memory_index = DefinedMemoryIndex::from_u32(memory_index);
    #[expect(clippy::cast_possible_truncation, reason = "known to truncate here")]
    store
        .instance_mut(instance)
        .memory_fill(memory_index, dst, val as u8, len)
}

fn memory_init(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    data_index: u32,
    dst: u64,
    src: u32,
    len: u32,
) -> Result<(), Trap> {
    let memory_index = MemoryIndex::from_u32(memory_index);
    let data_index = DataIndex::from_u32(data_index);
    store
        .instance_mut(instance)
        .memory_init(memory_index, data_index, dst, src, len)
}

fn ref_func(store: &mut dyn VMStore, instance: InstanceId, func_index: u32) -> NonNull<u8> {
    let (instance, registry) = store.instance_and_module_registry_mut(instance);
    instance
        .get_func_ref(registry, FuncIndex::from_u32(func_index))
        .expect("ref_func: funcref should always be available for given func index")
        .cast()
}

fn data_drop(store: &mut dyn VMStore, instance: InstanceId, data_index: u32) {
    let data_index = DataIndex::from_u32(data_index);
    store.instance_mut(instance).data_drop(data_index)
}

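/// Returns a table entry after lazily initializing it, for `funcref` tables
/// whose elements are filled in on first access. Returns a raw `VMFuncRef`
/// pointer, or null if the element is a null `funcref`.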
fn table_get_lazy_init_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    table_index: u32,
    index: u64,
) -> *mut u8 {
    let table_index = TableIndex::from_u32(table_index);
    let (instance, registry) = store.instance_and_module_registry_mut(instance);
    let table = instance.get_table_with_lazy_init(registry, table_index, core::iter::once(index));
    let elem = table
        .get_func(index)
        .expect("table access already bounds-checked");

    match elem {
        Some(ptr) => ptr.as_ptr().cast(),
        None => core::ptr::null_mut(),
    }
}

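/// Drops a GC reference that compiled Wasm code is done with (deferred
/// reference-counting collector only).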
#[cfg(feature = "gc-drc")]
fn drop_gc_ref(store: &mut dyn VMStore, _instance: InstanceId, gc_ref: u32) {
    log::trace!("libcalls::drop_gc_ref({gc_ref:#x})");
    let gc_ref = VMGcRef::from_raw_u32(gc_ref).expect("non-null VMGcRef");
    store
        .store_opaque_mut()
        .unwrap_gc_store_mut()
        .drop_gc_ref(gc_ref);
}

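/// Attempts to grow the GC heap by at least `bytes_needed` bytes (null
/// collector only), running a collection in the process. Returns an
/// allocation-too-large trap if the heap did not actually grow by the
/// requested amount.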
#[cfg(feature = "gc-null")]
fn grow_gc_heap(store: &mut dyn VMStore, _instance: InstanceId, bytes_needed: u64) -> Result<()> {
    let orig_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        store.gc(limiter.as_mut(), None, Some(bytes_needed)).await;
    })?;

    let new_len = u64::try_from(
        store
            .require_gc_store()?
            .gc_heap
            .vmmemory()
            .current_length(),
    )
    .unwrap();
    if orig_len
        .checked_add(bytes_needed)
        .is_none_or(|expected_len| new_len < expected_len)
    {
        return Err(crate::Trap::AllocationTooLarge.into());
    }

    Ok(())
}

#[cfg(feature = "gc-drc")]
fn gc_alloc_raw(
    store: &mut dyn VMStore,
    instance: InstanceId,
    kind_and_reserved: u32,
    module_interned_type_index: u32,
    size: u32,
    align: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::vm::VMGcHeader;
    use core::alloc::Layout;
    use wasmtime_environ::{ModuleInternedTypeIndex, VMGcKind};

    let kind = VMGcKind::from_high_bits_of_u32(kind_and_reserved);
    log::trace!("gc_alloc_raw(kind={kind:?}, size={size}, align={align})");

    let module = store
        .instance(instance)
        .runtime_module()
        .expect("should never allocate GC types defined in a dummy module");

    let module_interned_type_index = ModuleInternedTypeIndex::from_u32(module_interned_type_index);
    let shared_type_index = module
        .signatures()
        .shared_type(module_interned_type_index)
        .expect("should have engine type index for module type index");

    let mut header = VMGcHeader::from_kind_and_index(kind, shared_type_index);
    header.set_reserved_u26(kind_and_reserved & VMGcKind::UNUSED_MASK);

    let size = usize::try_from(size).unwrap();
    let align = usize::try_from(align).unwrap();
    assert!(align.is_power_of_two());
    let layout = Layout::from_size_align(size, align).map_err(|e| {
        let err = Error::from(crate::Trap::AllocationTooLarge);
        err.context(e)
    })?;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let gc_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_raw(header, layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

#[cfg(feature = "gc")]
unsafe fn intern_func_ref_for_gc_heap(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    func_ref: *mut u8,
) -> Result<u32> {
    use crate::{store::AutoAssertNoGc, vm::SendSyncPtr};
    use core::ptr::NonNull;

    let mut store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref = func_ref.cast::<VMFuncRef>();
    let func_ref = NonNull::new(func_ref).map(SendSyncPtr::new);

    let func_ref_id = unsafe {
        store
            .require_gc_store_mut()?
            .func_ref_table
            .intern(func_ref)
    };
    Ok(func_ref_id.into_raw())
}

#[cfg(feature = "gc")]
fn get_interned_func_ref(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func_ref_id: u32,
    module_interned_type_index: u32,
) -> *mut u8 {
    use super::FuncRefTableId;
    use crate::store::AutoAssertNoGc;
    use wasmtime_environ::{ModuleInternedTypeIndex, packed_option::ReservedValue};

    let store = AutoAssertNoGc::new(store.store_opaque_mut());

    let func_ref_id = FuncRefTableId::from_raw(func_ref_id);
    let module_interned_type_index = ModuleInternedTypeIndex::from_bits(module_interned_type_index);

    let func_ref = if module_interned_type_index.is_reserved_value() {
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_untyped(func_ref_id)
    } else {
        let types = store.engine().signatures();
        let engine_ty = store
            .instance(instance)
            .engine_type_index(module_interned_type_index);
        store
            .unwrap_gc_store()
            .func_ref_table
            .get_typed(types, func_ref_id, engine_ty)
    };

    func_ref.map_or(core::ptr::null_mut(), |f| f.as_ptr().cast())
}

#[cfg(feature = "gc")]
fn array_new_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let data_index = DataIndex::from_u32(data_index);
        let instance = store.instance(instance_id);

        let data_range = instance.wasm_data_range(data_index);
        let shared_ty = instance.engine_type_index(array_type_index);
        let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
        let one_elem_size = array_ty
            .element_type()
            .data_byte_size()
            .expect("Wasm validation ensures that this type has a defined byte size");
        let byte_len = len
            .checked_mul(one_elem_size)
            .and_then(|x| usize::try_from(x).ok())
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
        instance
            .wasm_data(data_range.clone())
            .get(src..)
            .and_then(|d| d.get(..byte_len))
            .ok_or_else(|| Trap::MemoryOutOfBounds)?;

        let gc_layout = store
            .engine()
            .signatures()
            .layout(shared_ty)
            .expect("array types have GC layouts");
        let array_layout = gc_layout.unwrap_array();
        let array_ref = store
            .retry_after_gc_async(limiter.as_mut(), (), |store, ()| {
                store
                    .unwrap_gc_store_mut()
                    .alloc_uninit_array(shared_ty, len, &array_layout)?
                    .map_err(|bytes_needed| crate::GcHeapOutOfMemory::new((), bytes_needed).into())
            })
            .await?;

        let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
        let gc_store = gc_store.unwrap();
        let data = &instance.wasm_data(data_range)[src..][..byte_len];

        gc_store
            .gc_object_data(array_ref.as_gc_ref())
            .copy_from_slice(array_layout.base_size, data);

        let raw = gc_store.expose_gc_ref_to_wasm(array_ref.into());
        Ok(raw)
    })?
}

#[cfg(feature = "gc")]
fn array_init_data(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    data_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::ArrayType;
    use wasmtime_environ::ModuleInternedTypeIndex;

    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let data_index = DataIndex::from_u32(data_index);
    let instance = store.instance(instance_id);

    log::trace!(
        "array.init_data(array={array:#x}, dst={dst}, data_index={data_index:?}, src={src}, len={len})",
    );

    let gc_ref = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
    let array = gc_ref
        .into_arrayref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an array");

    let dst = usize::try_from(dst).map_err(|_| Trap::MemoryOutOfBounds)?;
    let src = usize::try_from(src).map_err(|_| Trap::MemoryOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::MemoryOutOfBounds)?;

    let array_len = array.len(store.store_opaque());
    let array_len = usize::try_from(array_len).map_err(|_| Trap::ArrayOutOfBounds)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let one_elem_size = array_ty
        .element_type()
        .data_byte_size()
        .expect("Wasm validation ensures that this type has a defined byte size");
    let data_len = len
        .checked_mul(usize::try_from(one_elem_size).unwrap())
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    let data_range = instance.wasm_data_range(data_index);
    instance
        .wasm_data(data_range.clone())
        .get(src..)
        .and_then(|d| d.get(..data_len))
        .ok_or_else(|| Trap::MemoryOutOfBounds)?;

    let dst_offset = u32::try_from(dst)
        .unwrap()
        .checked_mul(one_elem_size)
        .unwrap();

    let array_layout = store
        .engine()
        .signatures()
        .layout(shared_ty)
        .expect("array types have GC layouts");
    let array_layout = array_layout.unwrap_array();

    let obj_offset = array_layout.base_size.checked_add(dst_offset).unwrap();

    let (gc_store, instance) = store.optional_gc_store_and_instance_mut(instance_id);
    let gc_store = gc_store.unwrap();
    let data = &instance.wasm_data(data_range)[src..][..data_len];
    gc_store
        .gc_object_data(array.as_gc_ref())
        .copy_from_slice(obj_offset, data);

    Ok(())
}

#[cfg(feature = "gc")]
fn array_new_elem(
    store: &mut dyn VMStore,
    instance_id: InstanceId,
    array_type_index: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<core::num::NonZeroU32> {
    use crate::{
        ArrayRef, ArrayRefPre, ArrayType, Func, OpaqueRootScope, RootedGcRefImpl, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    let array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
    let elem_index = ElemIndex::from_u32(elem_index);
    let instance = store.instance(instance_id);

    let mut storage = None;
    let elements = instance.passive_element_segment(&mut storage, elem_index);

    let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
    let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

    let shared_ty = instance.engine_type_index(array_type_index);
    let array_ty = ArrayType::from_shared_type_index(store.engine(), shared_ty);
    let pre = ArrayRefPre::_new(store, array_ty);

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let mut store = OpaqueRootScope::new(store);
        let mut vals = Vec::with_capacity(usize::try_from(elements.len()).unwrap());
        match elements {
            TableSegmentElements::Functions(fs) => {
                let store_id = store.id();
                let (mut instance, registry) = store.instance_and_module_registry_mut(instance_id);
                vals.extend(
                    fs.get(src..)
                        .and_then(|s| s.get(..len))
                        .ok_or_else(|| Trap::TableOutOfBounds)?
                        .iter()
                        .map(|f| {
                            let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                            let func = unsafe {
                                raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p))
                            };
                            Val::FuncRef(func)
                        }),
                );
            }
            TableSegmentElements::Expressions(xs) => {
                let xs = xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?;

                let mut const_context = ConstEvalContext::new(instance_id);
                let mut const_evaluator = ConstExprEvaluator::default();

                for x in xs.iter() {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
            }
        }

        let array = ArrayRef::_new_fixed_async(&mut store, limiter.as_mut(), &pre, &vals).await?;

        let mut store = AutoAssertNoGc::new(&mut store);
        let gc_ref = array.try_clone_gc_ref(&mut store)?;
        let raw = store.unwrap_gc_store_mut().expose_gc_ref_to_wasm(gc_ref);
        Ok(raw)
    })?
}

#[cfg(feature = "gc")]
fn array_init_elem(
    store: &mut dyn VMStore,
    instance: InstanceId,
    array_type_index: u32,
    array: u32,
    dst: u32,
    elem_index: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{
        ArrayRef, Func, OpaqueRootScope, Val,
        store::AutoAssertNoGc,
        vm::const_expr::{ConstEvalContext, ConstExprEvaluator},
    };
    use wasmtime_environ::{ModuleInternedTypeIndex, TableSegmentElements};

    let (mut limiter, store) = store.resource_limiter_and_store_opaque();
    block_on!(store, async |store| {
        let mut store = OpaqueRootScope::new(store);

        let _array_type_index = ModuleInternedTypeIndex::from_u32(array_type_index);
        let elem_index = ElemIndex::from_u32(elem_index);

        log::trace!(
            "array.init_elem(array={array:#x}, dst={dst}, elem_index={elem_index:?}, src={src}, len={len})",
        );

        let array = VMGcRef::from_raw_u32(array).ok_or_else(|| Trap::NullReference)?;
        let array = store.unwrap_gc_store_mut().clone_gc_ref(&array);
        let array = {
            let mut no_gc = AutoAssertNoGc::new(&mut store);
            ArrayRef::from_cloned_gc_ref(&mut no_gc, array)
        };

        let array_len = array._len(&store)?;
        log::trace!("array_len = {array_len}");
        if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > array_len {
            return Err(Trap::ArrayOutOfBounds.into());
        }

        let mut storage = None;
        let store_id = store.id();
        let (mut instance, registry) = store.instance_and_module_registry_mut(instance);
        let elements = instance.passive_element_segment(&mut storage, elem_index);

        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        let vals = match elements {
            TableSegmentElements::Functions(fs) => fs
                .get(src..)
                .and_then(|s| s.get(..len))
                .ok_or_else(|| Trap::TableOutOfBounds)?
                .iter()
                .map(|f| {
                    let raw_func_ref = instance.as_mut().get_func_ref(registry, *f);
                    let func = unsafe { raw_func_ref.map(|p| Func::from_vm_func_ref(store_id, p)) };
                    Val::FuncRef(func)
                })
                .collect::<Vec<_>>(),
            TableSegmentElements::Expressions(xs) => {
                let mut const_context = ConstEvalContext::new(instance.id());
                let mut const_evaluator = ConstExprEvaluator::default();

                let mut vals = Vec::new();
                for x in xs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or_else(|| Trap::TableOutOfBounds)?
                {
                    let val = *const_evaluator
                        .eval(&mut store, limiter.as_mut(), &mut const_context, x)
                        .await?;
                    vals.push(val);
                }
                vals
            }
        };

        for (i, val) in vals.into_iter().enumerate() {
            let i = u32::try_from(i).unwrap();
            let j = dst.checked_add(i).unwrap();
            array._set(&mut store, j, val)?;
        }

        Ok(())
    })?
}

#[cfg(feature = "gc")]
fn array_copy(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    dst_array: u32,
    dst: u32,
    src_array: u32,
    src: u32,
    len: u32,
) -> Result<()> {
    use crate::{ArrayRef, OpaqueRootScope, store::AutoAssertNoGc};

    log::trace!(
        "array.copy(dst_array={dst_array:#x}, dst_index={dst}, src_array={src_array:#x}, src_index={src}, len={len})",
    );

    let mut store = OpaqueRootScope::new(store.store_opaque_mut());
    let mut store = AutoAssertNoGc::new(&mut store);

    let dst_array = VMGcRef::from_raw_u32(dst_array).ok_or_else(|| Trap::NullReference)?;
    let dst_array = store.unwrap_gc_store_mut().clone_gc_ref(&dst_array);
    let dst_array = ArrayRef::from_cloned_gc_ref(&mut store, dst_array);
    let src_array = VMGcRef::from_raw_u32(src_array).ok_or_else(|| Trap::NullReference)?;
    let src_array = store.unwrap_gc_store_mut().clone_gc_ref(&src_array);
    let src_array = ArrayRef::from_cloned_gc_ref(&mut store, src_array);

    let dst_array_len = dst_array._len(&store)?;
    if dst.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > dst_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    let src_array_len = src_array._len(&store)?;
    if src.checked_add(len).ok_or_else(|| Trap::ArrayOutOfBounds)? > src_array_len {
        return Err(Trap::ArrayOutOfBounds.into());
    }

    let mut store = AutoAssertNoGc::new(&mut store);
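    // The source and destination may be the same array with overlapping
    // ranges, so pick the iteration order that never clobbers an element
    // before it is read: front-to-back when copying toward lower indices,
    // back-to-front otherwise.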
    if src > dst {
        for i in 0..len {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    } else {
        for i in (0..len).rev() {
            let src_elem = src_array._get(&mut store, src + i)?;
            let dst_i = dst + i;
            dst_array._set(&mut store, dst_i, src_elem)?;
        }
    }
    Ok(())
}

#[cfg(feature = "gc")]
fn is_subtype(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    actual_engine_type: u32,
    expected_engine_type: u32,
) -> u32 {
    use wasmtime_environ::VMSharedTypeIndex;

    let actual = VMSharedTypeIndex::from_u32(actual_engine_type);
    let expected = VMSharedTypeIndex::from_u32(expected_engine_type);

    let is_subtype: bool = store.engine().signatures().is_subtype(actual, expected);

    log::trace!("is_subtype(actual={actual:?}, expected={expected:?}) -> {is_subtype}");
    is_subtype as u32
}

#[cfg(feature = "threads")]
fn memory_atomic_notify(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    count: u32,
) -> Result<u32, Trap> {
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_notify(addr_index, count)
}

#[cfg(feature = "threads")]
fn memory_atomic_wait32(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u32,
    timeout: u64,
) -> Result<u32, Trap> {
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait32(addr_index, expected, timeout)? as u32)
}

#[cfg(feature = "threads")]
fn memory_atomic_wait64(
    store: &mut dyn VMStore,
    instance: InstanceId,
    memory_index: u32,
    addr_index: u64,
    expected: u64,
    timeout: u64,
) -> Result<u32, Trap> {
    let timeout = (timeout as i64 >= 0).then(|| Duration::from_nanos(timeout));
    let memory = DefinedMemoryIndex::from_u32(memory_index);
    Ok(store
        .instance_mut(instance)
        .get_defined_memory_mut(memory)
        .atomic_wait64(addr_index, expected, timeout)? as u32)
}

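/// Invoked when fuel runs out: attempts to refuel via the store, trapping
/// with `Trap::OutOfFuel` if no more fuel is available, and yields to the
/// async executor when a fuel yield interval is configured.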
fn out_of_gas(store: &mut dyn VMStore, _instance: InstanceId) -> Result<()> {
    block_on!(store, async |store| {
        if !store.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if store.fuel_yield_interval.is_some() {
            crate::runtime::vm::Yield::new().await;
        }
        Ok(())
    })?
}

#[cfg(target_has_atomic = "64")]
fn new_epoch(store: &mut dyn VMStore, _instance: InstanceId) -> Result<NextEpoch> {
    use crate::UpdateDeadline;

    let update_deadline = store.new_epoch_updated_deadline()?;
    block_on!(store, async move |store| {
        let delta = match update_deadline {
            UpdateDeadline::Interrupt => return Err(Trap::Interrupt.into()),
            UpdateDeadline::Continue(delta) => delta,

            #[cfg(feature = "async")]
            UpdateDeadline::Yield(delta) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::Yield` without enabling \
                     async support in the config"
                );
                crate::runtime::vm::Yield::new().await;
                delta
            }
            #[cfg(feature = "async")]
            UpdateDeadline::YieldCustom(delta, future) => {
                assert!(
                    store.async_support(),
                    "cannot use `UpdateDeadline::YieldCustom` without enabling \
                     async support in the config"
                );
                future.await;
                delta
            }
        };

        store.set_epoch_deadline(delta);
        Ok(NextEpoch(store.get_epoch_deadline()))
    })?
}

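/// The new epoch deadline returned to Wasm from `new_epoch`; `u64::MAX` is
/// reserved as the sentinel indicating that the host is unwinding instead.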
struct NextEpoch(u64);

unsafe impl HostResultHasUnwindSentinel for NextEpoch {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.0
    }
}

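// The `check_*`, `malloc_start`, `free_start`, and `update_*` libcalls below
// are hooks for `wmemcheck`, Wasmtime's checker for guest `malloc`/`free`
// usage and invalid or out-of-bounds memory accesses.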
#[cfg(feature = "wmemcheck")]
fn check_malloc(store: &mut dyn VMStore, instance: InstanceId, addr: u32, len: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.malloc(addr as usize, len as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(DoubleMalloc { addr, len }) => {
                bail!("Double malloc at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Malloc out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

#[cfg(feature = "wmemcheck")]
fn check_free(store: &mut dyn VMStore, instance: InstanceId, addr: u32) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.free(addr as usize);
        wmemcheck_state.memcheck_on();
        match result {
            Ok(()) => {}
            Err(InvalidFree { addr }) => {
                bail!("Invalid free at addr {:#x}", addr)
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

#[cfg(feature = "wmemcheck")]
fn check_load(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.read(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidRead { addr, len }) => {
                bail!("Invalid load at addr {:#x} of size {}", addr, len);
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Load out of bounds at addr {:#x} of size {}", addr, len);
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

#[cfg(feature = "wmemcheck")]
fn check_store(
    store: &mut dyn VMStore,
    instance: InstanceId,
    num_bytes: u32,
    addr: u32,
    offset: u32,
) -> Result<()> {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        let result = wmemcheck_state.write(addr as usize + offset as usize, num_bytes as usize);
        match result {
            Ok(()) => {}
            Err(InvalidWrite { addr, len }) => {
                bail!("Invalid store at addr {:#x} of size {}", addr, len)
            }
            Err(OutOfBounds { addr, len }) => {
                bail!("Store out of bounds at addr {:#x} of size {}", addr, len)
            }
            _ => {
                panic!("unreachable")
            }
        }
    }
    Ok(())
}

#[cfg(feature = "wmemcheck")]
fn malloc_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

#[cfg(feature = "wmemcheck")]
fn free_start(store: &mut dyn VMStore, instance: InstanceId) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        wmemcheck_state.memcheck_off();
    }
}

#[cfg(feature = "wmemcheck")]
fn update_stack_pointer(_store: &mut dyn VMStore, _instance: InstanceId, _value: u32) {}

#[cfg(feature = "wmemcheck")]
fn update_mem_size(store: &mut dyn VMStore, instance: InstanceId, num_pages: u32) {
    let instance = store.instance_mut(instance);
    if let Some(wmemcheck_state) = instance.wmemcheck_state_mut() {
        const KIB: usize = 1024;
        let num_bytes = num_pages as usize * 64 * KIB;
        wmemcheck_state.update_mem_size(num_bytes);
    }
}

fn floor_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_floor(val)
}

fn floor_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_floor(val)
}

fn ceil_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_ceil(val)
}

fn ceil_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_ceil(val)
}

fn trunc_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_trunc(val)
}

fn trunc_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_trunc(val)
}

fn nearest_f32(_store: &mut dyn VMStore, _instance: InstanceId, val: f32) -> f32 {
    wasmtime_math::WasmFloat::wasm_nearest(val)
}

fn nearest_f64(_store: &mut dyn VMStore, _instance: InstanceId, val: f64) -> f64 {
    wasmtime_math::WasmFloat::wasm_nearest(val)
}

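// Scalar fallbacks for SIMD swizzle/shuffle, matching the Wasm semantics of
// `i8x16.swizzle` and `i8x16.shuffle`: lane indices out of range select 0.
// These bodies are only compiled on x86_64 with SSE, where `i8x16` has a
// register representation; elsewhere the builtins are never invoked, so the
// stubs below are `unreachable!()`.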
#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, a: i8x16, b: i8x16) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let a = U { reg: a }.mem;
        let b = U { reg: b }.mem;

        let select = |arr: &[u8; 16], byte: u8| {
            if byte >= 16 { 0x00 } else { arr[byte as usize] }
        };

        U {
            mem: [
                select(&a, b[0]),
                select(&a, b[1]),
                select(&a, b[2]),
                select(&a, b[3]),
                select(&a, b[4]),
                select(&a, b[5]),
                select(&a, b[6]),
                select(&a, b[7]),
                select(&a, b[8]),
                select(&a, b[9]),
                select(&a, b[10]),
                select(&a, b[11]),
                select(&a, b[12]),
                select(&a, b[13]),
                select(&a, b[14]),
                select(&a, b[15]),
            ],
        }
        .reg
    }
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_swizzle(_store: &mut dyn VMStore, _instance: InstanceId, _a: i8x16, _b: i8x16) -> i8x16 {
    unreachable!()
}

#[cfg(all(target_arch = "x86_64", target_feature = "sse"))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    a: i8x16,
    b: i8x16,
    c: i8x16,
) -> i8x16 {
    union U {
        reg: i8x16,
        mem: [u8; 16],
    }

    unsafe {
        let ab = [U { reg: a }.mem, U { reg: b }.mem];
        let c = U { reg: c }.mem;

        let select = |arr: &[[u8; 16]; 2], byte: u8| {
            if byte >= 32 {
                0x00
            } else if byte >= 16 {
                arr[1][byte as usize - 16]
            } else {
                arr[0][byte as usize]
            }
        };

        U {
            mem: [
                select(&ab, c[0]),
                select(&ab, c[1]),
                select(&ab, c[2]),
                select(&ab, c[3]),
                select(&ab, c[4]),
                select(&ab, c[5]),
                select(&ab, c[6]),
                select(&ab, c[7]),
                select(&ab, c[8]),
                select(&ab, c[9]),
                select(&ab, c[10]),
                select(&ab, c[11]),
                select(&ab, c[12]),
                select(&ab, c[13]),
                select(&ab, c[14]),
                select(&ab, c[15]),
            ],
        }
        .reg
    }
}

#[cfg(not(all(target_arch = "x86_64", target_feature = "sse")))]
fn i8x16_shuffle(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    _a: i8x16,
    _b: i8x16,
    _c: i8x16,
) -> i8x16 {
    unreachable!()
}

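// Lane-wise fused multiply-add fallbacks for the `f32x4`/`f64x2` FMA
// builtins: each lane computes `x * y + z` via
// `wasmtime_math::WasmFloat::wasm_mul_add`.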
fn fma_f32x4(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f32x4,
    y: f32x4,
    z: f32x4,
) -> f32x4 {
    union U {
        reg: f32x4,
        mem: [f32; 4],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[2], y[2], z[2]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[3], y[3], z[3]),
            ],
        }
        .reg
    }
}

fn fma_f64x2(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    x: f64x2,
    y: f64x2,
    z: f64x2,
) -> f64x2 {
    union U {
        reg: f64x2,
        mem: [f64; 2],
    }

    unsafe {
        let x = U { reg: x }.mem;
        let y = U { reg: y }.mem;
        let z = U { reg: z }.mem;

        U {
            mem: [
                wasmtime_math::WasmFloat::wasm_mul_add(x[0], y[0], z[0]),
                wasmtime_math::WasmFloat::wasm_mul_add(x[1], y[1], z[1]),
            ],
        }
        .reg
    }
}

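/// Raises an unconditional trap with the trap code embedded in `code`; the
/// `Infallible` return type records that this libcall never returns normally.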
fn trap(
    _store: &mut dyn VMStore,
    _instance: InstanceId,
    code: u8,
) -> Result<Infallible, TrapReason> {
    Err(TrapReason::Wasm(
        wasmtime_environ::Trap::from_u8(code).unwrap(),
    ))
}

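/// Resumes a trap or error previously recorded in the store by unwinding out
/// of Wasm; like `trap` above, this never returns normally.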
fn raise(store: &mut dyn VMStore, _instance: InstanceId) {
    unsafe { crate::runtime::vm::traphandlers::raise_preexisting_trap(store) }
}

#[cfg(feature = "stack-switching")]
fn cont_new(
    store: &mut dyn VMStore,
    instance: InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> Result<Option<AllocationSize>> {
    let ans =
        crate::vm::stack_switching::cont_new(store, instance, func, param_count, result_count)?;
    Ok(Some(AllocationSize(ans.cast::<u8>() as usize)))
}

#[cfg(feature = "gc")]
fn get_instance_id(_store: &mut dyn VMStore, instance: InstanceId) -> u32 {
    instance.as_u32()
}

#[cfg(feature = "gc")]
fn throw_ref(
    store: &mut dyn VMStore,
    _instance: InstanceId,
    exnref: u32,
) -> Result<(), TrapReason> {
    let exnref = VMGcRef::from_raw_u32(exnref).ok_or_else(|| Trap::NullReference)?;
    let exnref = store.unwrap_gc_store_mut().clone_gc_ref(&exnref);
    let exnref = exnref
        .into_exnref(&*store.unwrap_gc_store().gc_heap)
        .expect("gc ref should be an exception object");
    store.set_pending_exception(exnref);
    Err(TrapReason::Exception)
}