wasmtime/runtime/vm/vmcontext.rs
1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{GcStore, InterpreterRef, VMGcRef, VmPtr, VmSafe};
9use crate::store::StoreOpaque;
10use core::cell::UnsafeCell;
11use core::ffi::c_void;
12use core::fmt;
13use core::marker;
14use core::mem::{self, MaybeUninit};
15use core::ptr::{self, NonNull};
16use core::sync::atomic::{AtomicUsize, Ordering};
17use sptr::Strict;
18use wasmtime_environ::{
19 BuiltinFunctionIndex, DefinedMemoryIndex, Unsigned, VMSharedTypeIndex, WasmHeapTopType,
20 WasmValType, VMCONTEXT_MAGIC,
21};
22
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
///
/// Note that this is an `extern "C"` function pointer so it has a stable ABI
/// shared with compiled code; see `VMFuncRef::array_call_native` below for
/// the call site that invokes pointers of this type.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>,
    NonNull<VMOpaqueContext>,
    NonNull<ValRaw>,
    usize,
) -> bool;
51
/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
///
/// Wraps the byte-sized `VMFunctionBody` placeholder purely for type safety;
/// values of this type are only ever handled through pointers.
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);
57
/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
///
/// Like `VMArrayCallFunction`, this wraps the placeholder `VMFunctionBody`
/// type and is only ever handled through pointers.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
68
/// An imported function.
///
/// This layout is accessed directly by compiled wasm code, so the field order
/// and offsets here must agree with `VMOffsets` (verified by the offset tests
/// in this file).
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}
91
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native layout of `VMFunctionImport` must agree exactly with the
    /// layout that `wasmtime_environ` tells compiled code to expect.
    #[test]
    fn check_vmfunction_import_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.size_of_vmfunction_import()),
            size_of::<VMFunctionImport>(),
        );
        assert_eq!(
            usize::from(layout.vmfunction_import_wasm_call()),
            offset_of!(VMFunctionImport, wasm_call),
        );
        assert_eq!(
            usize::from(layout.vmfunction_import_array_call()),
            offset_of!(VMFunctionImport, array_call),
        );
        assert_eq!(
            usize::from(layout.vmfunction_import_vmctx()),
            offset_of!(VMFunctionImport, vmctx),
        );
    }
}
121
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
///
/// The single `u8` only exists so the type has nonzero size; values of this
/// type are intended to be used behind pointers, never by value.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}
131
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    /// `VMFunctionBody` is a placeholder byte type, so it must be exactly
    /// one byte large.
    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(1, size_of::<VMFunctionBody>());
    }
}
142
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
///
/// Layout is checked against `VMOffsets` (and `VMComponentOffsets`) by the
/// tests below, since compiled code reads these fields directly.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTable {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTable {}
157
#[cfg(test)]
mod test_vmtable {
    use super::VMTable;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native layout of `VMTable` must agree with what `VMOffsets`
    /// reports to compiled code.
    #[test]
    fn check_vmtable_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(usize::from(layout.size_of_vmtable()), size_of::<VMTable>());
        assert_eq!(
            usize::from(layout.vmtable_from()),
            offset_of!(VMTable, from),
        );
        assert_eq!(
            usize::from(layout.vmtable_vmctx()),
            offset_of!(VMTable, vmctx),
        );
    }

    /// `VMTable` is also used to record tables used by components, so the
    /// size computed by `VMOffsets` and by `VMComponentOffsets` must never
    /// drift apart.
    #[test]
    fn ensure_sizes_match() {
        let empty_module = Module::new();
        let core_layout = VMOffsets::new(HostPtr, &empty_module);
        let empty_component = Component::default();
        let component_layout = VMComponentOffsets::new(HostPtr, &empty_component);
        assert_eq!(
            core_layout.size_of_vmtable(),
            component_layout.size_of_vmtable()
        );
    }
}
196
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    ///
    /// Note: this is a defined-memory index within the owning instance, not
    /// an index into this importing instance's memory index space.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}
214
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native layout of `VMMemoryImport` must agree with what
    /// `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmmemory_import_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.size_of_vmmemory_import()),
            size_of::<VMMemoryImport>(),
        );
        assert_eq!(
            usize::from(layout.vmmemory_import_from()),
            offset_of!(VMMemoryImport, from),
        );
        assert_eq!(
            usize::from(layout.vmmemory_import_vmctx()),
            offset_of!(VMMemoryImport, vmctx),
        );
    }
}
240
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}
256
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native layout of `VMGlobalImport` must agree with what
    /// `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmglobal_import_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.size_of_vmglobal_import()),
            size_of::<VMGlobalImport>(),
        );
        assert_eq!(
            usize::from(layout.vmglobal_import_from()),
            offset_of!(VMGlobalImport, from),
        );
    }
}
278
/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
///
/// Like `VMGlobalImport`, no `vmctx` pointer is stored here; only the
/// pointer to the tag's definition is needed.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}
290
#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native layout of `VMTagImport` must agree with what `VMOffsets`
    /// reports to compiled code.
    #[test]
    fn check_vmtag_import_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.size_of_vmtag_import()),
            size_of::<VMTagImport>(),
        );
        assert_eq!(
            usize::from(layout.vmtag_import_from()),
            offset_of!(VMTagImport, from),
        );
    }
}
311
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}
332
333impl VMMemoryDefinition {
334 /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
335 /// performing a relaxed load; do not use this function for situations in
336 /// which a precise length is needed. Owned memories (i.e., non-shared) will
337 /// always return a precise result (since no concurrent modification is
338 /// possible) but shared memories may see an imprecise value--a
339 /// `current_length` potentially smaller than what some other thread
340 /// observes. Since Wasm memory only grows, this under-estimation may be
341 /// acceptable in certain cases.
342 pub fn current_length(&self) -> usize {
343 self.current_length.load(Ordering::Relaxed)
344 }
345
346 /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
347 /// `current_length`; see [`VMMemoryDefinition::current_length()`].
348 pub unsafe fn load(ptr: *mut Self) -> Self {
349 let other = &*ptr;
350 VMMemoryDefinition {
351 base: other.base,
352 current_length: other.current_length().into(),
353 }
354 }
355}
356
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The native layout of `VMMemoryDefinition` must agree with what
    /// `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.ptr.size_of_vmmemory_definition()),
            size_of::<VMMemoryDefinition>(),
        );
        assert_eq!(
            usize::from(layout.ptr.vmmemory_definition_base()),
            offset_of!(VMMemoryDefinition, base),
        );
        assert_eq!(
            usize::from(layout.ptr.vmmemory_definition_current_length()),
            offset_of!(VMMemoryDefinition, current_length),
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}
388
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    ///
    /// Stored as a host-pointer-sized `usize` so compiled code can load it
    /// with a single pointer-width access.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}
403
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native layout of `VMTableDefinition` must agree with what
    /// `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmtable_definition_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.size_of_vmtable_definition()),
            size_of::<VMTableDefinition>(),
        );
        assert_eq!(
            usize::from(layout.vmtable_definition_base()),
            offset_of!(VMTableDefinition, base),
        );
        assert_eq!(
            usize::from(layout.vmtable_definition_current_elements()),
            offset_of!(VMTableDefinition, current_elements),
        );
    }
}
429
/// The storage for a WebAssembly global defined within the instance.
///
/// The 16 bytes of storage are large enough for any wasm value type,
/// including `v128`, and the 16-byte alignment keeps every stored type
/// naturally aligned at offset 0.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // Raw untyped bytes; the typed accessors in the `impl` below reinterpret
    // the leading bytes as the global's actual wasm type.
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}
443
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Every type a global can hold must be storable at offset 0 of the
    /// global's storage without misalignment.
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<i32>() <= align_of::<VMGlobalDefinition>());
        assert!(align_of::<i64>() <= align_of::<VMGlobalDefinition>());
        assert!(align_of::<f32>() <= align_of::<VMGlobalDefinition>());
        assert!(align_of::<f64>() <= align_of::<VMGlobalDefinition>());
        assert!(align_of::<[u8; 16]>() <= align_of::<VMGlobalDefinition>());
    }

    /// The native size of `VMGlobalDefinition` must agree with what
    /// `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.ptr.size_of_vmglobal_definition()),
            size_of::<VMGlobalDefinition>(),
        );
    }

    /// The globals region inside a `VMContext` must start on a 16-byte
    /// boundary so each 16-byte-aligned `VMGlobalDefinition` lands aligned.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(layout.vmctx_globals_begin() % 16, 0);
    }

    /// A GC reference must fit within a global's inline storage.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
482
impl VMGlobalDefinition {
    // NB: every `as_*`/`get_*`/`set_*` accessor below reinterprets the
    // leading bytes of `storage` as a particular type. They are `unsafe`
    // because the caller must uphold the wasm type discipline: the global
    // must actually hold (or be in the process of being initialized to) a
    // value of the accessed type. The casts themselves are always aligned
    // thanks to the struct's `align(16)`.

    /// Construct a `VMGlobalDefinition`.
    ///
    /// The storage starts zeroed, which reads back as `0`/`0.0` through the
    /// numeric accessors.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// For GC-managed reference types this routes through the store's
    /// `GcStore`, which is why this is fallible.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        match wasm_ty {
            WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
            WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
            // Floats are moved as raw bits to avoid any NaN canonicalization.
            WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
            WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
            WasmValType::V128 => global.set_u128(raw.get_v128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => {
                    let r = VMGcRef::from_raw_u32(raw.get_externref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Any => {
                    let r = VMGcRef::from_raw_u32(raw.get_anyref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// GC references are cloned out of the store (hence `&mut StoreOpaque`
    /// and the fallible return).
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        Ok(match wasm_ty {
            WasmValType::I32 => ValRaw::i32(*self.as_i32()),
            WasmValType::I64 => ValRaw::i64(*self.as_i64()),
            WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
            WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
            WasmValType::V128 => ValRaw::v128(self.get_u128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                // A null GC reference is encoded as raw `0`.
                WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                    Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                    None => 0,
                }),
                WasmHeapTopType::Any => ValRaw::anyref({
                    match self.as_gc_ref() {
                        Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }
                }),
                WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        })
    }

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as an u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as an u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as an u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>()))
    }

    /// Sets the 128-bit vector values.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the global value as a borrowed GC reference.
    ///
    /// Returns `None` for a null reference. Without the `gc` feature a
    /// non-null GC ref here would be a bug, hence the assert.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = (*raw_ptr).as_ref();
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    ///
    /// Unlike `write_gc_ref` this treats the destination as uninitialized
    /// (`MaybeUninit`), so no previous value is read or released.
    pub unsafe fn init_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self
            .storage
            .as_mut()
            .as_mut_ptr()
            .cast::<MaybeUninit<Option<VMGcRef>>>());

        gc_store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    ///
    /// The destination must already hold a valid (possibly null) GC
    /// reference; the store handles replacing the previous value.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>());
        assert!(cfg!(feature = "gc") || dest.is_none());

        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return a reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>())
    }

    /// Return a mutable reference to the value as a `VMFuncRef`.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>())
    }
}
702
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native size of `VMSharedTypeIndex` must agree with what
    /// `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmshared_type_index() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.size_of_vmshared_type_index()),
            size_of::<VMSharedTypeIndex>(),
        );
    }
}
719
/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Construct a `VMTagDefinition` for a tag whose associated function
    /// signature is the given shared, interned type index.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}
738
#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The native size of `VMTagDefinition` must agree with what
    /// `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmtag_definition_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.ptr.size_of_vmtag_definition()),
            size_of::<VMTagDefinition>(),
        );
    }

    /// The tags region inside a `VMContext` must start 16-byte aligned.
    #[test]
    fn check_vmtag_begins_aligned() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(layout.vmctx_tags_begin() % 16, 0);
    }
}
762
/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
///
/// This layout is read directly by compiled wasm code; offsets are verified
/// against `VMOffsets` by the tests below.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}
809
impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the length of `args_and_results`
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns whether a trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    pub unsafe fn array_call(
        &self,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Dispatch on whether `array_call` holds pulley bytecode (interpret)
        // or a native `VMArrayCallNative` pointer (call directly).
        match pulley {
            Some(vm) => self.array_call_interpreted(vm, caller, args_and_results),
            None => self.array_call_native(caller, args_and_results),
        }
    }

    /// Invoke `array_call` through the pulley interpreter.
    unsafe fn array_call_interpreted(
        &self,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If `caller` is actually a `VMArrayCallHostFuncContext` then skip the
        // interpreter, even though it's available, as `array_call` will be
        // native code.
        if self.vmctx.as_non_null().as_ref().magic
            == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
        {
            return self.array_call_native(caller, args_and_results);
        }
        vm.call(
            self.array_call.as_non_null(),
            self.vmctx.as_non_null(),
            caller,
            args_and_results,
        )
    }

    /// Invoke `array_call` as a native function pointer.
    unsafe fn array_call_native(
        &self,
        caller: NonNull<VMOpaqueContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // Reinterpret the opaque data pointer as a `VMArrayCallNative`
        // function pointer via a union. This is only sound on the
        // non-interpreted path, where `array_call` is known to actually be
        // native code rather than pulley bytecode.
        union GetNativePointer {
            native: VMArrayCallNative,
            ptr: NonNull<VMArrayCallFunction>,
        }
        let native = GetNativePointer {
            ptr: self.array_call.as_non_null(),
        }
        .native;
        native(
            self.vmctx.as_non_null(),
            caller,
            args_and_results.cast(),
            args_and_results.len(),
        )
    }
}
891
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The native layout of `VMFuncRef` must agree with what `VMOffsets`
    /// reports to compiled code.
    #[test]
    fn check_vm_func_ref_offsets() {
        let empty_module = Module::new();
        let layout = VMOffsets::new(HostPtr, &empty_module);
        assert_eq!(
            usize::from(layout.ptr.size_of_vm_func_ref()),
            size_of::<VMFuncRef>(),
        );
        assert_eq!(
            usize::from(layout.ptr.vm_func_ref_array_call()),
            offset_of!(VMFuncRef, array_call),
        );
        assert_eq!(
            usize::from(layout.ptr.vm_func_ref_wasm_call()),
            offset_of!(VMFuncRef, wasm_call),
        );
        assert_eq!(
            usize::from(layout.ptr.vm_func_ref_type_index()),
            offset_of!(VMFuncRef, type_index),
        );
        assert_eq!(
            usize::from(layout.ptr.vm_func_ref_vmctx()),
            offset_of!(VMFuncRef, vmctx),
        );
    }
}
925
// Macro invoked with `wasmtime_environ::foreach_builtin_function!` below to
// generate `VMBuiltinFunctionsArray`: one function-pointer field per builtin
// libcall, each initialized from `crate::runtime::vm::libcalls::raw`.
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            #[allow(unused_doc_comments)]
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self>{
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    // Map the abstract parameter/result type names used in the builtin
    // declarations to concrete Rust types.
    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty u8) => (u8);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (NonNull<VMContext>);
}
977
// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

// Expand the macro above over the full list of builtins to actually define
// `VMBuiltinFunctionsArray` and its impl block.
wasmtime_environ::foreach_builtin_function!(define_builtin_array);

// Sanity-check that the generated array is exactly one pointer-sized slot per
// builtin, since compiled code indexes into it by `BuiltinFunctionIndex`.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
989
/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_fp: UnsafeCell<usize>,
}
1067
// SAFETY: the `VMStoreContext` type is a pod-type with no destructor, and we
// don't access any fields from other threads, so add in these trait impls
// which are otherwise not available due to the `fuel_consumed` and
// `epoch_deadline` `UnsafeCell` variables in `VMStoreContext`.
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}
1077
1078impl Default for VMStoreContext {
1079 fn default() -> VMStoreContext {
1080 VMStoreContext {
1081 stack_limit: UnsafeCell::new(usize::max_value()),
1082 fuel_consumed: UnsafeCell::new(0),
1083 epoch_deadline: UnsafeCell::new(0),
1084 last_wasm_exit_fp: UnsafeCell::new(0),
1085 last_wasm_exit_pc: UnsafeCell::new(0),
1086 last_wasm_entry_fp: UnsafeCell::new(0),
1087 }
1088 }
1089}
1090
#[cfg(test)]
mod test_vmstore_context {
    use super::VMStoreContext;
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Assert that the Rust-native offsets of `VMStoreContext` fields agree
    /// with the offsets that `VMOffsets` hands to compiled code.
    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);

        // Pairs of (actual Rust field offset, offset used by compiled code).
        let checks = [
            (
                offset_of!(VMStoreContext, stack_limit),
                usize::from(offsets.ptr.vmstore_context_stack_limit()),
            ),
            (
                offset_of!(VMStoreContext, fuel_consumed),
                usize::from(offsets.ptr.vmstore_context_fuel_consumed()),
            ),
            (
                offset_of!(VMStoreContext, epoch_deadline),
                usize::from(offsets.ptr.vmstore_context_epoch_deadline()),
            ),
            (
                offset_of!(VMStoreContext, last_wasm_exit_fp),
                usize::from(offsets.ptr.vmstore_context_last_wasm_exit_fp()),
            ),
            (
                offset_of!(VMStoreContext, last_wasm_exit_pc),
                usize::from(offsets.ptr.vmstore_context_last_wasm_exit_pc()),
            ),
            (
                offset_of!(VMStoreContext, last_wasm_entry_fp),
                usize::from(offsets.ptr.vmstore_context_last_wasm_entry_fp()),
            ),
        ];
        for (rust_offset, env_offset) in checks {
            assert_eq!(rust_offset, env_offset);
        }
    }
}
1127
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// There's some more discussion about this within `wasmtime/src/lib.rs` but
    /// the idea is that we want to tell the compiler that this contains
    /// pointers which transitively refer to itself, to suppress some
    /// optimizations that might otherwise assume this doesn't exist.
    ///
    /// The self-referential pointer we care about is the `*mut Store` pointer
    /// early on in this context, which if you follow through enough levels of
    /// nesting, eventually can refer back to this `VMContext`.
    pub _marker: marker::PhantomPinned,
}
1148
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// `opaque` must be a valid, dereferenceable pointer (it is read via
    /// `as_ref` below) and its pointee must actually be a `VMContext` — i.e.
    /// its leading `magic` field is `VMCONTEXT_MAGIC`. Passing any other kind
    /// of context is a bug; it is only caught by the debug assertion here,
    /// not at runtime in release builds.
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }
}
1173
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system. The constructor and accessor methods in the `impl`
/// block below perform the appropriate conversions.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,
}
1269
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C: exactly 16 bytes (the size of the largest variant, `v128`)
// with 8-byte alignment.
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// SAFETY: this type is just a bag-of-bits so it's up to the caller to figure
// out how to safely deal with threading concerns and safely access interior
// bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1282
1283impl fmt::Debug for ValRaw {
1284 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1285 struct Hex<T>(T);
1286 impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1287 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1288 let bytes = mem::size_of::<T>();
1289 let hex_digits_per_byte = 2;
1290 let hex_digits = bytes * hex_digits_per_byte;
1291 write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1292 }
1293 }
1294
1295 unsafe {
1296 f.debug_struct("ValRaw")
1297 .field("i32", &Hex(self.i32))
1298 .field("i64", &Hex(self.i64))
1299 .field("f32", &Hex(self.f32))
1300 .field("f64", &Hex(self.f64))
1301 .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1302 .field("funcref", &self.funcref)
1303 .field("externref", &Hex(self.externref))
1304 .field("anyref", &Hex(self.anyref))
1305 .finish()
1306 }
1307 }
1308}
1309
1310impl ValRaw {
1311 /// Create a null reference that is compatible with any of
1312 /// `{any,extern,func}ref`.
1313 pub fn null() -> ValRaw {
1314 unsafe {
1315 let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1316 debug_assert_eq!(raw.get_anyref(), 0);
1317 debug_assert_eq!(raw.get_externref(), 0);
1318 debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1319 raw
1320 }
1321 }
1322
1323 /// Creates a WebAssembly `i32` value
1324 #[inline]
1325 pub fn i32(i: i32) -> ValRaw {
1326 // Note that this is intentionally not setting the `i32` field, instead
1327 // setting the `i64` field with a zero-extended version of `i`. For more
1328 // information on this see the comments on `Lower for Result` in the
1329 // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
1330 // otherwise constrained to guarantee that the initial 64-bits are
1331 // always initialized.
1332 ValRaw::u64(i.unsigned().into())
1333 }
1334
1335 /// Creates a WebAssembly `i64` value
1336 #[inline]
1337 pub fn i64(i: i64) -> ValRaw {
1338 ValRaw { i64: i.to_le() }
1339 }
1340
1341 /// Creates a WebAssembly `i32` value
1342 #[inline]
1343 pub fn u32(i: u32) -> ValRaw {
1344 // See comments in `ValRaw::i32` for why this is setting the upper
1345 // 32-bits as well.
1346 ValRaw::u64(i.into())
1347 }
1348
1349 /// Creates a WebAssembly `i64` value
1350 #[inline]
1351 pub fn u64(i: u64) -> ValRaw {
1352 ValRaw::i64(i as i64)
1353 }
1354
1355 /// Creates a WebAssembly `f32` value
1356 #[inline]
1357 pub fn f32(i: u32) -> ValRaw {
1358 // See comments in `ValRaw::i32` for why this is setting the upper
1359 // 32-bits as well.
1360 ValRaw::u64(i.into())
1361 }
1362
1363 /// Creates a WebAssembly `f64` value
1364 #[inline]
1365 pub fn f64(i: u64) -> ValRaw {
1366 ValRaw { f64: i.to_le() }
1367 }
1368
1369 /// Creates a WebAssembly `v128` value
1370 #[inline]
1371 pub fn v128(i: u128) -> ValRaw {
1372 ValRaw {
1373 v128: i.to_le_bytes(),
1374 }
1375 }
1376
1377 /// Creates a WebAssembly `funcref` value
1378 #[inline]
1379 pub fn funcref(i: *mut c_void) -> ValRaw {
1380 ValRaw {
1381 funcref: Strict::map_addr(i, |i| i.to_le()),
1382 }
1383 }
1384
1385 /// Creates a WebAssembly `externref` value
1386 #[inline]
1387 pub fn externref(e: u32) -> ValRaw {
1388 assert!(cfg!(feature = "gc") || e == 0);
1389 ValRaw {
1390 externref: e.to_le(),
1391 }
1392 }
1393
1394 /// Creates a WebAssembly `anyref` value
1395 #[inline]
1396 pub fn anyref(r: u32) -> ValRaw {
1397 assert!(cfg!(feature = "gc") || r == 0);
1398 ValRaw { anyref: r.to_le() }
1399 }
1400
1401 /// Gets the WebAssembly `i32` value
1402 #[inline]
1403 pub fn get_i32(&self) -> i32 {
1404 unsafe { i32::from_le(self.i32) }
1405 }
1406
1407 /// Gets the WebAssembly `i64` value
1408 #[inline]
1409 pub fn get_i64(&self) -> i64 {
1410 unsafe { i64::from_le(self.i64) }
1411 }
1412
1413 /// Gets the WebAssembly `i32` value
1414 #[inline]
1415 pub fn get_u32(&self) -> u32 {
1416 self.get_i32().unsigned()
1417 }
1418
1419 /// Gets the WebAssembly `i64` value
1420 #[inline]
1421 pub fn get_u64(&self) -> u64 {
1422 self.get_i64().unsigned()
1423 }
1424
1425 /// Gets the WebAssembly `f32` value
1426 #[inline]
1427 pub fn get_f32(&self) -> u32 {
1428 unsafe { u32::from_le(self.f32) }
1429 }
1430
1431 /// Gets the WebAssembly `f64` value
1432 #[inline]
1433 pub fn get_f64(&self) -> u64 {
1434 unsafe { u64::from_le(self.f64) }
1435 }
1436
1437 /// Gets the WebAssembly `v128` value
1438 #[inline]
1439 pub fn get_v128(&self) -> u128 {
1440 unsafe { u128::from_le_bytes(self.v128) }
1441 }
1442
1443 /// Gets the WebAssembly `funcref` value
1444 #[inline]
1445 pub fn get_funcref(&self) -> *mut c_void {
1446 unsafe { Strict::map_addr(self.funcref, |i| usize::from_le(i)) }
1447 }
1448
1449 /// Gets the WebAssembly `externref` value
1450 #[inline]
1451 pub fn get_externref(&self) -> u32 {
1452 let externref = u32::from_le(unsafe { self.externref });
1453 assert!(cfg!(feature = "gc") || externref == 0);
1454 externref
1455 }
1456
1457 /// Gets the WebAssembly `anyref` value
1458 #[inline]
1459 pub fn get_anyref(&self) -> u32 {
1460 let anyref = u32::from_le(unsafe { self.anyref });
1461 assert!(cfg!(feature = "gc") || anyref == 0);
1462 anyref
1463 }
1464}
1465
/// An "opaque" version of `VMContext` which must be explicitly cast to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are cast
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    // The 32-bit "magic" described above; checked in debug builds when
    // converting back to a concrete context type, e.g. in
    // `VMContext::from_opaque`.
    pub(crate) magic: u32,
    // Suppresses optimizations around self-referential pointers; mirrors the
    // `_marker` field of `VMContext`.
    _marker: marker::PhantomPinned,
}
1485
1486impl VMOpaqueContext {
1487 /// Helper function to clearly indicate that casts are desired.
1488 #[inline]
1489 pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1490 ptr.cast()
1491 }
1492
1493 /// Helper function to clearly indicate that casts are desired.
1494 #[inline]
1495 pub fn from_vm_array_call_host_func_context(
1496 ptr: NonNull<VMArrayCallHostFuncContext>,
1497 ) -> NonNull<VMOpaqueContext> {
1498 ptr.cast()
1499 }
1500}