wasmtime/runtime/vm/instance.rs
1//! An `Instance` contains all the runtime state used by execution of a
2//! wasm module (except its callstack and register state). An
3//! `InstanceHandle` is a handle for an `Instance`.
4
5use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
6use crate::runtime::vm::export::Export;
7use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
8use crate::runtime::vm::table::{Table, TableElement, TableElementType};
9use crate::runtime::vm::vmcontext::{
10 VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
11 VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
12 VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
13};
14use crate::runtime::vm::{
15 ExportFunction, ExportGlobal, ExportMemory, ExportTable, ExportTag, GcStore, Imports,
16 ModuleRuntimeInfo, SendSyncPtr, VMFunctionBody, VMGcRef, VMStore, VMStoreRawPtr, VmPtr, VmSafe,
17 WasmFault,
18};
19use crate::store::{StoreInner, StoreOpaque};
20use crate::{prelude::*, StoreContextMut};
21use alloc::sync::Arc;
22use core::alloc::Layout;
23use core::any::Any;
24use core::ops::Range;
25use core::ptr::NonNull;
26#[cfg(target_has_atomic = "64")]
27use core::sync::atomic::AtomicU64;
28use core::{mem, ptr};
29use sptr::Strict;
30#[cfg(feature = "gc")]
31use wasmtime_environ::ModuleInternedTypeIndex;
32use wasmtime_environ::{
33 packed_option::ReservedValue, DataIndex, DefinedGlobalIndex, DefinedMemoryIndex,
34 DefinedTableIndex, DefinedTagIndex, ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex,
35 GlobalIndex, HostPtr, MemoryIndex, Module, PrimaryMap, PtrSize, TableIndex, TableInitialValue,
36 TableSegmentElements, TagIndex, Trap, VMOffsets, VMSharedTypeIndex, WasmHeapTopType,
37 VMCONTEXT_MAGIC,
38};
39#[cfg(feature = "wmemcheck")]
40use wasmtime_wmemcheck::Wmemcheck;
41
42mod allocator;
43pub use allocator::*;
44
45/// The pair of an instance and a raw pointer to its associated store.
46///
47/// ### Safety
48///
49/// Getting a borrow of a vmctx's store is one of the fundamental bits of unsafe
50/// code in Wasmtime. No matter how we architect the runtime, some kind of
51/// unsafe conversion from a raw vmctx pointer that Wasm is using into a Rust
52/// struct must happen.
53///
54/// It is our responsibility to ensure that multiple (exclusive) borrows of the
55/// vmctx's store never exist at the same time. The distinction between the
56/// `Instance` type (which doesn't expose its underlying vmctx pointer or a way
57/// to get a borrow of its associated store) and this type (which does) is
58/// designed to help with that.
59///
60/// Going from a `*mut VMContext` to a `&mut StoreInner<T>` is naturally unsafe
61/// due to the raw pointer usage, but additionally the `T` type parameter needs
62/// to be the same `T` that was used to define the `dyn VMStore` trait object
63/// that was stuffed into the vmctx.
64///
65/// ### Usage
66///
67/// Usage generally looks like:
68///
69/// 1. You get a raw `*mut VMContext` from Wasm
70///
71/// 2. You call `InstanceAndStore::from_vmctx` on that raw pointer
72///
73/// 3. You then call `InstanceAndStore::unpack_mut` (or another helper) to get
74/// the underlying `&mut Instance` and `&mut dyn VMStore` (or `&mut
75/// StoreInner<T>`).
76///
77/// 4. You then use whatever `Instance` methods you need to, each of which take
78/// a store argument as necessary.
79///
80/// In step (4) you no longer need to worry about double exclusive borrows of
81/// the store, so long as you don't do (1-2) again. Note also that the borrow
82/// checker prevents repeating step (3) if you never repeat (1-2). In general,
83/// steps (1-3) should be done in a single, common, internally-unsafe,
84/// plumbing-code bottleneck and the raw pointer should never be exposed to Rust
85/// code that does (4) after the `InstanceAndStore` is created. Follow this
86/// pattern, and everything using the resulting `Instance` and `Store` can be
87/// safe code (at least, with regards to accessing the store itself).
88///
89/// As an illustrative example, the common plumbing code for our various
90/// libcalls performs steps (1-3) before calling into each actual libcall
91/// implementation function that does (4). The plumbing code hides the raw vmctx
92/// pointer and never gives out access to it to the libcall implementation
93/// functions, nor does an `Instance` expose its internal vmctx pointer, which
94/// would allow unsafely repeating steps (1-2).
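///
/// As an illustrative sketch only (not a runnable doctest: it relies on
/// crate-internal unsafe APIs and a hypothetical `do_something` helper), the
/// shape of steps (1-4) is roughly:
///
/// ```ignore
/// unsafe fn libcall_shim(vmctx: NonNull<VMContext>) {
///     // Steps (1-2): the only place the raw vmctx pointer is handled.
///     InstanceAndStore::from_vmctx(vmctx, |pair| {
///         // Step (3): split into the instance and its store.
///         let (instance, store) = pair.unpack_mut();
///         // Step (4): safe code from here on; pass the store along
///         // explicitly instead of re-deriving it from the vmctx.
///         do_something(instance, store);
///     })
/// }
/// ```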
95#[repr(transparent)]
96pub struct InstanceAndStore {
97 instance: Instance,
98}
99
100impl InstanceAndStore {
101 /// Converts the provided `*mut VMContext` to an `InstanceAndStore`
102 /// reference and calls the provided closure with it.
103 ///
104 /// This method will move the `vmctx` pointer backwards to point to the
105 /// original `Instance` that precedes it. The closure is provided a
106 /// temporary reference to the `InstanceAndStore` with a constrained
107 /// lifetime to ensure that it doesn't accidentally escape.
108 ///
109 /// # Safety
110 ///
111 /// Callers must validate that the `vmctx` pointer is a valid allocation and
112 /// that it's valid to acquire `&mut InstanceAndStore` at this time. For
113 /// example this can't be called twice on the same `VMContext` to get two
114 /// active mutable borrows to the same `InstanceAndStore`.
115 ///
116 /// See also the safety discussion in this type's documentation.
117 #[inline]
118 pub(crate) unsafe fn from_vmctx<R>(
119 vmctx: NonNull<VMContext>,
120 f: impl for<'a> FnOnce(&'a mut Self) -> R,
121 ) -> R {
122 const _: () = assert!(mem::size_of::<InstanceAndStore>() == mem::size_of::<Instance>());
123 let mut ptr = vmctx
124 .byte_sub(mem::size_of::<Instance>())
125 .cast::<InstanceAndStore>();
126
127 f(ptr.as_mut())
128 }
129
130 /// Unpacks this `InstanceAndStore` into its underlying `Instance` and `dyn
131 /// VMStore`.
132 #[inline]
133 pub(crate) fn unpack_mut(&mut self) -> (&mut Instance, &mut dyn VMStore) {
134 unsafe {
135 let store = &mut *self.store_ptr();
136 (&mut self.instance, store)
137 }
138 }
139
140 /// Unpacks this `InstanceAndStore` into its underlying `Instance` and
141 /// `StoreInner<T>`.
142 ///
143 /// # Safety
144 ///
145 /// The `T` must be the same `T` that was used to define this store's
146 /// instance.
147 #[inline]
148 pub(crate) unsafe fn unpack_context_mut<T>(
149 &mut self,
150 ) -> (&mut Instance, StoreContextMut<'_, T>) {
151 let store_ptr = self.store_ptr().cast::<StoreInner<T>>();
152 (&mut self.instance, StoreContextMut(&mut *store_ptr))
153 }
154
155 /// Gets a pointer to this instance's `Store` which was originally
156 /// configured on creation.
157 ///
158 /// # Panics
159 ///
160 /// May panic if the originally configured store was `None`. That can happen
161 /// for host functions: a host function can't be queried for its original
162 /// `Store` because the pointer is just retained as null (host functions are
163 /// shared amongst threads and don't all belong to the same
164 /// store).
165 #[inline]
166 fn store_ptr(&self) -> *mut dyn VMStore {
167 self.instance.store.unwrap().0.as_ptr()
168 }
169}
170
171/// A type that roughly corresponds to a WebAssembly instance, but is also used
172/// for host-defined objects.
173///
174/// This structure is never allocated directly but is instead managed through
175/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
176/// dynamic size corresponding to the `module` configured within. Memory
177/// management of this structure is always externalized.
178///
179/// Instances here can correspond to actual instantiated modules, but this type
180/// is also used ubiquitously for host-defined objects. For example, creating a
181/// host-defined memory yields an instance whose `module` looks like it exports a
182/// single memory (and similarly for other constructs).
183///
184/// This `Instance` type is used as a ubiquitous representation for WebAssembly
185/// values, whether they were created on the host or through a module.
186#[repr(C)] // ensure that the vmctx field is last.
187pub struct Instance {
188 /// The runtime info (corresponding to the "compiled module"
189 /// abstraction in higher layers) that is retained and needed for
190 /// lazy initialization. This provides access to the underlying
191 /// Wasm module entities, the compiled JIT code, metadata about
192 /// functions, lazy initialization state, etc.
193 runtime_info: ModuleRuntimeInfo,
194
195 /// WebAssembly linear memory data.
196 ///
197 /// This is where all runtime information about defined linear memories in
198 /// this module lives.
199 ///
200 /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
201 /// must be given back to the instance allocator when deallocating each
202 /// memory.
203 memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
204
205 /// WebAssembly table data.
206 ///
207 /// Like memories, this is only for defined tables in the module and
208 /// contains all of their runtime state.
209 ///
210 /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
211 /// must be given back to the instance allocator when deallocating each
212 /// table.
213 tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
214
215 /// Stores the dropped passive element segments in this instantiation by index.
216 /// If the index is present in the set, the segment has been dropped.
217 dropped_elements: EntitySet<ElemIndex>,
218
219 /// Stores the dropped passive data segments in this instantiation by index.
220 /// If the index is present in the set, the segment has been dropped.
221 dropped_data: EntitySet<DataIndex>,
222
223 /// Hosts can store arbitrary per-instance information here.
224 ///
225 /// Most of the time from Wasmtime this is `Box::new(())`, a noop
226 /// allocation, but some host-defined objects will store their state here.
227 host_state: Box<dyn Any + Send + Sync>,
228
229 /// A pointer to the `vmctx` field at the end of the `Instance`.
230 ///
231 /// If you're looking at this, a reasonable question would be "why do we need
232 /// a pointer to ourselves?" because, after all, the pointer's value is
233 /// trivially derivable from any `&Instance` pointer. The rationale for this
234 /// field's existence is subtle, but it's required for correctness. The
235 /// short version is "this makes miri happy".
236 ///
237 /// The long version of why this field exists is that the rules MIRI uses to
238 /// ensure pointers are used correctly come with conditions that depend on how
239 /// each pointer was derived. More specifically, if a `*mut T` is derived from
240 /// a `&mut T`, then that invalidates all prior pointers derived from that
241 /// `&mut T`. This means that while we liberally want to re-acquire a
242 /// `*mut VMContext` throughout the implementation of `Instance`, the trivial
243 /// way (a function `fn vmctx(&mut Instance) -> *mut VMContext`) would
244 /// effectively invalidate all `*mut VMContext` pointers previously
245 /// acquired. The purpose of this field is to serve as a sort of
246 /// source-of-truth for where `*mut VMContext` pointers come from.
247 ///
248 /// This field is initialized when the `Instance` is created with the
249 /// original allocation's pointer. That means that the provenance of this
250 /// pointer contains the entire allocation (both instance and `VMContext`).
251 /// This provenance bit is then "carried through" where `fn vmctx` will base
252 /// all returned pointers on this pointer itself. This provides the means of
253 /// never invalidating this pointer throughout MIRI and additionally being
254 /// able to still temporarily have `&mut Instance` methods and such.
255 ///
256 /// It's important to note, though, that this is not here purely for MIRI.
257 /// The careful construction of the `fn vmctx` method has ramifications on
258 /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
259 /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
260 /// deriving VMContext pointers from this pointer it specifically hints to
261 /// LLVM that trickery is afoot, which properly informs `noalias` and similar
262 /// annotations and analyses. More or less, this pointer is actually loaded
263 /// in LLVM IR which helps defeat otherwise present aliasing optimizations,
264 /// which we want, since writes to this should basically never be optimized
265 /// out.
266 ///
267 /// As a final note it's worth pointing out that the machine code generated
268 /// for accessing `fn vmctx` is still as one would expect. This member isn't
269 /// actually ever loaded at runtime (or at least shouldn't be). Perhaps in
270 /// the future if the memory consumption of this field is a problem we could
271 /// shrink it slightly, but for now one extra pointer per wasm instance
272 /// seems not too bad.
273 vmctx_self_reference: SendSyncPtr<VMContext>,
274
275 // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
276 // memory 0.
277 #[cfg(feature = "wmemcheck")]
278 pub(crate) wmemcheck_state: Option<Wmemcheck>,
279
280 /// Self-pointer back to `Store<T>` and its functions. Not present for
281 /// the brief time that `Store<T>` is itself being created. Also not
282 /// present for some niche uses that are disconnected from stores (e.g.
283 /// cross-thread stuff used in `InstancePre`)
284 store: Option<VMStoreRawPtr>,
285
286 /// Additional context used by compiled wasm code. This field is last, and
287 /// represents a dynamically-sized array that extends beyond the nominal
288 /// end of the struct (similar to a flexible array member).
289 vmctx: VMContext,
290}
291
292impl Instance {
293 /// Allocate a new `Instance`, together with its trailing `VMContext`, and
294 /// initialize it in place.
295 ///
296 /// The size and alignment of the allocation are computed by `Self::alloc_layout`.
297 unsafe fn new(
298 req: InstanceAllocationRequest,
299 memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
300 tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
301 memory_tys: &PrimaryMap<MemoryIndex, wasmtime_environ::Memory>,
302 ) -> InstanceHandle {
303 // The allocation must be *at least* the size required of `Instance`.
304 let layout = Self::alloc_layout(req.runtime_info.offsets());
305 let ptr = alloc::alloc::alloc(layout);
306 if ptr.is_null() {
307 alloc::alloc::handle_alloc_error(layout);
308 }
309 let ptr = ptr.cast::<Instance>();
310
311 let module = req.runtime_info.env_module();
312 let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
313 let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());
314
315 #[cfg(not(feature = "wmemcheck"))]
316 let _ = memory_tys;
317
318 ptr::write(
319 ptr,
320 Instance {
321 runtime_info: req.runtime_info.clone(),
322 memories,
323 tables,
324 dropped_elements,
325 dropped_data,
326 host_state: req.host_state,
327 vmctx_self_reference: SendSyncPtr::new(NonNull::new(ptr.add(1).cast()).unwrap()),
328 vmctx: VMContext {
329 _marker: core::marker::PhantomPinned,
330 },
331 #[cfg(feature = "wmemcheck")]
332 wmemcheck_state: {
333 if req.wmemcheck {
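                        // Compute the initial size, in bytes, of linear memory 0:
                        // its minimum size in wasm pages times the 64 KiB page
                        // size. (wmemcheck currently only tracks memory 0, per the
                        // TODO on this field above.)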
334 let size = memory_tys
335 .iter()
336 .next()
337 .map(|memory| memory.1.limits.min)
338 .unwrap_or(0)
339 * 64
340 * 1024;
341 Some(Wmemcheck::new(size as usize))
342 } else {
343 None
344 }
345 },
346 store: None,
347 },
348 );
349
350 (*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
351 InstanceHandle {
352 instance: Some(SendSyncPtr::new(NonNull::new(ptr).unwrap())),
353 }
354 }
355
356 /// Converts the provided `*mut VMContext` to an `Instance` pointer and runs
357 /// the provided closure with the instance.
358 ///
359 /// This method will move the `vmctx` pointer backwards to point to the
360 /// original `Instance` that precedes it. The closure is provided a
361 /// temporary `Instance` reference with a constrained lifetime to ensure it
362 /// doesn't accidentally escape.
363 ///
364 /// # Safety
365 ///
366 /// Callers must validate that the `vmctx` pointer is a valid allocation
367 /// and that it's valid to acquire `&mut Instance` at this time. For example
368 /// this can't be called twice on the same `VMContext` to get two active
369 /// pointers to the same `Instance`.
370 #[inline]
371 pub unsafe fn from_vmctx<R>(
372 vmctx: NonNull<VMContext>,
373 f: impl FnOnce(&mut Instance) -> R,
374 ) -> R {
375 let mut ptr = vmctx
376 .byte_sub(mem::size_of::<Instance>())
377 .cast::<Instance>();
378 f(ptr.as_mut())
379 }
380
381 /// Helper function to access various locations offset from our `*mut
382 /// VMContext` object.
383 ///
384 /// # Safety
385 ///
386 /// This method is unsafe because the `offset` must be within bounds of the
387 /// `VMContext` object trailing this instance.
388 unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> *const T {
389 self.vmctx()
390 .as_ptr()
391 .byte_add(usize::try_from(offset.into()).unwrap())
392 .cast()
393 }
394
395 /// Dual of `vmctx_plus_offset`, but for mutability.
396 unsafe fn vmctx_plus_offset_mut<T: VmSafe>(&mut self, offset: impl Into<u32>) -> NonNull<T> {
397 self.vmctx()
398 .byte_add(usize::try_from(offset.into()).unwrap())
399 .cast()
400 }
401
402 pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
403 self.runtime_info.env_module()
404 }
405
406 #[cfg(feature = "gc")]
407 pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
408 match &self.runtime_info {
409 ModuleRuntimeInfo::Module(m) => Some(m),
410 ModuleRuntimeInfo::Bare(_) => None,
411 }
412 }
413
414 /// Translate a module-level interned type index into an engine-level
415 /// interned type index.
416 #[cfg(feature = "gc")]
417 pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
418 self.runtime_info.engine_type_index(module_index)
419 }
420
421 #[inline]
422 fn offsets(&self) -> &VMOffsets<HostPtr> {
423 self.runtime_info.offsets()
424 }
425
426 /// Return the indexed `VMFunctionImport`.
427 fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
428 unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
429 }
430
431 /// Return the indexed `VMTableImport`.
432 fn imported_table(&self, index: TableIndex) -> &VMTableImport {
433 unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
434 }
435
436 /// Return the indexed `VMMemoryImport`.
437 fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
438 unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
439 }
440
441 /// Return the indexed `VMGlobalImport`.
442 fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
443 unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
444 }
445
446 /// Return the indexed `VMTagImport`.
447 fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
448 unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
449 }
450
451 /// Return the indexed `VMTagDefinition`.
452 fn tag_ptr(&mut self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
453 unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmtag_definition(index)) }
454 }
455
456 /// Return the indexed `VMTableDefinition`.
457 #[allow(dead_code)]
458 fn table(&mut self, index: DefinedTableIndex) -> VMTableDefinition {
459 unsafe { self.table_ptr(index).read() }
460 }
461
462 /// Updates the `VMTableDefinition` stored for the given defined table index.
463 fn set_table(&mut self, index: DefinedTableIndex, table: VMTableDefinition) {
464 unsafe {
465 self.table_ptr(index).write(table);
466 }
467 }
468
469 /// Return the indexed `VMTableDefinition`.
470 fn table_ptr(&mut self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
471 unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmtable_definition(index)) }
472 }
473
474 /// Get a locally defined or imported memory.
475 pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
476 if let Some(defined_index) = self.env_module().defined_memory_index(index) {
477 self.memory(defined_index)
478 } else {
479 let import = self.imported_memory(index);
480 unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
481 }
482 }
483
484 /// Get a locally defined or imported memory.
485 #[cfg(feature = "threads")]
486 pub(crate) fn get_runtime_memory(&mut self, index: MemoryIndex) -> &mut Memory {
487 if let Some(defined_index) = self.env_module().defined_memory_index(index) {
488 unsafe { &mut *self.get_defined_memory(defined_index) }
489 } else {
490 let import = self.imported_memory(index);
491 unsafe {
492 let ptr = Instance::from_vmctx(import.vmctx.as_non_null(), |i| {
493 i.get_defined_memory(import.index)
494 });
495 &mut *ptr
496 }
497 }
498 }
499
500 /// Return the indexed `VMMemoryDefinition`.
501 fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
502 unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
503 }
504
505 /// Set the indexed memory's `VMMemoryDefinition`.
506 fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
507 unsafe {
508 self.memory_ptr(index).write(mem);
509 }
510 }
511
512 /// Return the indexed `VMMemoryDefinition`.
513 fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
514 let vmptr = unsafe {
515 *self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
516 };
517 vmptr.as_non_null()
518 }
519
520 /// Return the indexed `VMGlobalDefinition`.
521 fn global_ptr(&mut self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
522 unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmglobal_definition(index)) }
523 }
524
525 /// Get a raw pointer to the global at the given index regardless of whether
526 /// it is defined locally or imported from another module.
527 ///
528 /// Panics if the index is out of bounds or is the reserved value.
529 pub(crate) fn defined_or_imported_global_ptr(
530 &mut self,
531 index: GlobalIndex,
532 ) -> NonNull<VMGlobalDefinition> {
533 if let Some(index) = self.env_module().defined_global_index(index) {
534 self.global_ptr(index)
535 } else {
536 self.imported_global(index).from.as_non_null()
537 }
538 }
539
540 /// Get all globals within this instance.
541 ///
542 /// Returns both imported and defined globals.
543 ///
544 /// Returns both exported and non-exported globals.
545 ///
546 /// Gives access to the full globals space.
547 pub fn all_globals<'a>(
548 &'a mut self,
549 ) -> impl ExactSizeIterator<Item = (GlobalIndex, ExportGlobal)> + 'a {
550 let module = self.env_module().clone();
551 module.globals.keys().map(move |idx| {
552 (
553 idx,
554 ExportGlobal {
555 definition: self.defined_or_imported_global_ptr(idx),
556 vmctx: Some(self.vmctx()),
557 global: self.env_module().globals[idx],
558 },
559 )
560 })
561 }
562
563 /// Get the globals defined in this instance (not imported).
564 pub fn defined_globals<'a>(
565 &'a mut self,
566 ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, ExportGlobal)> + 'a {
567 let module = self.env_module().clone();
568 module
569 .globals
570 .keys()
571 .skip(module.num_imported_globals)
572 .map(move |global_idx| {
573 let def_idx = module.defined_global_index(global_idx).unwrap();
574 let global = ExportGlobal {
575 definition: self.global_ptr(def_idx),
576 vmctx: Some(self.vmctx()),
577 global: self.env_module().globals[global_idx],
578 };
579 (def_idx, global)
580 })
581 }
582
583 /// Return a pointer to the vmctx slot holding this instance's `VMStoreContext` pointer.
584 #[inline]
585 pub fn vm_store_context(&mut self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
586 unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_runtime_limits()) }
587 }
588
589 /// Return a pointer to the global epoch counter used by this instance.
590 #[cfg(target_has_atomic = "64")]
591 pub fn epoch_ptr(&mut self) -> NonNull<Option<VmPtr<AtomicU64>>> {
592 unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_epoch_ptr()) }
593 }
594
595 /// Return a pointer to the GC heap base pointer.
596 pub fn gc_heap_base(&mut self) -> NonNull<Option<VmPtr<u8>>> {
597 unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_gc_heap_base()) }
598 }
599
600 /// Return a pointer to the GC heap bound.
601 pub fn gc_heap_bound(&mut self) -> NonNull<usize> {
602 unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_gc_heap_bound()) }
603 }
604
605 /// Return a pointer to the collector-specific heap data.
606 pub fn gc_heap_data(&mut self) -> NonNull<Option<VmPtr<u8>>> {
607 unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_gc_heap_data()) }
608 }
609
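    /// Configure `store` as the store that owns this instance, refreshing the
    /// store-derived slots in the `VMContext` (the `VMStoreContext` pointer,
    /// the epoch counter pointer, and the GC heap info), or clearing them all
    /// when `store` is `None`.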
610 pub(crate) unsafe fn set_store(&mut self, store: Option<NonNull<dyn VMStore>>) {
611 self.store = store.map(VMStoreRawPtr);
612 if let Some(mut store) = store {
613 let store = store.as_mut();
614 self.vm_store_context()
615 .write(Some(store.vm_store_context_ptr().into()));
616 #[cfg(target_has_atomic = "64")]
617 self.epoch_ptr()
618 .write(Some(NonNull::from(store.engine().epoch_counter()).into()));
619 self.set_gc_heap(store.gc_store_mut().ok());
620 } else {
621 self.vm_store_context().write(None);
622 #[cfg(target_has_atomic = "64")]
623 self.epoch_ptr().write(None);
624 self.set_gc_heap(None);
625 }
626 }
627
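    /// Update the GC heap base, bound, and collector-specific data slots in
    /// the `VMContext` to describe `gc_store`'s heap, or reset them to empty
    /// when `gc_store` is `None`.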
628 unsafe fn set_gc_heap(&mut self, gc_store: Option<&mut GcStore>) {
629 if let Some(gc_store) = gc_store {
630 let heap = gc_store.gc_heap.heap_slice_mut();
631 self.gc_heap_bound().write(heap.len());
632 self.gc_heap_base()
633 .write(Some(NonNull::from(heap).cast().into()));
634 self.gc_heap_data()
635 .write(Some(gc_store.gc_heap.vmctx_gc_heap_data().into()));
636 } else {
637 self.gc_heap_bound().write(0);
638 self.gc_heap_base().write(None);
639 self.gc_heap_data().write(None);
640 }
641 }
642
643 pub(crate) unsafe fn set_callee(&mut self, callee: Option<NonNull<VMFunctionBody>>) {
644 let callee = callee.map(|p| VmPtr::from(p));
645 self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_callee())
646 .write(callee);
647 }
648
649 /// Return a pointer to the vmctx used by compiled wasm code.
650 #[inline]
651 pub fn vmctx(&self) -> NonNull<VMContext> {
652 // The definition of this method is subtle but intentional. The goal
653 // here is that effectively this should return `&mut self.vmctx`, but
654 // it's not quite so simple. Some more documentation is available on the
655 // `vmctx_self_reference` field, but the general idea is that we're
656 // creating a pointer to return with proper provenance. Provenance is
657 // still in the works in Rust at the time of this writing but the load
658 // of the `self.vmctx_self_reference` field is important here as it
659 // affects how LLVM thinks about aliasing with respect to the returned
660 // pointer.
661 //
662 // The intention of this method is to codegen to machine code as `&mut
663 // self.vmctx`, however. While it doesn't show up like this in LLVM IR
664 // (there's an actual load of the field) it does look like that by the
665 // time the backend runs. (that's magic to me, the backend removing
666 // loads...)
667 //
668 // As a final minor note, strict provenance APIs are not stable on Rust
669 // today so the `sptr` crate is used. This crate provides the extension
670 // trait `Strict` but the method names conflict with the nightly methods
671 // so a different syntax is used to invoke methods here.
672 let addr = &raw const self.vmctx;
673 let ret = Strict::with_addr(self.vmctx_self_reference.as_ptr(), Strict::addr(addr));
674 NonNull::new(ret).unwrap()
675 }
676
677 fn get_exported_func(&mut self, index: FuncIndex) -> ExportFunction {
678 let func_ref = self.get_func_ref(index).unwrap();
679 ExportFunction { func_ref }
680 }
681
682 fn get_exported_table(&mut self, index: TableIndex) -> ExportTable {
683 let (definition, vmctx) =
684 if let Some(def_index) = self.env_module().defined_table_index(index) {
685 (self.table_ptr(def_index), self.vmctx())
686 } else {
687 let import = self.imported_table(index);
688 (import.from.as_non_null(), import.vmctx.as_non_null())
689 };
690 ExportTable {
691 definition,
692 vmctx,
693 table: self.env_module().tables[index],
694 }
695 }
696
697 fn get_exported_memory(&mut self, index: MemoryIndex) -> ExportMemory {
698 let (definition, vmctx, def_index) =
699 if let Some(def_index) = self.env_module().defined_memory_index(index) {
700 (self.memory_ptr(def_index), self.vmctx(), def_index)
701 } else {
702 let import = self.imported_memory(index);
703 (
704 import.from.as_non_null(),
705 import.vmctx.as_non_null(),
706 import.index,
707 )
708 };
709 ExportMemory {
710 definition,
711 vmctx,
712 memory: self.env_module().memories[index],
713 index: def_index,
714 }
715 }
716
717 fn get_exported_global(&mut self, index: GlobalIndex) -> ExportGlobal {
718 ExportGlobal {
719 definition: if let Some(def_index) = self.env_module().defined_global_index(index) {
720 self.global_ptr(def_index)
721 } else {
722 self.imported_global(index).from.as_non_null()
723 },
724 vmctx: Some(self.vmctx()),
725 global: self.env_module().globals[index],
726 }
727 }
728
729 fn get_exported_tag(&mut self, index: TagIndex) -> ExportTag {
730 ExportTag {
731 definition: if let Some(def_index) = self.env_module().defined_tag_index(index) {
732 self.tag_ptr(def_index)
733 } else {
734 self.imported_tag(index).from.as_non_null()
735 },
736 tag: self.env_module().tags[index],
737 }
738 }
739
740 /// Return an iterator over the exports of this instance.
741 ///
742 /// Specifically, it provides access to the key-value pairs, where the keys
743 /// are export names, and the values are export declarations which can be
744 /// resolved with `lookup_by_declaration`.
745 pub fn exports(&self) -> wasmparser::collections::index_map::Iter<String, EntityIndex> {
746 self.env_module().exports.iter()
747 }
748
749 /// Return a reference to the custom state attached to this instance.
750 #[inline]
751 pub fn host_state(&self) -> &dyn Any {
752 &*self.host_state
753 }
754
755 /// Return the table index for the given `VMTableDefinition`.
756 pub unsafe fn table_index(&mut self, table: &VMTableDefinition) -> DefinedTableIndex {
757 let index = DefinedTableIndex::new(
758 usize::try_from(
759 (table as *const VMTableDefinition)
760 .offset_from(self.table_ptr(DefinedTableIndex::new(0)).as_ptr()),
761 )
762 .unwrap(),
763 );
764 assert!(index.index() < self.tables.len());
765 index
766 }
767
768 /// Get the given memory's page size, in bytes.
769 pub(crate) fn memory_page_size(&self, index: MemoryIndex) -> usize {
770 usize::try_from(self.env_module().memories[index].page_size()).unwrap()
771 }
772
773 /// Grow memory by the specified number of pages.
774 ///
775 /// Returns `None` if memory can't be grown by the specified number
776 /// of pages. Returns `Some` with the old size in bytes if growth was
777 /// successful.
778 pub(crate) fn memory_grow(
779 &mut self,
780 store: &mut dyn VMStore,
781 index: MemoryIndex,
782 delta: u64,
783 ) -> Result<Option<usize>, Error> {
784 match self.env_module().defined_memory_index(index) {
785 Some(idx) => self.defined_memory_grow(store, idx, delta),
786 None => {
787 let import = self.imported_memory(index);
788 unsafe {
789 Instance::from_vmctx(import.vmctx.as_non_null(), |i| {
790 i.defined_memory_grow(store, import.index, delta)
791 })
792 }
793 }
794 }
795 }
796
797 fn defined_memory_grow(
798 &mut self,
799 store: &mut dyn VMStore,
800 idx: DefinedMemoryIndex,
801 delta: u64,
802 ) -> Result<Option<usize>, Error> {
803 let memory = &mut self.memories[idx].1;
804
805 let result = unsafe { memory.grow(delta, Some(store)) };
806
807 // Update the state used by a non-shared Wasm memory in case the base
808 // pointer and/or the length changed.
809 if memory.as_shared_memory().is_none() {
810 let vmmemory = memory.vmmemory();
811 self.set_memory(idx, vmmemory);
812 }
813
814 result
815 }
816
817 pub(crate) fn table_element_type(&mut self, table_index: TableIndex) -> TableElementType {
818 unsafe { (*self.get_table(table_index)).element_type() }
819 }
820
821 /// Grow the table by the specified number of elements, filling them with
822 /// `init_value`.
823 ///
824 /// Returns `None` if the table can't be grown by the specified number of
825 /// elements, or if `init_value` is the wrong type of table element.
826 pub(crate) fn table_grow(
827 &mut self,
828 store: &mut dyn VMStore,
829 table_index: TableIndex,
830 delta: u64,
831 init_value: TableElement,
832 ) -> Result<Option<usize>, Error> {
833 self.with_defined_table_index_and_instance(table_index, |i, instance| {
834 instance.defined_table_grow(store, i, delta, init_value)
835 })
836 }
837
838 fn defined_table_grow(
839 &mut self,
840 store: &mut dyn VMStore,
841 table_index: DefinedTableIndex,
842 delta: u64,
843 init_value: TableElement,
844 ) -> Result<Option<usize>, Error> {
845 let table = &mut self
846 .tables
847 .get_mut(table_index)
848 .unwrap_or_else(|| panic!("no table for index {}", table_index.index()))
849 .1;
850
851 let result = unsafe { table.grow(delta, init_value, store) };
852
853 // Keep the `VMContext` pointers used by compiled Wasm code up to
854 // date.
855 let element = self.tables[table_index].1.vmtable();
856 self.set_table(table_index, element);
857
858 result
859 }
860
861 fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
862 let size = mem::size_of::<Self>()
863 .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
864 .unwrap();
865 let align = mem::align_of::<Self>();
866 Layout::from_size_align(size, align).unwrap()
867 }
868
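    /// Return a pointer to the vmctx slot holding the pointer to this
    /// module's array of engine-level `VMSharedTypeIndex` entries.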
869 fn type_ids_array(&mut self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
870 unsafe { self.vmctx_plus_offset_mut(self.offsets().ptr.vmctx_type_ids_array()) }
871 }
872
873 /// Construct a new VMFuncRef for the given function
874 /// (imported or defined in this module) and store into the given
875 /// location. Used during lazy initialization.
876 ///
877 /// Note that our current lazy-init scheme actually calls this every
878 /// time the funcref pointer is fetched; this turns out to be better
879 /// than tracking state related to whether it's been initialized
880 /// before, because resetting that state on (re)instantiation is
881 /// very expensive if there are many funcrefs.
882 fn construct_func_ref(
883 &mut self,
884 index: FuncIndex,
885 type_index: VMSharedTypeIndex,
886 into: *mut VMFuncRef,
887 ) {
888 let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
889 VMFuncRef {
890 array_call: self
891 .runtime_info
892 .array_to_wasm_trampoline(def_index)
893 .expect("should have array-to-Wasm trampoline for escaping function")
894 .into(),
895 wasm_call: Some(self.runtime_info.function(def_index).into()),
896 vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
897 type_index,
898 }
899 } else {
900 let import = self.imported_function(index);
901 VMFuncRef {
902 array_call: import.array_call,
903 wasm_call: Some(import.wasm_call),
904 vmctx: import.vmctx,
905 type_index,
906 }
907 };
908
909 // Safety: we have a `&mut self`, so we have exclusive access
910 // to this Instance.
911 unsafe {
912 ptr::write(into, func_ref);
913 }
914 }
915
916 /// Get a pointer to the `VMFuncRef` for the given `FuncIndex`.
917 ///
918 /// Returns `None` if the index is the reserved index value.
919 ///
920 /// The returned reference is a stable reference that won't be moved and can
921 /// be passed into JIT code.
922 pub(crate) fn get_func_ref(&mut self, index: FuncIndex) -> Option<NonNull<VMFuncRef>> {
923 if index == FuncIndex::reserved_value() {
924 return None;
925 }
926
927 // Safety: we have a `&mut self`, so we have exclusive access
928 // to this Instance.
929 unsafe {
930 // For now, we eagerly initialize a funcref struct in-place
931 // whenever asked for a reference to it. This is mostly
932 // fine, because in practice each funcref is unlikely to be
933 // requested more than a few times: once-ish for funcref
934 // tables used for call_indirect (the usual compilation
935 // strategy places each function in the table at most once),
936 // and once or a few times when fetching exports via API.
937 // Note that for any case driven by table accesses, the lazy
938 // table init behaves like a higher-level cache layer that
939 // protects this initialization from happening multiple
940 // times, via that particular table at least.
941 //
942 // When `ref.func` becomes more commonly used or if we
943 // otherwise see a use-case where this becomes a hotpath,
944 // we can reconsider by using some state to track
945 // "uninitialized" explicitly, for example by zeroing the
946 // funcrefs (perhaps together with other
947 // zeroed-at-instantiate-time state) or using a separate
948 // is-initialized bitmap.
949 //
950 // We arrived at this design because zeroing memory is
951 // expensive, so it's better for instantiation performance
952 // if we don't have to track "is-initialized" state at
953 // all!
954 let func = &self.env_module().functions[index];
955 let sig = func.signature.unwrap_engine_type_index();
956 let func_ref = self
957 .vmctx_plus_offset_mut::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref));
958 self.construct_func_ref(index, sig, func_ref.as_ptr());
959
960 Some(func_ref)
961 }
962 }
963
964 /// Get the passive elements segment at the given index.
965 ///
966 /// Returns an empty segment if the index is out of bounds or if the segment
967 /// has been dropped.
968 ///
969 /// The `storage` parameter should always be `None`; it is a bit of a hack
970 /// to work around lifetime issues.
971 pub(crate) fn passive_element_segment<'a>(
972 &self,
973 storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
974 elem_index: ElemIndex,
975 ) -> &'a TableSegmentElements {
976 debug_assert!(storage.is_none());
977 *storage = Some((
978 // TODO: this `clone()` shouldn't be necessary but is used for now to
979 // inform `rustc` that the lifetime of the elements here are
980 // disconnected from the lifetime of `self`.
981 self.env_module().clone(),
982 // NB: fall back to an expressions-based list of elements which
983 // doesn't have static type information (as opposed to
984 // `TableSegmentElements::Functions`) since we don't know what type
985 // is needed in the caller's context. Let the type be inferred by
986 // how they use the segment.
987 TableSegmentElements::Expressions(Box::new([])),
988 ));
989 let (module, empty) = storage.as_ref().unwrap();
990
991 match module.passive_elements_map.get(&elem_index) {
992 Some(index) if !self.dropped_elements.contains(elem_index) => {
993 &module.passive_elements[*index]
994 }
995 _ => empty,
996 }
997 }
998
999 /// The `table.init` operation: initializes a portion of a table with a
1000 /// passive element.
1001 ///
1002 /// # Errors
1003 ///
1004 /// Returns a `Trap` error when the range within the table is out of bounds
1005 /// or the range within the passive element is out of bounds.
1006 pub(crate) fn table_init(
1007 &mut self,
1008 store: &mut StoreOpaque,
1009 table_index: TableIndex,
1010 elem_index: ElemIndex,
1011 dst: u64,
1012 src: u64,
1013 len: u64,
1014 ) -> Result<(), Trap> {
1015 let mut storage = None;
1016 let elements = self.passive_element_segment(&mut storage, elem_index);
1017 let mut const_evaluator = ConstExprEvaluator::default();
1018 self.table_init_segment(
1019 store,
1020 &mut const_evaluator,
1021 table_index,
1022 elements,
1023 dst,
1024 src,
1025 len,
1026 )
1027 }
1028
1029 pub(crate) fn table_init_segment(
1030 &mut self,
1031 store: &mut StoreOpaque,
1032 const_evaluator: &mut ConstExprEvaluator,
1033 table_index: TableIndex,
1034 elements: &TableSegmentElements,
1035 dst: u64,
1036 src: u64,
1037 len: u64,
1038 ) -> Result<(), Trap> {
1039 // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
1040
1041 let table = unsafe { &mut *self.get_table(table_index) };
1042 let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
1043 let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;
1044 let module = self.env_module().clone();
1045
1046 match elements {
1047 TableSegmentElements::Functions(funcs) => {
1048 let elements = funcs
1049 .get(src..)
1050 .and_then(|s| s.get(..len))
1051 .ok_or(Trap::TableOutOfBounds)?;
1052 table.init_func(dst, elements.iter().map(|idx| self.get_func_ref(*idx)))?;
1053 }
1054 TableSegmentElements::Expressions(exprs) => {
1055 let exprs = exprs
1056 .get(src..)
1057 .and_then(|s| s.get(..len))
1058 .ok_or(Trap::TableOutOfBounds)?;
1059 let mut context = ConstEvalContext::new(self);
1060 match module.tables[table_index].ref_type.heap_type.top() {
1061 WasmHeapTopType::Extern => table.init_gc_refs(
1062 dst,
1063 exprs.iter().map(|expr| unsafe {
1064 let raw = const_evaluator
1065 .eval(store, &mut context, expr)
1066 .expect("const expr should be valid");
1067 VMGcRef::from_raw_u32(raw.get_externref())
1068 }),
1069 )?,
1070 WasmHeapTopType::Any => table.init_gc_refs(
1071 dst,
1072 exprs.iter().map(|expr| unsafe {
1073 let raw = const_evaluator
1074 .eval(store, &mut context, expr)
1075 .expect("const expr should be valid");
1076 VMGcRef::from_raw_u32(raw.get_anyref())
1077 }),
1078 )?,
1079 WasmHeapTopType::Func => table.init_func(
1080 dst,
1081 exprs.iter().map(|expr| unsafe {
1082 NonNull::new(
1083 const_evaluator
1084 .eval(store, &mut context, expr)
1085 .expect("const expr should be valid")
1086 .get_funcref()
1087 .cast(),
1088 )
1089 }),
1090 )?,
1091 WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
1092 }
1093 }
1094 }
1095
1096 Ok(())
1097 }
1098
1099 /// Drop an element.
1100 pub(crate) fn elem_drop(&mut self, elem_index: ElemIndex) {
1101 // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop
1102
1103 self.dropped_elements.insert(elem_index);
1104
1105 // Note that we don't check that we actually removed a segment because
1106 // dropping a non-passive segment is a no-op (not a trap).
1107 }
1108
1109 /// Get a locally-defined memory.
1110 pub fn get_defined_memory(&mut self, index: DefinedMemoryIndex) -> *mut Memory {
1111 // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
1112 // `*mut A`, which should be a safe operation to do.
1113 unsafe { &raw mut (*self.memories.get_raw_mut(index).unwrap()).1 }
1114 }
1115
1116 /// Do a `memory.copy`
1117 ///
1118 /// # Errors
1119 ///
1120 /// Returns a `Trap` error when the source or destination ranges are out of
1121 /// bounds.
1122 pub(crate) fn memory_copy(
1123 &mut self,
1124 dst_index: MemoryIndex,
1125 dst: u64,
1126 src_index: MemoryIndex,
1127 src: u64,
1128 len: u64,
1129 ) -> Result<(), Trap> {
1130 // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
1131
1132 let src_mem = self.get_memory(src_index);
1133 let dst_mem = self.get_memory(dst_index);
1134
1135 let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
1136 let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
1137 let len = usize::try_from(len).unwrap();
1138
1139 // Bounds and casts are checked above, by this point we know that
1140 // everything is safe.
1141 unsafe {
1142 let dst = dst_mem.base.as_ptr().add(dst);
1143 let src = src_mem.base.as_ptr().add(src);
1144 // FIXME audit whether this is safe in the presence of shared memory
1145 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1146 ptr::copy(src, dst, len);
1147 }
1148
1149 Ok(())
1150 }
1151
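    /// Validate that the half-open range `ptr..ptr + len` lies within a
    /// region of `max` bytes (a linear memory or a data segment), returning
    /// `ptr` converted to a `usize` on success.
    ///
    /// For example, with `max = 8`, `ptr = 4`, and `len = 6` the computed end
    /// is 10, which exceeds 8, so this returns `Err(Trap::MemoryOutOfBounds)`.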
1152 fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
1153 let oob = || Trap::MemoryOutOfBounds;
1154 let end = ptr
1155 .checked_add(len)
1156 .and_then(|i| usize::try_from(i).ok())
1157 .ok_or_else(oob)?;
1158 if end > max {
1159 Err(oob())
1160 } else {
1161 Ok(ptr.try_into().unwrap())
1162 }
1163 }
1164
1165 /// Perform the `memory.fill` operation on a locally defined memory.
1166 ///
1167 /// # Errors
1168 ///
1169 /// Returns a `Trap` error if the memory range is out of bounds.
1170 pub(crate) fn memory_fill(
1171 &mut self,
1172 memory_index: MemoryIndex,
1173 dst: u64,
1174 val: u8,
1175 len: u64,
1176 ) -> Result<(), Trap> {
1177 let memory = self.get_memory(memory_index);
1178 let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
1179 let len = usize::try_from(len).unwrap();
1180
1181 // Bounds and casts are checked above, by this point we know that
1182 // everything is safe.
1183 unsafe {
1184 let dst = memory.base.as_ptr().add(dst);
1185 // FIXME audit whether this is safe in the presence of shared memory
1186 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1187 ptr::write_bytes(dst, val, len);
1188 }
1189
1190 Ok(())
1191 }
1192
1193 /// Get the internal storage range of a particular Wasm data segment.
1194 pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
1195 match self.env_module().passive_data_map.get(&index) {
1196 Some(range) if !self.dropped_data.contains(index) => range.clone(),
1197 _ => 0..0,
1198 }
1199 }
1200
1201 /// Given an internal storage range of a Wasm data segment (or subset of a
1202 /// Wasm data segment), get the data's raw bytes.
1203 pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
1204 let start = usize::try_from(range.start).unwrap();
1205 let end = usize::try_from(range.end).unwrap();
1206 &self.runtime_info.wasm_data()[start..end]
1207 }
1208
1209 /// Performs the `memory.init` operation.
1210 ///
1211 /// # Errors
1212 ///
1213 /// Returns a `Trap` error if the destination range is out of this module's
1214 /// memory's bounds or if the source range is outside the data segment's
1215 /// bounds.
1216 pub(crate) fn memory_init(
1217 &mut self,
1218 memory_index: MemoryIndex,
1219 data_index: DataIndex,
1220 dst: u64,
1221 src: u32,
1222 len: u32,
1223 ) -> Result<(), Trap> {
1224 let range = self.wasm_data_range(data_index);
1225 self.memory_init_segment(memory_index, range, dst, src, len)
1226 }
1227
1228 pub(crate) fn memory_init_segment(
1229 &mut self,
1230 memory_index: MemoryIndex,
1231 range: Range<u32>,
1232 dst: u64,
1233 src: u32,
1234 len: u32,
1235 ) -> Result<(), Trap> {
1236 // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init
1237
1238 let memory = self.get_memory(memory_index);
1239 let data = self.wasm_data(range);
1240 let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
1241 let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
1242 let len = len as usize;
1243
1244 unsafe {
1245 let src_start = data.as_ptr().add(src);
1246 let dst_start = memory.base.as_ptr().add(dst);
1247 // FIXME audit whether this is safe in the presence of shared memory
1248 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1249 ptr::copy_nonoverlapping(src_start, dst_start, len);
1250 }
1251
1252 Ok(())
1253 }
1254
1255 /// Drop the given data segment, truncating its length to zero.
1256 pub(crate) fn data_drop(&mut self, data_index: DataIndex) {
1257 self.dropped_data.insert(data_index);
1258
1259 // Note that we don't check that we actually removed a segment because
1260 // dropping a non-passive segment is a no-op (not a trap).
1261 }
1262
1263 /// Get a table by index regardless of whether it is locally-defined
1264 /// or an imported, foreign table. Ensure that the given range of
1265 /// elements in the table is lazily initialized. We define this
1266 /// operation all-in-one for safety, to ensure the lazy-init
1267 /// happens.
1268 ///
1269 /// Takes an `Iterator` for the index-range to lazy-initialize,
1270 /// for flexibility. This can be a range, single item, or empty
1271 /// sequence, for example. The iterator should return indices in
1272 /// increasing order, so that the break-at-out-of-bounds behavior
1273 /// works correctly.
1274 pub(crate) fn get_table_with_lazy_init(
1275 &mut self,
1276 table_index: TableIndex,
1277 range: impl Iterator<Item = u64>,
1278 ) -> *mut Table {
1279 self.with_defined_table_index_and_instance(table_index, |idx, instance| {
1280 instance.get_defined_table_with_lazy_init(idx, range)
1281 })
1282 }
1283
1284 /// Gets the raw runtime table data structure owned by this instance
1285 /// given the provided `idx`.
1286 ///
1287 /// The `range` specified is eagerly initialized for funcref tables.
1288 pub fn get_defined_table_with_lazy_init(
1289 &mut self,
1290 idx: DefinedTableIndex,
1291 range: impl Iterator<Item = u64>,
1292 ) -> *mut Table {
1293 let elt_ty = self.tables[idx].1.element_type();
1294
1295 if elt_ty == TableElementType::Func {
1296 for i in range {
1297 let value = match self.tables[idx].1.get(None, i) {
1298 Some(value) => value,
1299 None => {
1300 // Out-of-bounds; caller will handle by likely
1301 // throwing a trap. No work to do to lazy-init
1302 // beyond the end.
1303 break;
1304 }
1305 };
1306
1307 if !value.is_uninit() {
1308 continue;
1309 }
1310
1311 // The table element `i` is uninitialized and is now being
1312 // initialized. This must imply that a `precomputed` list of
1313 // function indices is available for this table. The precomputed
1314 // list is extracted and then it is consulted with `i` to
1315 // determine the function that is going to be initialized. Note
1316 // that `i` may be outside the limits of the static
1317 // initialization so it's a fallible `get` instead of an index.
1318 let module = self.env_module();
1319 let precomputed = match &module.table_initialization.initial_values[idx] {
1320 TableInitialValue::Null { precomputed } => precomputed,
1321 TableInitialValue::Expr(_) => unreachable!(),
1322 };
1323 // Panicking here helps catch bugs rather than silently truncating by accident.
1324 let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
1325 let func_ref = func_index.and_then(|func_index| self.get_func_ref(func_index));
1326 self.tables[idx]
1327 .1
1328 .set(i, TableElement::FuncRef(func_ref))
1329 .expect("Table type should match and index should be in-bounds");
1330 }
1331 }
1332
1333 // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
1334 // `*mut A`, which should be a safe operation to do.
1335 unsafe { &raw mut (*self.tables.get_raw_mut(idx).unwrap()).1 }
1336 }
1337
1338 /// Get a table by index regardless of whether it is locally-defined or an
1339 /// imported, foreign table.
1340 pub(crate) fn get_table(&mut self, table_index: TableIndex) -> *mut Table {
1341 self.with_defined_table_index_and_instance(table_index, |idx, instance| unsafe {
1342 // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
1343 // `*mut A`, which should be a safe operation to do.
1344 &raw mut (*instance.tables.get_raw_mut(idx).unwrap()).1
1345 })
1346 }
1347
1348 /// Get a locally-defined table.
1349 pub(crate) fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
1350 // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
1351 // `*mut A`, which should be a safe operation to do.
1352 unsafe { &raw mut (*self.tables.get_raw_mut(index).unwrap()).1 }
1353 }
1354
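    /// Resolve `index`, which may refer to an imported table, to the defined
    /// table index and the instance that actually owns that table, then
    /// invoke `f` with both. For locally defined tables the owning instance
    /// is `self`; for imports it is the foreign instance behind the import's
    /// `vmctx`.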
1355 pub(crate) fn with_defined_table_index_and_instance<R>(
1356 &mut self,
1357 index: TableIndex,
1358 f: impl FnOnce(DefinedTableIndex, &mut Instance) -> R,
1359 ) -> R {
1360 if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
1361 f(defined_table_index, self)
1362 } else {
1363 let import = self.imported_table(index);
1364 unsafe {
1365 Instance::from_vmctx(import.vmctx.as_non_null(), |foreign_instance| {
1366 let foreign_table_def = import.from.as_ptr();
1367 let foreign_table_index = foreign_instance.table_index(&*foreign_table_def);
1368 f(foreign_table_index, foreign_instance)
1369 })
1370 }
1371 }
1372 }
1373
1374 /// Initialize the VMContext data associated with this Instance.
1375 ///
1376 /// The `VMContext` memory is assumed to be uninitialized; any field
1377 /// that we need in a certain state will be explicitly written by this
1378 /// function.
1379 unsafe fn initialize_vmctx(
1380 &mut self,
1381 module: &Module,
1382 offsets: &VMOffsets<HostPtr>,
1383 store: StorePtr,
1384 imports: Imports,
1385 ) {
1386 assert!(ptr::eq(module, self.env_module().as_ref()));
1387
1388 self.vmctx_plus_offset_mut(offsets.ptr.vmctx_magic())
1389 .write(VMCONTEXT_MAGIC);
1390 self.set_callee(None);
1391 self.set_store(store.as_raw());
1392
1393 // Initialize shared types
1394 let types = NonNull::from(self.runtime_info.type_ids());
1395 self.type_ids_array().write(types.cast().into());
1396
1397 // Initialize the built-in functions
1398 static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
1399 let ptr = BUILTINS.expose_provenance();
1400 self.vmctx_plus_offset_mut(offsets.ptr.vmctx_builtin_functions())
1401 .write(VmPtr::from(ptr));
1402
1403 // Initialize the imports
1404 debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
1405 ptr::copy_nonoverlapping(
1406 imports.functions.as_ptr(),
1407 self.vmctx_plus_offset_mut(offsets.vmctx_imported_functions_begin())
1408 .as_ptr(),
1409 imports.functions.len(),
1410 );
1411 debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
1412 ptr::copy_nonoverlapping(
1413 imports.tables.as_ptr(),
1414 self.vmctx_plus_offset_mut(offsets.vmctx_imported_tables_begin())
1415 .as_ptr(),
1416 imports.tables.len(),
1417 );
1418 debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
1419 ptr::copy_nonoverlapping(
1420 imports.memories.as_ptr(),
1421 self.vmctx_plus_offset_mut(offsets.vmctx_imported_memories_begin())
1422 .as_ptr(),
1423 imports.memories.len(),
1424 );
1425 debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
1426 ptr::copy_nonoverlapping(
1427 imports.globals.as_ptr(),
1428 self.vmctx_plus_offset_mut(offsets.vmctx_imported_globals_begin())
1429 .as_ptr(),
1430 imports.globals.len(),
1431 );
1432
1433 debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
1434 ptr::copy_nonoverlapping(
1435 imports.tags.as_ptr(),
1436 self.vmctx_plus_offset_mut(offsets.vmctx_imported_tags_begin())
1437 .as_ptr(),
1438 imports.tags.len(),
1439 );
1440
1441 // N.B.: there is no need to initialize the funcrefs array because we
1442 // eagerly construct each element in it whenever asked for a reference
1443 // to that element. In other words, there is no state needed to track
1444 // the lazy-init, so we don't need to initialize any state now.
1445
1446 // Initialize the defined tables
1447 let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_tables_begin());
1448 for i in 0..module.num_defined_tables() {
1449 ptr.write(self.tables[DefinedTableIndex::new(i)].1.vmtable());
1450 ptr = ptr.add(1);
1451 }
1452
1453 // Initialize the defined memories. This fills in both the
1454 // `defined_memories` table and the `owned_memories` table at the same
1455 // time. Entries in `defined_memories` hold a pointer to a definition
1456 // (all memories) whereas the `owned_memories` hold the actual
1457 // definitions of memories owned (not shared) in the module.
1458 let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_memories_begin());
1459 let mut owned_ptr = self.vmctx_plus_offset_mut(offsets.vmctx_owned_memories_begin());
1460 for i in 0..module.num_defined_memories() {
1461 let defined_memory_index = DefinedMemoryIndex::new(i);
1462 let memory_index = module.memory_index(defined_memory_index);
1463 if module.memories[memory_index].shared {
1464 let def_ptr = self.memories[defined_memory_index]
1465 .1
1466 .as_shared_memory()
1467 .unwrap()
1468 .vmmemory_ptr();
1469 ptr.write(VmPtr::from(def_ptr));
1470 } else {
1471 owned_ptr.write(self.memories[defined_memory_index].1.vmmemory());
1472 ptr.write(VmPtr::from(owned_ptr));
1473 owned_ptr = owned_ptr.add(1);
1474 }
1475 ptr = ptr.add(1);
1476 }
1477
1478 // Zero-initialize the globals so that nothing is uninitialized memory
1479 // after this function returns. The globals are actually initialized
1480 // with their const expression initializers after the instance is fully
1481 // allocated.
1482 for (index, _init) in module.global_initializers.iter() {
1483 self.global_ptr(index).write(VMGlobalDefinition::new());
1484 }
1485
1486 // Initialize the defined tags
1487 let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_tags_begin());
1488 for i in 0..module.num_defined_tags() {
1489 let defined_index = DefinedTagIndex::new(i);
1490 let tag_index = module.tag_index(defined_index);
1491 let tag = module.tags[tag_index];
1492 ptr.write(VMTagDefinition::new(
1493 tag.signature.unwrap_engine_type_index(),
1494 ));
1495 ptr = ptr.add(1);
1496 }
1497 }
1498
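    /// Attempt to attribute the faulting host address `addr` to one of this
    /// instance's linear memories, returning the memory's current byte size
    /// and the corresponding wasm-relative address when the fault falls
    /// within a memory's accessible range.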
1499 fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
1500 let mut fault = None;
1501 for (_, (_, memory)) in self.memories.iter() {
1502 let accessible = memory.wasm_accessible();
1503 if accessible.start <= addr && addr < accessible.end {
1504 // All linear memories should be disjoint so assert that no
1505 // prior fault has been found.
1506 assert!(fault.is_none());
1507 fault = Some(WasmFault {
1508 memory_size: memory.byte_size(),
1509 wasm_address: u64::try_from(addr - accessible.start).unwrap(),
1510 });
1511 }
1512 }
1513 fault
1514 }
1515}

/// A handle holding an `Instance` of a WebAssembly module.
#[derive(Debug)]
pub struct InstanceHandle {
    instance: Option<SendSyncPtr<Instance>>,
}

impl InstanceHandle {
    /// Creates an "empty" instance handle which internally has a null pointer
    /// to an instance.
    pub fn null() -> InstanceHandle {
        InstanceHandle { instance: None }
    }

    /// Return a raw pointer to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> NonNull<VMContext> {
        self.instance().vmctx()
    }

    /// Return a reference to this instance's module.
    pub fn module(&self) -> &Arc<Module> {
        self.instance().env_module()
    }

    /// Look up a function by index.
    pub fn get_exported_func(&mut self, export: FuncIndex) -> ExportFunction {
        self.instance_mut().get_exported_func(export)
    }

    /// Look up a global by index.
    pub fn get_exported_global(&mut self, export: GlobalIndex) -> ExportGlobal {
        self.instance_mut().get_exported_global(export)
    }

    /// Look up a tag by index.
    pub fn get_exported_tag(&mut self, export: TagIndex) -> ExportTag {
        self.instance_mut().get_exported_tag(export)
    }

    /// Look up a memory by index.
    pub fn get_exported_memory(&mut self, export: MemoryIndex) -> ExportMemory {
        self.instance_mut().get_exported_memory(export)
    }

    /// Look up a table by index.
    pub fn get_exported_table(&mut self, export: TableIndex) -> ExportTable {
        self.instance_mut().get_exported_table(export)
    }

    /// Look up an item with the given index.
    pub fn get_export_by_index(&mut self, export: EntityIndex) -> Export {
        match export {
            EntityIndex::Function(i) => Export::Function(self.get_exported_func(i)),
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(i)),
            EntityIndex::Memory(i) => Export::Memory(self.get_exported_memory(i)),
            EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(i)),
        }
    }

    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `get_export_by_index`.
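    ///
    /// A hedged usage sketch (the `handle` variable and the embedder-side
    /// handling of each export are illustrative assumptions, not part of this
    /// API):
    ///
    /// ```ignore
    /// // Copy the name/index pairs out first since resolving an export below
    /// // requires `&mut self`.
    /// let exports: Vec<(String, EntityIndex)> = handle
    ///     .exports()
    ///     .map(|(name, index)| (name.clone(), *index))
    ///     .collect();
    /// for (name, index) in exports {
    ///     let export = handle.get_export_by_index(index);
    ///     // ... hand `name`/`export` off to embedder-facing code ...
    /// }
    /// ```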
    pub fn exports(&self) -> wasmparser::collections::index_map::Iter<String, EntityIndex> {
        self.instance().exports()
    }

    /// Return a reference to the custom state attached to this instance.
    pub fn host_state(&self) -> &dyn Any {
        self.instance().host_state()
    }

    /// Get a table defined locally within this module.
    pub fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
        self.instance_mut().get_defined_table(index)
    }

    /// Get a table defined locally within this module, lazily
    /// initializing the given range of elements first.
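    ///
    /// A minimal sketch of a call site (the index and element range here are
    /// purely illustrative):
    ///
    /// ```ignore
    /// let index = DefinedTableIndex::new(0);
    /// // Ensure elements 0..16 are initialized before handing out the table.
    /// let table: *mut Table = handle.get_defined_table_with_lazy_init(index, 0u64..16);
    /// ```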
    pub fn get_defined_table_with_lazy_init(
        &mut self,
        index: DefinedTableIndex,
        range: impl Iterator<Item = u64>,
    ) -> *mut Table {
        let index = self.instance().env_module().table_index(index);
        self.instance_mut().get_table_with_lazy_init(index, range)
    }

    /// Get all tables within this instance.
    ///
    /// Returns both imported and defined tables.
    ///
    /// Returns both exported and non-exported tables.
    ///
    /// Gives access to the full table index space.
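    ///
    /// For example (a hedged sketch; `handle` is assumed to be a live
    /// `InstanceHandle`):
    ///
    /// ```ignore
    /// let num_imported = handle.module().num_imported_tables;
    /// for (table_index, export) in handle.all_tables() {
    ///     // Imported tables come first in the index space, then defined ones.
    ///     let is_defined = table_index.index() >= num_imported;
    ///     let _ = (is_defined, export);
    /// }
    /// ```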
    pub fn all_tables<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (TableIndex, ExportTable)> + 'a {
        let indices = (0..self.module().tables.len())
            .map(|i| TableIndex::new(i))
            .collect::<Vec<_>>();
        indices.into_iter().map(|i| (i, self.get_exported_table(i)))
    }

    /// Return the tables defined in this instance (not imported).
    pub fn defined_tables<'a>(&'a mut self) -> impl ExactSizeIterator<Item = ExportTable> + 'a {
        let num_imported = self.module().num_imported_tables;
        self.all_tables()
            .skip(num_imported)
            .map(|(_i, table)| table)
    }

    /// Get all memories within this instance.
    ///
    /// Returns both imported and defined memories.
    ///
    /// Returns both exported and non-exported memories.
    ///
    /// Gives access to the full memory index space.
    pub fn all_memories<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + 'a {
        let indices = (0..self.module().memories.len())
            .map(|i| MemoryIndex::new(i))
            .collect::<Vec<_>>();
        indices
            .into_iter()
            .map(|i| (i, self.get_exported_memory(i)))
    }

    /// Return the memories defined in this instance (not imported).
    pub fn defined_memories<'a>(&'a mut self) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
        let num_imported = self.module().num_imported_memories;
        self.all_memories()
            .skip(num_imported)
            .map(|(_i, memory)| memory)
    }

    /// Get all globals within this instance.
    ///
    /// Returns both imported and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full global index space.
    pub fn all_globals<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (GlobalIndex, ExportGlobal)> + 'a {
        self.instance_mut().all_globals()
    }

    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals<'a>(
        &'a mut self,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, ExportGlobal)> + 'a {
        self.instance_mut().defined_globals()
    }

    /// Return a reference to the contained `Instance`.
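    ///
    /// Panics if this is the null handle created by `InstanceHandle::null`,
    /// i.e. if there is no underlying instance pointer to dereference.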
    #[inline]
    pub(crate) fn instance(&self) -> &Instance {
        unsafe { &*self.instance.unwrap().as_ptr() }
    }

    pub(crate) fn instance_mut(&mut self) -> &mut Instance {
        unsafe { &mut *self.instance.unwrap().as_ptr() }
    }

    /// Get this instance's `dyn VMStore` trait object.
    ///
    /// This should only be used for initializing a vmctx's store pointer. It
    /// should never be used to access the store itself. Use `InstanceAndStore`
    /// for that instead.
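    ///
    /// A hedged sketch of the intended shape of a call site (the variable
    /// names here are illustrative only):
    ///
    /// ```ignore
    /// // The `&StoreOpaque` argument proves the caller already has store
    /// // access; the returned pointer is only meant to be written into a
    /// // vmctx's store slot, never dereferenced to reach the store.
    /// let store_ptr: NonNull<dyn VMStore> = handle.traitobj(&store_opaque);
    /// ```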
    pub fn traitobj(&self, store: &StoreOpaque) -> NonNull<dyn VMStore> {
        // By requiring a store argument, we are ensuring that callers aren't
        // getting this trait object in order to access the store, since they
        // already have access. See `InstanceAndStore` and its documentation for
        // details about the store access patterns we want to restrict host code
        // to.
        let _ = store;

        self.instance().store.unwrap().0
    }

    /// Configure the internal `dyn VMStore` pointer after-the-fact.
    ///
    /// This is provided for the original `Store` itself to configure the first
    /// self-pointer after the original `Box` has been initialized.
    pub unsafe fn set_store(&mut self, store: NonNull<dyn VMStore>) {
        self.instance_mut().set_store(Some(store));
    }

    /// Returns a clone of this instance handle.
    ///
    /// This is unsafe because the returned handle is just a cheap clone of the
    /// internals; there is no lifetime tracking around its validity. You'll
    /// need to ensure that all of the returned handles go out of scope at the
    /// same time.
    #[inline]
    pub unsafe fn clone(&self) -> InstanceHandle {
        InstanceHandle {
            instance: self.instance,
        }
    }

    /// Performs post-initialization of an instance after its handle has been
    /// created and registered with a store.
    ///
    /// If this function fails, the instance must still be kept alive within
    /// the store: a failure may represent only partial initialization, and
    /// other instances could already reference some of this instance's state.
    pub fn initialize(
        &mut self,
        store: &mut StoreOpaque,
        module: &Module,
        is_bulk_memory: bool,
    ) -> Result<()> {
        allocator::initialize_instance(store, self.instance_mut(), module, is_bulk_memory)
    }

    /// Attempts to convert the host address `addr` to a WebAssembly-relative
    /// address recorded in a `WasmFault`.
    ///
    /// This method checks all linear memories that this instance contains to
    /// see if any of them contain `addr`. If one does, then `Some` is returned
    /// with metadata about the wasm fault. Otherwise `None` is returned and
    /// `addr` doesn't belong to this instance.
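    ///
    /// A hedged sketch of how a trap/signal handler might consume the result
    /// (the surrounding handler and `faulting_host_addr` are hypothetical):
    ///
    /// ```ignore
    /// if let Some(fault) = handle.wasm_fault(faulting_host_addr) {
    ///     // `wasm_address` is relative to the base of the linear memory that
    ///     // contains the faulting host address.
    ///     eprintln!(
    ///         "wasm fault at {:#x} (memory size {:#x} bytes)",
    ///         fault.wasm_address, fault.memory_size,
    ///     );
    /// }
    /// ```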
    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        self.instance().wasm_fault(addr)
    }
}