wasmtime/runtime/vm/
instance.rs

1//! An `Instance` contains all the runtime state used by execution of a
2//! wasm module (except its callstack and register state). An
3//! `InstanceHandle` is a reference-counting handle for an `Instance`.
4
5use crate::OpaqueRootScope;
6use crate::code::ModuleWithCode;
7use crate::module::ModuleRegistry;
8use crate::prelude::*;
9use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
10use crate::runtime::vm::export::{Export, ExportMemory};
11use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
12use crate::runtime::vm::table::{Table, TableElementType};
13use crate::runtime::vm::vmcontext::{
14    VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
15    VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
16    VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
17};
18use crate::runtime::vm::{
19    GcStore, HostResult, Imports, ModuleRuntimeInfo, SendSyncPtr, VMGlobalKind, VMStore,
20    VMStoreRawPtr, VmPtr, VmSafe, WasmFault, catch_unwind_and_record_trap,
21};
22use crate::store::{InstanceId, StoreId, StoreInstanceId, StoreOpaque, StoreResourceLimiter};
23use crate::vm::VMWasmCallFunction;
24use alloc::sync::Arc;
25use core::alloc::Layout;
26use core::marker;
27use core::ops::Range;
28use core::pin::Pin;
29use core::ptr::NonNull;
30#[cfg(target_has_atomic = "64")]
31use core::sync::atomic::AtomicU64;
32use core::{mem, ptr};
33#[cfg(feature = "gc")]
34use wasmtime_environ::ModuleInternedTypeIndex;
35use wasmtime_environ::{
36    DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
37    ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex, HostPtr, MemoryIndex,
38    Module, PrimaryMap, PtrSize, TableIndex, TableInitialValue, TableSegmentElements, TagIndex,
39    Trap, VMCONTEXT_MAGIC, VMOffsets, VMSharedTypeIndex, packed_option::ReservedValue,
40};
41#[cfg(feature = "wmemcheck")]
42use wasmtime_wmemcheck::Wmemcheck;
43
44mod allocator;
45pub use allocator::*;
46
/// A type that roughly corresponds to a WebAssembly instance, but is also used
/// for host-defined objects.
///
/// Instances here can correspond to actual instantiated modules, but it's also
/// used ubiquitously for host-defined objects. For example creating a
/// host-defined memory will have a `module` that looks like it exports a single
/// memory (and similar for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether or not they were created on the host or through a module.
///
/// # Ownership
///
/// This structure is never allocated directly but is instead managed through
/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
/// dynamic size corresponding to the `module` configured within. Memory
/// management of this structure is always done through `InstanceHandle` as the
/// sole owner of an instance.
///
/// # `Instance` and `Pin`
///
/// Given an instance it is accompanied with trailing memory for the
/// appropriate `VMContext`. The `Instance` also holds `runtime_info` and other
/// information pointing to relevant offsets for the `VMContext`. Thus it is
/// not sound to mutate `runtime_info` after an instance is created. More
/// generally it's also not safe to "swap" instances, for example given two
/// `&mut Instance` values it's not sound to swap them as then the `VMContext`
/// values are inaccurately described.
///
/// To encapsulate this guarantee this type is only ever mutated through Rust's
/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>` which
/// statically disallows safe access to `&mut Instance`. There are assorted
/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
/// individual fields, for example `memories_mut`. More methods can be added as
/// necessary or methods may also be added to project multiple fields at a time
/// if necessary. The precise ergonomics around getting mutable access to
/// some fields (but notably not `runtime_info`) is probably going to evolve
/// over time.
///
/// Note that it is not sound to basically ever pass around `&mut Instance`.
/// That should always instead be `Pin<&mut Instance>`. All usage of
/// `Pin::new_unchecked` should be here in this module in just a few `unsafe`
/// locations and it's recommended to use existing helpers if you can.
#[repr(C)] // ensure that the vmctx field is last.
pub struct Instance {
    /// The index, within a `Store` that this instance lives at
    id: InstanceId,

    /// The runtime info (corresponding to the "compiled module"
    /// abstraction in higher layers) that is retained and needed for
    /// lazy initialization. This provides access to the underlying
    /// Wasm module entities, the compiled JIT code, metadata about
    /// functions, lazy initialization state, etc.
    runtime_info: ModuleRuntimeInfo,

    /// WebAssembly linear memory data.
    ///
    /// This is where all runtime information about defined linear memories in
    /// this module lives.
    ///
    /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// memory.
    memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,

    /// WebAssembly table data.
    ///
    /// Like memories, this is only for defined tables in the module and
    /// contains all of their runtime state.
    ///
    /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// table.
    tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,

    /// Stores the dropped passive element segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_elements: EntitySet<ElemIndex>,

    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: EntitySet<DataIndex>,

    // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
    // memory 0.
    #[cfg(feature = "wmemcheck")]
    pub(crate) wmemcheck_state: Option<Wmemcheck>,

    /// Self-pointer back to `Store<T>` and its functions. Not present for
    /// the brief time that `Store<T>` is itself being created. Also not
    /// present for some niche uses that are disconnected from stores (e.g.
    /// cross-thread stuff used in `InstancePre`)
    store: Option<VMStoreRawPtr>,

    /// Additional context used by compiled wasm code. This field is last, and
    /// represents a dynamically-sized array that extends beyond the nominal
    /// end of the struct (similar to a flexible array member).
    vmctx: OwnedVMContext<VMContext>,
}
146
147impl Instance {
    /// Create an instance at the given memory address.
    ///
    /// It is assumed the memory was properly aligned and the
    /// allocation was `alloc_size` in bytes.
    ///
    /// # Safety
    ///
    /// The `req.imports` field must be appropriately sized/typed for the module
    /// being allocated according to `req.runtime_info`. Additionally `memories`
    /// and `tables` must have been allocated for `req.store`.
    unsafe fn new(
        req: InstanceAllocationRequest,
        memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
        tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
        memory_tys: &PrimaryMap<MemoryIndex, wasmtime_environ::Memory>,
    ) -> InstanceHandle {
        let module = req.runtime_info.env_module();
        // Pre-size the "dropped" sets to cover every passive segment the
        // module declares; all start out not-dropped.
        let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
        let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());

        // `memory_tys` is only consumed by the wmemcheck initialization below;
        // silence the unused-variable warning when that feature is off.
        #[cfg(not(feature = "wmemcheck"))]
        let _ = memory_tys;

        let mut ret = OwnedInstance::new(Instance {
            id: req.id,
            runtime_info: req.runtime_info.clone(),
            memories,
            tables,
            dropped_elements,
            dropped_data,
            #[cfg(feature = "wmemcheck")]
            wmemcheck_state: {
                if req.store.engine().config().wmemcheck {
                    // Initial checked size is memory 0's minimum size,
                    // converted from wasm pages to bytes (64 KiB per page).
                    let size = memory_tys
                        .iter()
                        .next()
                        .map(|memory| memory.1.limits.min)
                        .unwrap_or(0)
                        * 64
                        * 1024;
                    Some(Wmemcheck::new(size.try_into().unwrap()))
                } else {
                    None
                }
            },
            // The store pointer is filled in later via `set_store`.
            store: None,
            vmctx: OwnedVMContext::new(),
        });

        // SAFETY: this vmctx was allocated with the same layout above, so it
        // should be safe to initialize with the same values here.
        unsafe {
            ret.get_mut().initialize_vmctx(
                module,
                req.runtime_info.offsets(),
                req.store,
                req.imports,
            );
        }
        ret
    }
209
    /// Converts a raw `VMContext` pointer into a raw `Instance` pointer.
    ///
    /// # Safety
    ///
    /// Calling this function safely requires that `vmctx` is a valid allocation
    /// of a `VMContext` which is derived from `Instance::new`. To safely
    /// convert the returned raw pointer into a safe instance pointer callers
    /// will also want to uphold guarantees such as:
    ///
    /// * The instance should not be in use elsewhere. For example you can't
    ///   call this function twice, turn both raw pointers into safe pointers,
    ///   and then use both safe pointers.
    /// * There should be no other active mutable borrow to any other instance
    ///   within the same store. Note that this is not restricted to just this
    ///   instance pointer, but to all instances in a store. Instances can
    ///   safely traverse to other instances "laterally" meaning that a mutable
    ///   borrow on one is a mutable borrow on all.
    /// * There should be no active mutable borrow on the store accessible at
    ///   the same time the instance is turned. Instances are owned by a store
    ///   and a store can be used to acquire a safe instance borrow at any time.
    /// * The lifetime of the usage of the instance should not be unnecessarily
    ///   long, for example it cannot be `'static`.
    ///
    /// Other entrypoints exist for converting from a raw `VMContext` to a safe
    /// pointer such as:
    ///
    /// * `Instance::enter_host_from_wasm`
    /// * `Instance::sibling_vmctx{,_mut}`
    ///
    /// These place further restrictions on the API signature to satisfy some of
    /// the above points.
    #[inline]
    pub(crate) unsafe fn from_vmctx(vmctx: NonNull<VMContext>) -> NonNull<Instance> {
        // SAFETY: The validity of `byte_sub` relies on `vmctx` being a valid
        // allocation. Because `Instance` is `#[repr(C)]` with `vmctx` as its
        // last field, the `Instance` header sits exactly
        // `size_of::<Instance>()` bytes before the trailing `VMContext`.
        unsafe {
            vmctx
                .byte_sub(mem::size_of::<Instance>())
                .cast::<Instance>()
        }
    }
251
    /// Encapsulated entrypoint to the host from WebAssembly, converting a raw
    /// `VMContext` pointer into a `VMStore` plus an `InstanceId`.
    ///
    /// This is an entrypoint for core wasm entering back into the host. This is
    /// used for both host functions and libcalls for example. This will execute
    /// the closure `f` with safer internal types than a raw `VMContext`
    /// pointer.
    ///
    /// The closure `f` will have its errors caught, handled, and translated to
    /// an ABI-safe return value to give back to wasm. This includes both normal
    /// errors such as traps as well as panics.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vmctx` is a valid allocation and is safe to
    /// dereference at this time. That's generally only true when it's a
    /// wasm-provided value and this is the first function called after entering
    /// the host. Otherwise this could unsafely alias the store with a mutable
    /// pointer, for example.
    #[inline]
    pub(crate) unsafe fn enter_host_from_wasm<R>(
        vmctx: NonNull<VMContext>,
        f: impl FnOnce(&mut dyn VMStore, InstanceId) -> R,
    ) -> R::Abi
    where
        R: HostResult,
    {
        // SAFETY: It's a contract of this function that `vmctx` is a valid
        // pointer with neither the store nor other instances actively in use
        // when this is called, so it should be safe to acquire a mutable
        // pointer to the store and read the instance pointer.
        let (store, instance) = unsafe {
            let instance = Instance::from_vmctx(vmctx);
            let instance = instance.as_ref();
            // `store` is `Some` here: wasm can only be executing if this
            // instance has already been attached to a store via `set_store`.
            let store = &mut *instance.store.unwrap().0.as_ptr();
            (store, instance.id)
        };

        // Thread the `store` and `instance` through panic/trap infrastructure
        // back into `f`.
        catch_unwind_and_record_trap(store, |store| f(store, instance))
    }
294
    /// Converts the provided `*mut VMContext` to an `Instance` pointer and
    /// returns it with the same lifetime as `self`.
    ///
    /// This function can be used when traversing a `VMContext` to reach into
    /// the context needed for imports, optionally.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    #[inline]
    unsafe fn sibling_vmctx<'a>(&'a self, vmctx: NonNull<VMContext>) -> &'a Instance {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer, so the pointer arithmetic in `from_vmctx` is sound.
        let ptr = unsafe { Instance::from_vmctx(vmctx) };
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime.
        unsafe { ptr.as_ref() }
    }
316
    /// Same as [`Self::sibling_vmctx`], but the mutable version.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    ///
    /// (Note that it is *NOT* required that `vmctx` be distinct from this
    /// instance's `vmctx`, or that usage of the resulting instance is limited
    /// to its defined items! The returned borrow has the same lifetime as
    /// `self`, which means that this instance cannot be used while the
    /// resulting instance is in use, and we therefore do not need to worry
    /// about mutable aliasing between this instance and the resulting
    /// instance.)
    #[inline]
    unsafe fn sibling_vmctx_mut<'a>(
        self: Pin<&'a mut Self>,
        vmctx: NonNull<VMContext>,
    ) -> Pin<&'a mut Instance> {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer such that this pointer arithmetic is valid.
        let mut ptr = unsafe { Instance::from_vmctx(vmctx) };

        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime. Finally mutable references to an instance are
        // always through `Pin`, so it's safe to create a pin-pointer here.
        unsafe { Pin::new_unchecked(ptr.as_mut()) }
    }
347
    /// Return the `wasmtime_environ::Module` metadata describing this
    /// instance's entities (memories, tables, globals, exports, etc.).
    pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
        self.runtime_info.env_module()
    }
351
352    pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
353        match &self.runtime_info {
354            ModuleRuntimeInfo::Module(m) => Some(m),
355            ModuleRuntimeInfo::Bare(_) => None,
356        }
357    }
358
    /// Translate a module-level interned type index into an engine-level
    /// interned type index.
    #[cfg(feature = "gc")]
    pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
        // Delegates to the runtime info, which owns the module→engine type map.
        self.runtime_info.engine_type_index(module_index)
    }
365
    /// Return the `VMOffsets` describing the layout of this instance's
    /// `VMContext`; used by all the `vmctx_plus_offset*` accessors below.
    #[inline]
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }
370
    /// Return the indexed `VMFunctionImport`.
    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a function import slot.
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
    }
375
    /// Return the indexed `VMTableImport`.
    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a table import slot.
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
    }
380
    /// Return the indexed `VMMemoryImport`.
    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a memory import slot.
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
    }
385
    /// Return the indexed `VMGlobalImport`.
    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a global import slot.
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
    }
390
    /// Return the indexed `VMTagImport`.
    fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a tag import slot.
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
    }
395
    /// Return a pointer to the indexed `VMTagDefinition` within this vmctx.
    pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a tag definition.
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
    }
400
    /// Return the indexed `VMTableDefinition`, copied out of vmctx memory.
    pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
        // SAFETY: `table_ptr` yields a valid in-bounds pointer into this
        // vmctx, so reading the definition out of it is sound.
        unsafe { self.table_ptr(index).read() }
    }
405
    /// Updates the value for a defined table to `VMTableDefinition`.
    fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
        // SAFETY: `table_ptr` yields a valid in-bounds pointer into this
        // vmctx, so writing a new definition through it is sound.
        unsafe {
            self.table_ptr(index).write(table);
        }
    }
412
    /// Return a pointer to the `index`'th table within this instance, stored
    /// in vmctx memory.
    pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a table definition.
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
    }
418
419    /// Get a locally defined or imported memory.
420    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
421        if let Some(defined_index) = self.env_module().defined_memory_index(index) {
422            self.memory(defined_index)
423        } else {
424            let import = self.imported_memory(index);
425            unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
426        }
427    }
428
    /// Return the indexed `VMMemoryDefinition`, loaded from vmctx memory
    /// already.
    #[inline]
    pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
        // SAFETY: `memory_ptr` yields a valid pointer to this instance's own
        // memory definition, so loading through it is sound.
        unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
    }
435
    /// Set the indexed memory to `VMMemoryDefinition`.
    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
        // SAFETY: `memory_ptr` yields a valid pointer to this instance's own
        // memory definition, so writing a new definition is sound.
        unsafe {
            self.memory_ptr(index).write(mem);
        }
    }
442
    /// Return the address of the specified memory at `index` within this vmctx.
    ///
    /// Note that the returned pointer resides in wasm-code-readable-memory in
    /// the vmctx.
    #[inline]
    pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
        // SAFETY: the offset comes from this instance's own `VMOffsets` and
        // the slot holds a `VmPtr` to the definition; reading it is sound.
        unsafe {
            self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
                .as_non_null()
        }
    }
454
    /// Return a pointer to the indexed `VMGlobalDefinition` within this vmctx.
    pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as a global definition.
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
    }
459
    /// Get all globals within this instance.
    ///
    /// Returns both import and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full globals space.
    pub fn all_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (GlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        // Iterate the module's full global index space and pair each index
        // with a store-level `Global` handle.
        module
            .globals
            .keys()
            .map(move |idx| (idx, self.get_exported_global(store, idx)))
    }
477
    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        // Imported globals come first in the index space, so skipping them
        // leaves only defined globals; the `unwrap` below therefore cannot
        // fail since every remaining index maps to a defined global.
        self.all_globals(store)
            .skip(module.num_imported_globals)
            .map(move |(i, global)| (module.defined_global_index(i).unwrap(), global))
    }
488
    /// Return a pointer to the vmctx slot holding the `VMStoreContext`
    /// pointer (used for interrupts/epochs/limits bookkeeping).
    #[inline]
    pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as the store-context slot.
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
    }
494
    /// Return a pointer to the global epoch counter used by this instance.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
        let offset = self.offsets().ptr.vmctx_epoch_ptr();
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as the epoch-pointer slot.
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }
501
    /// Return a pointer to the collector-specific heap data.
    pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
        let offset = self.offsets().ptr.vmctx_gc_heap_data();
        // SAFETY: the offset comes from this instance's own `VMOffsets`, so it
        // is in-bounds of this vmctx and typed as the GC-heap-data slot.
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }
507
    /// Attach this instance to `store`, filling in all store-derived vmctx
    /// slots: the raw store trait-object pointer, the `VMStoreContext`
    /// pointer, the engine epoch counter, and the GC heap data pointer.
    ///
    /// # Safety
    ///
    /// `store` must be the store that owns this instance, and the pointers
    /// written here must remain valid for as long as this instance may run.
    pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: &StoreOpaque) {
        // FIXME: should be more targeted ideally with the `unsafe` than just
        // throwing this entire function in a large `unsafe` block.
        unsafe {
            *self.as_mut().store_mut() = Some(VMStoreRawPtr(store.traitobj()));
            self.vm_store_context()
                .write(Some(store.vm_store_context_ptr().into()));
            #[cfg(target_has_atomic = "64")]
            {
                *self.as_mut().epoch_ptr() =
                    Some(NonNull::from(store.engine().epoch_counter()).into());
            }

            // Only wire up a GC heap when the module actually needs one;
            // otherwise explicitly clear the slot.
            if self.env_module().needs_gc_heap {
                self.as_mut().set_gc_heap(Some(store.unwrap_gc_store()));
            } else {
                self.as_mut().set_gc_heap(None);
            }
        }
    }
528
529    unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
530        if let Some(gc_store) = gc_store {
531            *self.gc_heap_data() = Some(unsafe { gc_store.gc_heap.vmctx_gc_heap_data().into() });
532        } else {
533            *self.gc_heap_data() = None;
534        }
535    }
536
    /// Return a reference to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> NonNull<VMContext> {
        // Delegates to the shared `InstanceLayout` helper which knows where
        // the trailing `VMContext` lives relative to this struct.
        InstanceLayout::vmctx(self)
    }
542
    /// Lookup a function by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    ///
    /// # Safety
    ///
    /// The `store` parameter must be the store that owns this instance and the
    /// functions that this instance can reference.
    pub unsafe fn get_exported_func(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        index: FuncIndex,
    ) -> crate::Func {
        // `unwrap` is the documented panic above: `index` must be in bounds.
        let func_ref = self.get_func_ref(registry, index).unwrap();

        // SAFETY: the validity of `func_ref` is guaranteed by the validity of
        // `self`, and the contract that `store` must own `func_ref` is a
        // contract of this function itself.
        unsafe { crate::Func::from_vm_func_ref(store, func_ref) }
    }
566
567    /// Lookup a table by index.
568    ///
569    /// # Panics
570    ///
571    /// Panics if `index` is out of bounds for this instance.
572    pub fn get_exported_table(&self, store: StoreId, index: TableIndex) -> crate::Table {
573        let (id, def_index) = if let Some(def_index) = self.env_module().defined_table_index(index)
574        {
575            (self.id, def_index)
576        } else {
577            let import = self.imported_table(index);
578            // SAFETY: validity of this `Instance` guarantees validity of the
579            // `vmctx` pointer being read here to find the transitive
580            // `InstanceId` that the import is associated with.
581            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
582            (id, import.index)
583        };
584        crate::Table::from_raw(StoreInstanceId::new(store, id), def_index)
585    }
586
    /// Lookup a memory by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    #[cfg_attr(
        not(feature = "threads"),
        expect(unused_variables, reason = "definitions cfg'd to dummy",)
    )]
    pub fn get_exported_memory(&self, store: StoreId, index: MemoryIndex) -> ExportMemory {
        let module = self.env_module();
        // Shared memories are handled separately from unshared ones: a shared
        // memory export carries the runtime memory object itself plus its
        // import record rather than a (store, instance, index) triple.
        if module.memories[index].shared {
            let (memory, import) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (
                        self.get_defined_memory(def_index),
                        self.get_defined_memory_vmimport(def_index),
                    )
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of
                    // the `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let instance = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()) };
                    (instance.get_defined_memory(import.index), *import)
                };

            // `unwrap` is justified: the module metadata above says this
            // memory is shared.
            let vm = memory.as_shared_memory().unwrap().clone();
            ExportMemory::Shared(vm, import)
        } else {
            let (id, def_index) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (self.id, def_index)
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of the
                    // `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
                    (id, import.index)
                };

            // SAFETY: `from_raw` requires that the memory is not shared, which
            // was tested above in this if/else.
            let store_id = StoreInstanceId::new(store, id);
            ExportMemory::Unshared(unsafe { crate::Memory::from_raw(store_id, def_index) })
        }
    }
635
    /// Lookup a global by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    pub(crate) fn get_exported_global(&self, store: StoreId, index: GlobalIndex) -> crate::Global {
        // If this global is defined within this instance, then that's easy to
        // calculate the `Global`.
        if let Some(def_index) = self.env_module().defined_global_index(index) {
            let instance = StoreInstanceId::new(store, self.id);
            return crate::Global::from_core(instance, def_index);
        }

        // For imported globals it's required to match on the `kind` to
        // determine which `Global` constructor is going to be invoked.
        let import = self.imported_global(index);
        match import.kind {
            // Host-defined globals live directly in the store, not an instance.
            VMGlobalKind::Host(index) => crate::Global::from_host(store, index),
            VMGlobalKind::Instance(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = VMContext::from_opaque(import.vmctx.unwrap().as_non_null());
                    self.sibling_vmctx(vmctx).id
                };
                crate::Global::from_core(StoreInstanceId::new(store, id), index)
            }
            #[cfg(feature = "component-model")]
            VMGlobalKind::ComponentFlags(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = super::component::VMComponentContext::from_opaque(
                        import.vmctx.unwrap().as_non_null(),
                    );
                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
                };
                crate::Global::from_component_flags(
                    crate::component::store::StoreComponentInstanceId::new(store, id),
                    index,
                )
            }
        }
    }
680
681    /// Get an exported tag by index.
682    ///
683    /// # Panics
684    ///
685    /// Panics if the index is out-of-range.
686    pub fn get_exported_tag(&self, store: StoreId, index: TagIndex) -> crate::Tag {
687        let (id, def_index) = if let Some(def_index) = self.env_module().defined_tag_index(index) {
688            (self.id, def_index)
689        } else {
690            let import = self.imported_tag(index);
691            // SAFETY: validity of this `Instance` guarantees validity of the
692            // `vmctx` pointer being read here to find the transitive
693            // `InstanceId` that the import is associated with.
694            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
695            (id, import.index)
696        };
697        crate::Tag::from_raw(StoreInstanceId::new(store, id), def_index)
698    }
699
    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved by `lookup_by_declaration`.
    pub fn exports(&self) -> wasmparser::collections::index_map::Iter<'_, String, EntityIndex> {
        self.env_module().exports.iter()
    }
708
    /// Grow memory by the specified amount of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified amount
    /// of pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
    ///
    /// The optional `limiter` is forwarded to the underlying `Memory::grow`
    /// operation.
    pub(crate) async fn memory_grow(
        mut self: Pin<&mut Self>,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        idx: DefinedMemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        let memory = &mut self.as_mut().memories_mut()[idx].1;

        // SAFETY: this is the safe wrapper around `Memory::grow` because it
        // automatically updates the `VMMemoryDefinition` in this instance after
        // a growth operation below.
        let result = unsafe { memory.grow(delta, limiter).await };

        // Update the state used by a non-shared Wasm memory in case the base
        // pointer and/or the length changed. (Shared memories are skipped here
        // as their `VMMemoryDefinition` is not owned by this instance's vmctx.)
        if memory.as_shared_memory().is_none() {
            let vmmemory = memory.vmmemory();
            self.set_memory(idx, vmmemory);
        }

        result
    }
736
737    pub(crate) fn table_element_type(
738        self: Pin<&mut Self>,
739        table_index: TableIndex,
740    ) -> TableElementType {
741        self.get_table(table_index).element_type()
742    }
743
744    /// Performs a grow operation on the `table_index` specified using `grow`.
745    ///
746    /// This will handle updating the VMTableDefinition internally as necessary.
747    pub(crate) async fn defined_table_grow(
748        mut self: Pin<&mut Self>,
749        table_index: DefinedTableIndex,
750        grow: impl AsyncFnOnce(&mut Table) -> Result<Option<usize>>,
751    ) -> Result<Option<usize>> {
752        let table = self.as_mut().get_defined_table(table_index);
753        let result = grow(table).await;
754        let element = table.vmtable();
755        self.set_table(table_index, element);
756        result
757    }
758
759    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
760        let size = mem::size_of::<Self>()
761            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
762            .unwrap();
763        let align = mem::align_of::<Self>();
764        Layout::from_size_align(size, align).unwrap()
765    }
766
767    fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
768        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
769    }
770
    /// Construct a new VMFuncRef for the given function
    /// (imported or defined in this module) and store into the given
    /// location. Used during lazy initialization.
    ///
    /// Note that our current lazy-init scheme actually calls this every
    /// time the funcref pointer is fetched; this turns out to be better
    /// than tracking state related to whether it's been initialized
    /// before, because resetting that state on (re)instantiation is
    /// very expensive if there are many funcrefs.
    ///
    /// # Safety
    ///
    /// This functions requires that `into` is a valid pointer.
    unsafe fn construct_func_ref(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        index: FuncIndex,
        type_index: VMSharedTypeIndex,
        into: *mut VMFuncRef,
    ) {
        // Look up this instance's module within `registry` to gain access to
        // its compiled artifacts (trampolines and function bodies).
        let module_with_code = ModuleWithCode::in_store(
            registry,
            self.runtime_module()
                .expect("funcref impossible in fake module"),
        )
        .expect("module not in store");

        let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
            // Locally-defined function: point the funcref at this module's
            // array-call trampoline and compiled function body, with this
            // instance's own vmctx as the callee context.
            VMFuncRef {
                array_call: NonNull::from(
                    module_with_code
                        .array_to_wasm_trampoline(def_index)
                        .expect("should have array-to-Wasm trampoline for escaping function"),
                )
                .cast()
                .into(),
                wasm_call: Some(
                    NonNull::new(
                        module_with_code
                            .finished_function(def_index)
                            .as_ptr()
                            .cast::<VMWasmCallFunction>()
                            .cast_mut(),
                    )
                    .unwrap()
                    .into(),
                ),
                vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
                type_index,
            }
        } else {
            // Imported function: copy the pointers from the import that was
            // supplied at instantiation time, substituting `type_index`.
            let import = self.imported_function(index);
            VMFuncRef {
                array_call: import.array_call,
                wasm_call: Some(import.wasm_call),
                vmctx: import.vmctx,
                type_index,
            }
        };

        // SAFETY: the unsafe contract here is forwarded to callers of this
        // function.
        unsafe {
            ptr::write(into, func_ref);
        }
    }
837
    /// Get a `&VMFuncRef` for the given `FuncIndex`.
    ///
    /// Returns `None` if the index is the reserved index value.
    ///
    /// The returned reference is a stable reference that won't be moved and can
    /// be passed into JIT code.
    ///
    /// The `registry` is used to locate this instance's compiled code when
    /// (re)constructing the funcref in place.
    pub(crate) fn get_func_ref(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        index: FuncIndex,
    ) -> Option<NonNull<VMFuncRef>> {
        // The reserved sentinel index maps to `None`.
        if index == FuncIndex::reserved_value() {
            return None;
        }

        // For now, we eagerly initialize an funcref struct in-place
        // whenever asked for a reference to it. This is mostly
        // fine, because in practice each funcref is unlikely to be
        // requested more than a few times: once-ish for funcref
        // tables used for call_indirect (the usual compilation
        // strategy places each function in the table at most once),
        // and once or a few times when fetching exports via API.
        // Note that for any case driven by table accesses, the lazy
        // table init behaves like a higher-level cache layer that
        // protects this initialization from happening multiple
        // times, via that particular table at least.
        //
        // When `ref.func` becomes more commonly used or if we
        // otherwise see a use-case where this becomes a hotpath,
        // we can reconsider by using some state to track
        // "uninitialized" explicitly, for example by zeroing the
        // funcrefs (perhaps together with other
        // zeroed-at-instantiate-time state) or using a separate
        // is-initialized bitmap.
        //
        // We arrived at this design because zeroing memory is
        // expensive, so it's better for instantiation performance
        // if we don't have to track "is-initialized" state at
        // all!
        let func = &self.env_module().functions[index];
        let sig = func.signature.unwrap_engine_type_index();

        // SAFETY: the offset calculated here should be correct with
        // `self.offsets`
        let func_ref = unsafe {
            self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
        };

        // SAFETY: the `func_ref` ptr should be valid as it's within our
        // `VMContext` area.
        unsafe {
            self.construct_func_ref(registry, index, sig, func_ref.as_ptr());
        }

        Some(func_ref)
    }
894
895    /// Get the passive elements segment at the given index.
896    ///
897    /// Returns an empty segment if the index is out of bounds or if the segment
898    /// has been dropped.
899    ///
900    /// The `storage` parameter should always be `None`; it is a bit of a hack
901    /// to work around lifetime issues.
902    pub(crate) fn passive_element_segment<'a>(
903        &self,
904        storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
905        elem_index: ElemIndex,
906    ) -> &'a TableSegmentElements {
907        debug_assert!(storage.is_none());
908        *storage = Some((
909            // TODO: this `clone()` shouldn't be necessary but is used for now to
910            // inform `rustc` that the lifetime of the elements here are
911            // disconnected from the lifetime of `self`.
912            self.env_module().clone(),
913            // NB: fall back to an expressions-based list of elements which
914            // doesn't have static type information (as opposed to
915            // `TableSegmentElements::Functions`) since we don't know what type
916            // is needed in the caller's context. Let the type be inferred by
917            // how they use the segment.
918            TableSegmentElements::Expressions(Box::new([])),
919        ));
920        let (module, empty) = storage.as_ref().unwrap();
921
922        match module.passive_elements_map.get(&elem_index) {
923            Some(index) if !self.dropped_elements.contains(elem_index) => {
924                &module.passive_elements[*index]
925            }
926            _ => empty,
927        }
928    }
929
930    /// The `table.init` operation: initializes a portion of a table with a
931    /// passive element.
932    ///
933    /// # Errors
934    ///
935    /// Returns a `Trap` error when the range within the table is out of bounds
936    /// or the range within the passive element is out of bounds.
937    pub(crate) async fn table_init(
938        store: &mut StoreOpaque,
939        limiter: Option<&mut StoreResourceLimiter<'_>>,
940        instance: InstanceId,
941        table_index: TableIndex,
942        elem_index: ElemIndex,
943        dst: u64,
944        src: u64,
945        len: u64,
946    ) -> Result<()> {
947        let mut storage = None;
948        let elements = store
949            .instance(instance)
950            .passive_element_segment(&mut storage, elem_index);
951        let mut const_evaluator = ConstExprEvaluator::default();
952        Self::table_init_segment(
953            store,
954            limiter,
955            instance,
956            &mut const_evaluator,
957            table_index,
958            elements,
959            dst,
960            src,
961            len,
962        )
963        .await
964    }
965
    /// Initializes `len` entries of `table_index` starting at `dst`, copying
    /// from offset `src` of the provided `elements` segment owned by
    /// `elements_instance_id`.
    ///
    /// # Errors
    ///
    /// Returns `Trap::TableOutOfBounds` when either the table range or the
    /// segment range is out of bounds.
    pub(crate) async fn table_init_segment(
        store: &mut StoreOpaque,
        mut limiter: Option<&mut StoreResourceLimiter<'_>>,
        elements_instance_id: InstanceId,
        const_evaluator: &mut ConstExprEvaluator,
        table_index: TableIndex,
        elements: &TableSegmentElements,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<()> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init

        let store_id = store.id();
        let elements_instance = store.instance_mut(elements_instance_id);
        let table = elements_instance.get_exported_table(store_id, table_index);
        let table_size = table._size(store);

        // Perform a bounds check on the table being written to. This is done by
        // ensuring that `dst + len <= table.size()` via checked arithmetic.
        //
        // Note that the bounds check for the element segment happens below when
        // the original segment is sliced via `src` and `len`.
        table_size
            .checked_sub(dst)
            .and_then(|i| i.checked_sub(len))
            .ok_or(Trap::TableOutOfBounds)?;

        // `src` and `len` index into host-side slices below, so a failed
        // conversion to `usize` is necessarily out-of-bounds.
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        // `dst + len <= table_size` was validated above, so this addition
        // cannot overflow; `len` originated as a `u64` so `try_from` succeeds.
        let positions = dst..dst + u64::try_from(len).unwrap();
        match elements {
            // Function-index segments: resolve each index to an exported
            // function and write it into the table.
            TableSegmentElements::Functions(funcs) => {
                let elements = funcs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                for (i, func_idx) in positions.zip(elements) {
                    let (instance, registry) =
                        store.instance_and_module_registry_mut(elements_instance_id);
                    // SAFETY: the `store_id` passed to `get_exported_func` is
                    // indeed the store that owns the function.
                    let func = unsafe { instance.get_exported_func(registry, store_id, *func_idx) };
                    table.set_(store, i, func.into()).unwrap();
                }
            }
            // Expression segments: evaluate each const expression within a
            // rooting scope and write the resulting reference into the table.
            TableSegmentElements::Expressions(exprs) => {
                let mut store = OpaqueRootScope::new(store);
                let exprs = exprs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                let mut context = ConstEvalContext::new(elements_instance_id);
                for (i, expr) in positions.zip(exprs) {
                    let element = const_evaluator
                        .eval(&mut store, limiter.as_deref_mut(), &mut context, expr)
                        .await?;
                    table.set_(&mut store, i, element.ref_().unwrap()).unwrap();
                }
            }
        }

        Ok(())
    }
1031
1032    /// Drop an element.
1033    pub(crate) fn elem_drop(self: Pin<&mut Self>, elem_index: ElemIndex) {
1034        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop
1035
1036        self.dropped_elements_mut().insert(elem_index);
1037
1038        // Note that we don't check that we actually removed a segment because
1039        // dropping a non-passive segment is a no-op (not a trap).
1040    }
1041
1042    /// Get a locally-defined memory.
1043    pub fn get_defined_memory_mut(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> &mut Memory {
1044        &mut self.memories_mut()[index].1
1045    }
1046
1047    /// Get a locally-defined memory.
1048    pub fn get_defined_memory(&self, index: DefinedMemoryIndex) -> &Memory {
1049        &self.memories[index].1
1050    }
1051
1052    pub fn get_defined_memory_vmimport(&self, index: DefinedMemoryIndex) -> VMMemoryImport {
1053        crate::runtime::vm::VMMemoryImport {
1054            from: self.memory_ptr(index).into(),
1055            vmctx: self.vmctx().into(),
1056            index,
1057        }
1058    }
1059
    /// Do a `memory.copy`
    ///
    /// Copies `len` bytes from offset `src` of memory `src_index` to offset
    /// `dst` of memory `dst_index`; the two may be the same memory and the
    /// ranges may overlap.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the source or destination ranges are out of
    /// bounds.
    pub(crate) fn memory_copy(
        self: Pin<&mut Self>,
        dst_index: MemoryIndex,
        dst: u64,
        src_index: MemoryIndex,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy

        let src_mem = self.get_memory(src_index);
        let dst_mem = self.get_memory(dst_index);

        // Bounds-check both ranges, converting the offsets to `usize` in the
        // process.
        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = dst_mem.base.as_ptr().add(dst);
            let src = src_mem.base.as_ptr().add(src);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            //
            // `ptr::copy` permits overlapping regions (memmove semantics).
            ptr::copy(src, dst, len);
        }

        Ok(())
    }
1095
1096    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
1097        let oob = || Trap::MemoryOutOfBounds;
1098        let end = ptr
1099            .checked_add(len)
1100            .and_then(|i| usize::try_from(i).ok())
1101            .ok_or_else(oob)?;
1102        if end > max {
1103            Err(oob())
1104        } else {
1105            Ok(ptr.try_into().unwrap())
1106        }
1107    }
1108
    /// Perform the `memory.fill` operation on a locally defined memory.
    ///
    /// Writes `len` copies of the byte `val` starting at offset `dst`.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the memory range is out of bounds.
    pub(crate) fn memory_fill(
        self: Pin<&mut Self>,
        memory_index: DefinedMemoryIndex,
        dst: u64,
        val: u8,
        len: u64,
    ) -> Result<(), Trap> {
        // Convert the defined index into a module-level index for `get_memory`.
        let memory_index = self.env_module().memory_index(memory_index);
        let memory = self.get_memory(memory_index);
        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::write_bytes(dst, val, len);
        }

        Ok(())
    }
1137
1138    /// Get the internal storage range of a particular Wasm data segment.
1139    pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
1140        match self.env_module().passive_data_map.get(&index) {
1141            Some(range) if !self.dropped_data.contains(index) => range.clone(),
1142            _ => 0..0,
1143        }
1144    }
1145
1146    /// Given an internal storage range of a Wasm data segment (or subset of a
1147    /// Wasm data segment), get the data's raw bytes.
1148    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
1149        let start = usize::try_from(range.start).unwrap();
1150        let end = usize::try_from(range.end).unwrap();
1151        &self.runtime_info.wasm_data()[start..end]
1152    }
1153
1154    /// Performs the `memory.init` operation.
1155    ///
1156    /// # Errors
1157    ///
1158    /// Returns a `Trap` error if the destination range is out of this module's
1159    /// memory's bounds or if the source range is outside the data segment's
1160    /// bounds.
1161    pub(crate) fn memory_init(
1162        self: Pin<&mut Self>,
1163        memory_index: MemoryIndex,
1164        data_index: DataIndex,
1165        dst: u64,
1166        src: u32,
1167        len: u32,
1168    ) -> Result<(), Trap> {
1169        let range = self.wasm_data_range(data_index);
1170        self.memory_init_segment(memory_index, range, dst, src, len)
1171    }
1172
    /// Copies `len` bytes starting at offset `src` of the data segment bytes
    /// identified by `range` into `memory_index` at offset `dst`.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the destination range is outside the memory's
    /// bounds or the source range is outside the data segment's bounds.
    pub(crate) fn memory_init_segment(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        range: Range<u32>,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let data = self.wasm_data(range);
        // Bounds-check the destination against the memory and the source
        // against the segment's bytes.
        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
        let len = len as usize;

        unsafe {
            let src_start = data.as_ptr().add(src);
            let dst_start = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy_nonoverlapping(src_start, dst_start, len);
        }

        Ok(())
    }
1199
1200    /// Drop the given data segment, truncating its length to zero.
1201    pub(crate) fn data_drop(self: Pin<&mut Self>, data_index: DataIndex) {
1202        self.dropped_data_mut().insert(data_index);
1203
1204        // Note that we don't check that we actually removed a segment because
1205        // dropping a non-passive segment is a no-op (not a trap).
1206    }
1207
1208    /// Get a table by index regardless of whether it is locally-defined
1209    /// or an imported, foreign table. Ensure that the given range of
1210    /// elements in the table is lazily initialized.  We define this
1211    /// operation all-in-one for safety, to ensure the lazy-init
1212    /// happens.
1213    ///
1214    /// Takes an `Iterator` for the index-range to lazy-initialize,
1215    /// for flexibility. This can be a range, single item, or empty
1216    /// sequence, for example. The iterator should return indices in
1217    /// increasing order, so that the break-at-out-of-bounds behavior
1218    /// works correctly.
1219    pub(crate) fn get_table_with_lazy_init(
1220        self: Pin<&mut Self>,
1221        registry: &ModuleRegistry,
1222        table_index: TableIndex,
1223        range: impl Iterator<Item = u64>,
1224    ) -> &mut Table {
1225        let (idx, instance) = self.defined_table_index_and_instance(table_index);
1226        instance.get_defined_table_with_lazy_init(registry, idx, range)
1227    }
1228
    /// Gets the raw runtime table data structure owned by this instance
    /// given the provided `idx`.
    ///
    /// The `range` specified is eagerly initialized for funcref tables;
    /// tables of other element types are returned without any initialization
    /// work.
    pub fn get_defined_table_with_lazy_init(
        mut self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        idx: DefinedTableIndex,
        range: impl IntoIterator<Item = u64>,
    ) -> &mut Table {
        let elt_ty = self.tables[idx].1.element_type();

        // Lazy initialization only applies to function tables.
        if elt_ty == TableElementType::Func {
            for i in range {
                match self.tables[idx].1.get_func_maybe_init(i) {
                    // Uninitialized table element.
                    Ok(None) => {}
                    // Initialized table element, move on to the next.
                    Ok(Some(_)) => continue,
                    // Out-of-bounds; caller will handle by likely
                    // throwing a trap. No work to do to lazy-init
                    // beyond the end.
                    Err(_) => break,
                };

                // The table element `i` is uninitialized and is now being
                // initialized. This must imply that a `precompiled` list of
                // function indices is available for this table. The precompiled
                // list is extracted and then it is consulted with `i` to
                // determine the function that is going to be initialized. Note
                // that `i` may be outside the limits of the static
                // initialization so it's a fallible `get` instead of an index.
                let module = self.env_module();
                let precomputed = match &module.table_initialization.initial_values[idx] {
                    TableInitialValue::Null { precomputed } => precomputed,
                    TableInitialValue::Expr(_) => unreachable!(),
                };
                // Panicking here helps catch bugs rather than silently truncating by accident.
                let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
                // A missing precomputed entry (or a reserved function index
                // inside `get_func_ref`) yields `None`, which is written via
                // `set_func` as-is.
                let func_ref = func_index
                    .and_then(|func_index| self.as_mut().get_func_ref(registry, func_index));
                self.as_mut().tables_mut()[idx]
                    .1
                    .set_func(i, func_ref)
                    .expect("Table type should match and index should be in-bounds");
            }
        }

        self.get_defined_table(idx)
    }
1279
1280    /// Get a table by index regardless of whether it is locally-defined or an
1281    /// imported, foreign table.
1282    pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> &mut Table {
1283        let (idx, instance) = self.defined_table_index_and_instance(table_index);
1284        instance.get_defined_table(idx)
1285    }
1286
1287    /// Get a locally-defined table.
1288    pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> &mut Table {
1289        &mut self.tables_mut()[index].1
1290    }
1291
    /// Resolves `index` into the `DefinedTableIndex` of the table plus a
    /// mutable borrow of the instance that actually defines it — either `self`
    /// or, for an imported table, the sibling instance its import points at.
    pub(crate) fn defined_table_index_and_instance<'a>(
        self: Pin<&'a mut Self>,
        index: TableIndex,
    ) -> (DefinedTableIndex, Pin<&'a mut Instance>) {
        if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
            // The table is defined by this module, so it lives in `self`.
            (defined_table_index, self)
        } else {
            // Imported table: follow the import's `vmctx` pointer to the
            // defining instance.
            let import = self.imported_table(index);
            let index = import.index;
            let vmctx = import.vmctx.as_non_null();
            // SAFETY: the validity of `self` means that the reachable instances
            // should also all be owned by the same store and fully initialized,
            // so it's safe to laterally move from a mutable borrow of this
            // instance to a mutable borrow of a sibling instance.
            let foreign_instance = unsafe { self.sibling_vmctx_mut(vmctx) };
            (index, foreign_instance)
        }
    }
1310
1311    /// Initialize the VMContext data associated with this Instance.
1312    ///
1313    /// The `VMContext` memory is assumed to be uninitialized; any field
1314    /// that we need in a certain state will be explicitly written by this
1315    /// function.
1316    unsafe fn initialize_vmctx(
1317        mut self: Pin<&mut Self>,
1318        module: &Module,
1319        offsets: &VMOffsets<HostPtr>,
1320        store: &StoreOpaque,
1321        imports: Imports,
1322    ) {
1323        assert!(ptr::eq(module, self.env_module().as_ref()));
1324
1325        // SAFETY: the type of the magic field is indeed `u32` and this function
1326        // is initializing its value.
1327        unsafe {
1328            self.vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
1329                .write(VMCONTEXT_MAGIC);
1330        }
1331
1332        // SAFETY: it's up to the caller to provide a valid store pointer here.
1333        unsafe {
1334            self.as_mut().set_store(store);
1335        }
1336
1337        // Initialize shared types
1338        //
1339        // SAFETY: validity of the vmctx means it should be safe to write to it
1340        // here.
1341        unsafe {
1342            let types = NonNull::from(self.runtime_info.type_ids());
1343            self.type_ids_array().write(types.cast().into());
1344        }
1345
1346        // Initialize the built-in functions
1347        //
1348        // SAFETY: the type of the builtin functions field is indeed a pointer
1349        // and the pointer being filled in here, plus the vmctx is valid to
1350        // write to during initialization.
1351        unsafe {
1352            static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
1353            let ptr = BUILTINS.expose_provenance();
1354            self.vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
1355                .write(VmPtr::from(ptr));
1356        }
1357
1358        // Initialize the imports
1359        //
1360        // SAFETY: the vmctx is safe to initialize during this function and
1361        // validity of each item itself is a contract the caller must uphold.
1362        debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
1363        unsafe {
1364            ptr::copy_nonoverlapping(
1365                imports.functions.as_ptr(),
1366                self.vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
1367                    .as_ptr(),
1368                imports.functions.len(),
1369            );
1370            debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
1371            ptr::copy_nonoverlapping(
1372                imports.tables.as_ptr(),
1373                self.vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
1374                    .as_ptr(),
1375                imports.tables.len(),
1376            );
1377            debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
1378            ptr::copy_nonoverlapping(
1379                imports.memories.as_ptr(),
1380                self.vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
1381                    .as_ptr(),
1382                imports.memories.len(),
1383            );
1384            debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
1385            ptr::copy_nonoverlapping(
1386                imports.globals.as_ptr(),
1387                self.vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
1388                    .as_ptr(),
1389                imports.globals.len(),
1390            );
1391            debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
1392            ptr::copy_nonoverlapping(
1393                imports.tags.as_ptr(),
1394                self.vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
1395                    .as_ptr(),
1396                imports.tags.len(),
1397            );
1398        }
1399
1400        // N.B.: there is no need to initialize the funcrefs array because we
1401        // eagerly construct each element in it whenever asked for a reference
1402        // to that element. In other words, there is no state needed to track
1403        // the lazy-init, so we don't need to initialize any state now.
1404
1405        // Initialize the defined tables
1406        //
1407        // SAFETY: it's safe to initialize these tables during initialization
1408        // here and the various types of pointers and such here should all be
1409        // valid.
1410        unsafe {
1411            let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
1412            let tables = self.as_mut().tables_mut();
1413            for i in 0..module.num_defined_tables() {
1414                ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
1415                ptr = ptr.add(1);
1416            }
1417        }
1418
1419        // Initialize the defined memories. This fills in both the
1420        // `defined_memories` table and the `owned_memories` table at the same
1421        // time. Entries in `defined_memories` hold a pointer to a definition
1422        // (all memories) whereas the `owned_memories` hold the actual
1423        // definitions of memories owned (not shared) in the module.
1424        //
1425        // SAFETY: it's safe to initialize these memories during initialization
1426        // here and the various types of pointers and such here should all be
1427        // valid.
1428        unsafe {
1429            let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
1430            let mut owned_ptr = self.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
1431            let memories = self.as_mut().memories_mut();
1432            for i in 0..module.num_defined_memories() {
1433                let defined_memory_index = DefinedMemoryIndex::new(i);
1434                let memory_index = module.memory_index(defined_memory_index);
1435                if module.memories[memory_index].shared {
1436                    let def_ptr = memories[defined_memory_index]
1437                        .1
1438                        .as_shared_memory()
1439                        .unwrap()
1440                        .vmmemory_ptr();
1441                    ptr.write(VmPtr::from(def_ptr));
1442                } else {
1443                    owned_ptr.write(memories[defined_memory_index].1.vmmemory());
1444                    ptr.write(VmPtr::from(owned_ptr));
1445                    owned_ptr = owned_ptr.add(1);
1446                }
1447                ptr = ptr.add(1);
1448            }
1449        }
1450
1451        // Zero-initialize the globals so that nothing is uninitialized memory
1452        // after this function returns. The globals are actually initialized
1453        // with their const expression initializers after the instance is fully
1454        // allocated.
1455        //
1456        // SAFETY: it's safe to initialize globals during initialization
1457        // here. Note that while the value being written is not valid for all
1458        // types of globals it's initializing the memory to zero instead of
1459        // being in an undefined state. So it's still unsafe to access globals
1460        // after this, but if it's read then it'd hopefully crash faster than
1461        // leaving this undefined.
1462        unsafe {
1463            for (index, _init) in module.global_initializers.iter() {
1464                self.global_ptr(index).write(VMGlobalDefinition::new());
1465            }
1466        }
1467
1468        // Initialize the defined tags
1469        //
1470        // SAFETY: it's safe to initialize these tags during initialization
1471        // here and the various types of pointers and such here should all be
1472        // valid.
1473        unsafe {
1474            let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
1475            for i in 0..module.num_defined_tags() {
1476                let defined_index = DefinedTagIndex::new(i);
1477                let tag_index = module.tag_index(defined_index);
1478                let tag = module.tags[tag_index];
1479                ptr.write(VMTagDefinition::new(
1480                    tag.signature.unwrap_engine_type_index(),
1481                ));
1482                ptr = ptr.add(1);
1483            }
1484        }
1485    }
1486
1487    /// Attempts to convert from the host `addr` specified to a WebAssembly
1488    /// based address recorded in `WasmFault`.
1489    ///
1490    /// This method will check all linear memories that this instance contains
1491    /// to see if any of them contain `addr`. If one does then `Some` is
1492    /// returned with metadata about the wasm fault. Otherwise `None` is
1493    /// returned and `addr` doesn't belong to this instance.
1494    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
1495        let mut fault = None;
1496        for (_, (_, memory)) in self.memories.iter() {
1497            let accessible = memory.wasm_accessible();
1498            if accessible.start <= addr && addr < accessible.end {
1499                // All linear memories should be disjoint so assert that no
1500                // prior fault has been found.
1501                assert!(fault.is_none());
1502                fault = Some(WasmFault {
1503                    memory_size: memory.byte_size(),
1504                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
1505                });
1506            }
1507        }
1508        fault
1509    }
1510
1511    /// Returns the id, within this instance's store, that it's assigned.
1512    pub fn id(&self) -> InstanceId {
1513        self.id
1514    }
1515
1516    /// Get all memories within this instance.
1517    ///
1518    /// Returns both import and defined memories.
1519    ///
1520    /// Returns both exported and non-exported memories.
1521    ///
1522    /// Gives access to the full memories space.
1523    pub fn all_memories(
1524        &self,
1525        store: StoreId,
1526    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + '_ {
1527        self.env_module()
1528            .memories
1529            .iter()
1530            .map(move |(i, _)| (i, self.get_exported_memory(store, i)))
1531    }
1532
1533    /// Return the memories defined in this instance (not imported).
1534    pub fn defined_memories<'a>(
1535        &'a self,
1536        store: StoreId,
1537    ) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
1538        let num_imported = self.env_module().num_imported_memories;
1539        self.all_memories(store)
1540            .skip(num_imported)
1541            .map(|(_i, memory)| memory)
1542    }
1543
    /// Lookup an item with the given index.
    ///
    /// Dispatches on the entity kind of `export` and delegates to the
    /// corresponding `get_exported_*` accessor on this instance.
    ///
    /// # Panics
    ///
    /// Panics if `export` is not valid for this instance.
    ///
    /// # Safety
    ///
    /// This function requires that `store` is the correct store which owns this
    /// instance.
    pub unsafe fn get_export_by_index_mut(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        export: EntityIndex,
    ) -> Export {
        match export {
            // SAFETY: the contract of `store` owning this instance is a
            // safety requirement of this function itself.
            EntityIndex::Function(i) => {
                Export::Function(unsafe { self.get_exported_func(registry, store, i) })
            }
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(store, i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(store, i)),
            // Memories need an extra dispatch since shared and unshared
            // memories are represented by distinct `Export` variants.
            EntityIndex::Memory(i) => match self.get_exported_memory(store, i) {
                ExportMemory::Unshared(m) => Export::Memory(m),
                ExportMemory::Shared(m, i) => Export::SharedMemory(m, i),
            },
            EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(store, i)),
        }
    }
1575
    /// Pin-projection granting mutable access to this instance's `store`
    /// field (the optional raw pointer back to the owning store).
    fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
        // SAFETY: this is a pin-projection to get a mutable reference to an
        // internal field and is safe so long as the `&mut Self` temporarily
        // created is not overwritten, which it isn't here.
        unsafe { &mut self.get_unchecked_mut().store }
    }
1582
    /// Pin-projection granting mutable access to the `dropped_elements` set
    /// tracking which passive element segments have been dropped.
    fn dropped_elements_mut(self: Pin<&mut Self>) -> &mut EntitySet<ElemIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_elements }
    }
1587
    /// Pin-projection granting mutable access to the `dropped_data` set
    /// tracking which passive data segments have been dropped.
    fn dropped_data_mut(self: Pin<&mut Self>) -> &mut EntitySet<DataIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_data }
    }
1592
    /// Pin-projection granting mutable access to this instance's defined
    /// memories and their allocation indices.
    fn memories_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().memories }
    }
1599
    /// Pin-projection granting mutable access to this instance's defined
    /// tables and their allocation indices.
    pub(crate) fn tables_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().tables }
    }
1606
    /// Pin-projection granting mutable access to the optional `wmemcheck`
    /// (wasm memory-checker) state. Only present with the `wmemcheck` feature.
    #[cfg(feature = "wmemcheck")]
    pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
    }
1612}
1613
// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
// last field of `Instance`.
unsafe impl InstanceLayout for Instance {
    // The vmctx area is filled in explicitly during instantiation, so there's
    // no requirement for the allocation to start out zeroed.
    const INIT_ZEROED: bool = false;
    type VMContext = VMContext;

    fn layout(&self) -> Layout {
        Self::alloc_layout(self.runtime_info.offsets())
    }

    fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
        &self.vmctx
    }

    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
        &mut self.vmctx
    }
}
1632
/// Owning handle for a core-wasm [`Instance`]; see [`OwnedInstance`].
pub type InstanceHandle = OwnedInstance<Instance>;
1634
/// A handle holding an `Instance` of a WebAssembly module.
///
/// This structure is an owning handle of the `instance` contained internally.
/// When this value goes out of scope it will deallocate the `Instance` and all
/// memory associated with it.
///
/// Note that this lives within a `StoreOpaque` on a list of instances that a
/// store is keeping alive.
#[derive(Debug)]
#[repr(transparent)] // guarantee this is a zero-cost wrapper
pub struct OwnedInstance<T: InstanceLayout> {
    /// The raw pointer to the instance that was allocated.
    ///
    /// Note that this is not equivalent to `Box<Instance>` because the
    /// allocation here has a `VMContext` trailing after it. Thus the custom
    /// destructor to invoke the `dealloc` function with the appropriate
    /// layout.
    instance: SendSyncPtr<T>,
    /// Expresses logical ownership of the instance-plus-vmctx allocation to
    /// the compiler (auto-traits, drop-check) without storing anything.
    _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
}
1655
/// Structure that must be placed at the end of a type implementing
/// `InstanceLayout`.
#[repr(align(16))] // match the alignment of VMContext
pub struct OwnedVMContext<T> {
    /// A pointer to the `vmctx` field at the end of the `structure`.
    ///
    /// If you're looking at this a reasonable question would be "why do we need
    /// a pointer to ourselves?" because after all the pointer's value is
    /// trivially derivable from any `&Instance` pointer. The rationale for this
    /// field's existence is subtle, but it's required for correctness. The
    /// short version is "this makes miri happy".
    ///
    /// The long version of why this field exists is that the rules that MIRI
    /// uses to ensure pointers are used correctly have various conditions on
    /// them depending on how pointers are used. More specifically if `*mut T`
    /// is derived from `&mut T`, then that invalidates all prior pointers
    /// derived from the `&mut T`. This means that while we liberally want to
    /// re-acquire a `*mut VMContext` throughout the implementation of
    /// `Instance` the trivial way, a function
    /// `fn vmctx(Pin<&mut Instance>) -> *mut VMContext` would effectively
    /// invalidate all prior `*mut VMContext` pointers acquired. The purpose of
    /// this field is to serve as a sort of source-of-truth for where
    /// `*mut VMContext` pointers come from.
    ///
    /// This field is initialized when the `Instance` is created with the
    /// original allocation's pointer. That means that the provenance of this
    /// pointer contains the entire allocation (both instance and `VMContext`).
    /// This provenance bit is then "carried through" where `fn vmctx` will base
    /// all returned pointers on this pointer itself. This provides the means of
    /// never invalidating this pointer throughout MIRI and additionally being
    /// able to still temporarily have `Pin<&mut Instance>` methods and such.
    ///
    /// It's important to note, though, that this is not here purely for MIRI.
    /// The careful construction of the `fn vmctx` method has ramifications on
    /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
    /// GHSA-ch89-5g45-qwc7, was caused due to relying on undefined behavior. By
    /// deriving VMContext pointers from this pointer it specifically hints to
    /// LLVM that trickery is afoot and it properly informs `noalias` and such
    /// annotations and analysis. More-or-less this pointer is actually loaded
    /// in LLVM IR which helps defeat otherwise present aliasing optimizations,
    /// which we want, since writes to this should basically never be optimized
    /// out.
    ///
    /// As a final note it's worth pointing out that the machine code generated
    /// for accessing `fn vmctx` is still as one would expect. This member isn't
    /// actually ever loaded at runtime (or at least shouldn't be). Perhaps in
    /// the future if the memory consumption of this field is a problem we could
    /// shrink it slightly, but for now one extra pointer per wasm instance
    /// seems not too bad.
    vmctx_self_reference: SendSyncPtr<T>,

    /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
    /// safe operation.
    _marker: core::marker::PhantomPinned,
}
1710
1711impl<T> OwnedVMContext<T> {
1712    /// Creates a new blank vmctx to place at the end of an instance.
1713    pub fn new() -> OwnedVMContext<T> {
1714        OwnedVMContext {
1715            vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
1716            _marker: core::marker::PhantomPinned,
1717        }
1718    }
1719}
1720
/// Helper trait to plumb both core instances and component instances into
/// `OwnedInstance` below.
///
/// # Safety
///
/// This trait requires `layout` to correctly describe `Self` and appropriately
/// allocate space for `Self::VMContext` afterwards. Additionally the field
/// returned by `owned_vmctx()` must be the last field in the structure.
pub unsafe trait InstanceLayout {
    /// Whether or not to allocate this instance with `alloc_zeroed` or `alloc`.
    const INIT_ZEROED: bool;

    /// The trailing `VMContext` type at the end of this instance.
    type VMContext;

    /// The memory layout to use to allocate and deallocate this instance.
    fn layout(&self) -> Layout;

    /// Returns the `OwnedVMContext` header which, per this trait's safety
    /// contract, must be the last field of `Self`.
    fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
    /// Mutable counterpart of [`InstanceLayout::owned_vmctx`].
    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;

    /// Returns the `vmctx_self_reference` set above.
    #[inline]
    fn vmctx(&self) -> NonNull<Self::VMContext> {
        // The definition of this method is subtle but intentional. The goal
        // here is that effectively this should return `&mut self.vmctx`, but
        // it's not quite so simple. Some more documentation is available on the
        // `vmctx_self_reference` field, but the general idea is that we're
        // creating a pointer to return with proper provenance. Provenance is
        // still in the works in Rust at the time of this writing but the load
        // of the `self.vmctx_self_reference` field is important here as it
        // affects how LLVM thinks about aliasing with respect to the returned
        // pointer.
        //
        // The intention of this method is to codegen to machine code as `&mut
        // self.vmctx`, however. While it doesn't show up like this in LLVM IR
        // (there's an actual load of the field) it does look like that by the
        // time the backend runs. (that's magic to me, the backend removing
        // loads...)
        let owned_vmctx = self.owned_vmctx();
        let owned_vmctx_raw = NonNull::from(owned_vmctx);
        // SAFETY: it's part of the contract of `InstanceLayout` and the usage
        // with `OwnedInstance` that this indeed points to the vmctx.
        let addr = unsafe { owned_vmctx_raw.add(1) };
        owned_vmctx
            .vmctx_self_reference
            .as_non_null()
            .with_addr(addr.addr())
    }

    /// Helper function to access various locations offset from our `*mut
    /// VMContext` object.
    ///
    /// Note that this method takes `&self` as an argument but returns
    /// `NonNull<T>` which is frequently used to mutate said memory. This is an
    /// intentional design decision where the safety of the modification of
    /// memory is placed as a burden onto the caller. The implementation of this
    /// method explicitly does not require `&mut self` to acquire mutable
    /// provenance to update the `VMContext` region. Instead all pointers into
    /// the `VMContext` area have provenance/permissions to write.
    ///
    /// Also note though that care must be taken to ensure that reads/writes of
    /// memory must only happen where appropriate, for example a non-atomic
    /// write (as most are) should never happen concurrently with another read
    /// or write. It's generally on the burden of the caller to adhere to this.
    ///
    /// Also of note is that most of the time the usage of this method falls
    /// into one of:
    ///
    /// * Something in the VMContext is being read or written. In that case use
    ///   `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due to
    ///   that having a safer lifetime.
    ///
    /// * A pointer is being created to pass to other VM* data structures. In
    ///   that situation the lifetime of all VM data structures are typically
    ///   tied to the `Store<T>` which is what provides the guarantees around
    ///   concurrency/etc.
    ///
    /// There's quite a lot of unsafety riding on this method, especially
    /// related to the ascription `T` of the byte `offset`. It's hoped that in
    /// the future we're able to settle on an in theory safer design.
    ///
    /// # Safety
    ///
    /// This method is unsafe because the `offset` must be within bounds of the
    /// `VMContext` object trailing this instance. Additionally `T` must be a
    /// valid ascription of the value that resides at that location.
    unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
        // SAFETY: the safety requirements of `byte_add` are forwarded to this
        // method's caller.
        unsafe {
            self.vmctx()
                .byte_add(usize::try_from(offset.into()).unwrap())
                .cast()
        }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&self` to the returned reference `&T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&mut self` to the returned reference `&mut T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
        self: Pin<&mut Self>,
        offset: impl Into<u32>,
    ) -> &mut T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
    }
}
1845
impl<T: InstanceLayout> OwnedInstance<T> {
    /// Allocates a new `OwnedInstance` and places `instance` inside of it.
    ///
    /// This will move `instance` into a fresh heap allocation, sized by
    /// `instance.layout()`, which also reserves space for the trailing
    /// `T::VMContext`.
    pub(super) fn new(mut instance: T) -> OwnedInstance<T> {
        let layout = instance.layout();
        debug_assert!(layout.size() >= size_of_val(&instance));
        debug_assert!(layout.align() >= align_of_val(&instance));

        // SAFETY: it's up to us to assert that `layout` has a non-zero size,
        // which is asserted here.
        let ptr = unsafe {
            assert!(layout.size() > 0);
            if T::INIT_ZEROED {
                alloc::alloc::alloc_zeroed(layout)
            } else {
                alloc::alloc::alloc(layout)
            }
        };
        if ptr.is_null() {
            alloc::alloc::handle_alloc_error(layout);
        }
        let instance_ptr = NonNull::new(ptr.cast::<T>()).unwrap();

        // SAFETY: it's part of the unsafe contract of `InstanceLayout` that the
        // `add` here is appropriate for the layout allocated.
        let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
        instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();

        // SAFETY: we allocated above and it's an unsafe contract of
        // `InstanceLayout` that the layout is suitable for writing the
        // instance.
        unsafe {
            instance_ptr.write(instance);
        }

        let ret = OwnedInstance {
            instance: SendSyncPtr::new(instance_ptr),
            _marker: marker::PhantomData,
        };

        // Double-check various vmctx calculations are correct.
        debug_assert_eq!(
            vmctx_self_reference.addr(),
            // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
            // the last field to get a pointer to 1-byte-past-the-end of an
            // object, which should be valid.
            unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
        );
        debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());

        ret
    }

    /// Gets the raw underlying `&Instance` from this handle.
    pub fn get(&self) -> &T {
        // SAFETY: this is an owned instance handle that retains exclusive
        // ownership of the `Instance` inside. With `&self` given we know
        // this pointer is valid and the returned lifetime is connected
        // to `self` so that should also be valid.
        unsafe { self.instance.as_non_null().as_ref() }
    }

    /// Same as [`Self::get`] except for mutability.
    pub fn get_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: The lifetime concerns here are the same as `get` above.
        // Otherwise `new_unchecked` is used here to uphold the contract that
        // instances are always pinned in memory.
        unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
    }
}
1917
1918impl<T: InstanceLayout> Drop for OwnedInstance<T> {
1919    fn drop(&mut self) {
1920        unsafe {
1921            let layout = self.get().layout();
1922            ptr::drop_in_place(self.instance.as_ptr());
1923            alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
1924        }
1925    }
1926}