wasmtime/runtime/vm/instance.rs

1//! An `Instance` contains all the runtime state used by execution of a
2//! wasm module (except its callstack and register state). An
3//! `InstanceHandle` is a reference-counting handle for an `Instance`.
4
5use crate::OpaqueRootScope;
6use crate::code::ModuleWithCode;
7use crate::module::ModuleRegistry;
8use crate::prelude::*;
9use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
10use crate::runtime::vm::export::{Export, ExportMemory};
11use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
12use crate::runtime::vm::table::{Table, TableElementType};
13use crate::runtime::vm::vmcontext::{
14    VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
15    VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
16    VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
17};
18use crate::runtime::vm::{
19    GcStore, HostResult, Imports, ModuleRuntimeInfo, SendSyncPtr, VMGlobalKind, VMStore,
20    VMStoreRawPtr, VmPtr, VmSafe, WasmFault, catch_unwind_and_record_trap,
21};
22use crate::store::{
23    Asyncness, InstanceId, StoreId, StoreInstanceId, StoreOpaque, StoreResourceLimiter,
24};
25use crate::vm::VMWasmCallFunction;
26use alloc::sync::Arc;
27use core::alloc::Layout;
28use core::marker;
29use core::ops::Range;
30use core::pin::Pin;
31use core::ptr::NonNull;
32#[cfg(target_has_atomic = "64")]
33use core::sync::atomic::AtomicU64;
34use core::{mem, ptr};
35#[cfg(feature = "gc")]
36use wasmtime_environ::ModuleInternedTypeIndex;
37use wasmtime_environ::error::OutOfMemory;
38use wasmtime_environ::{
39    DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
40    ElemIndex, EntityIndex, EntityRef, FuncIndex, GlobalIndex, HostPtr, MemoryIndex, PrimaryMap,
41    PtrSize, TableIndex, TableInitialValue, TableSegmentElements, TagIndex, Trap, VMCONTEXT_MAGIC,
42    VMOffsets, VMSharedTypeIndex, packed_option::ReservedValue,
43};
44#[cfg(feature = "wmemcheck")]
45use wasmtime_wmemcheck::Wmemcheck;
46
47mod allocator;
48pub use allocator::*;
49
50/// A type that roughly corresponds to a WebAssembly instance, but is also used
51/// for host-defined objects.
52///
53/// Instances here can correspond to actual instantiated modules, but the type
54/// is also used ubiquitously for host-defined objects. For example, creating a
55/// host-defined memory produces a `module` that looks like it exports a single
56/// memory (and similarly for other constructs).
57///
58/// This `Instance` type is used as a ubiquitous representation for WebAssembly
59/// values, whether they were created on the host or through a module.
60///
61/// # Ownership
62///
63/// This structure is never allocated directly but is instead managed through
64/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
65/// dynamic size corresponding to the `module` configured within. Memory
66/// management of this structure is always done through `InstanceHandle` as the
67/// sole owner of an instance.
68///
69/// # `Instance` and `Pin`
70///
71/// Every instance is accompanied by trailing memory for the appropriate
72/// `VMContext`. The `Instance` also holds `runtime_info` and other
73/// information pointing to relevant offsets for the `VMContext`. Thus it is
74/// not sound to mutate `runtime_info` after an instance is created. More
75/// generally it's also not sound to "swap" instances: given two
76/// `&mut Instance` values, swapping them would leave each `VMContext`
77/// inaccurately described.
78///
79/// To encapsulate this guarantee this type is only ever mutated through Rust's
80/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>` which
81/// statically disallows safe access to `&mut Instance`. There are assorted
82/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
83/// individual fields, for example `memories_mut`. More methods can be added as
84/// necessary, and methods may also be added to project multiple fields at a
85/// time. The precise ergonomics around getting mutable access to
86/// some fields (but notably not `runtime_info`) are likely to evolve
87/// over time.
88///
89/// Note that it is essentially never sound to pass around `&mut Instance`;
90/// that should always instead be `Pin<&mut Instance>`. All usage of
91/// `Pin::new_unchecked` should be confined to a few `unsafe` locations in
92/// this module, and it's recommended to use the existing helpers if you can.
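///
/// As an illustrative sketch only (the projection methods actually defined in
/// this module may differ in detail), such a projection looks roughly like:
///
/// ```ignore
/// fn memories_mut(
///     self: Pin<&mut Self>,
/// ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
///     // SAFETY: `memories` is not structurally pinned, this projection grants
///     // no access to `runtime_info`, and it does not allow moving the
///     // `Instance` itself.
///     unsafe { &mut self.get_unchecked_mut().memories }
/// }
/// ```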
93#[repr(C)] // ensure that the vmctx field is last.
94pub struct Instance {
95    /// The index, within a `Store` that this instance lives at
96    id: InstanceId,
97
98    /// The runtime info (corresponding to the "compiled module"
99    /// abstraction in higher layers) that is retained and needed for
100    /// lazy initialization. This provides access to the underlying
101    /// Wasm module entities, the compiled JIT code, metadata about
102    /// functions, lazy initialization state, etc.
103    //
104    // SAFETY: this field cannot be overwritten after an instance is created. It
105    // must contain this exact same value for the entire lifetime of this
106    // instance. This enables borrowing the info's `Module` and this instance at
107    // the same time (instance mutably, module not). Additionally it enables
108    // borrowing a store mutably at the same time as a contained instance.
109    runtime_info: ModuleRuntimeInfo,
110
111    /// WebAssembly linear memory data.
112    ///
113    /// This is where all runtime information about defined linear memories in
114    /// this module lives.
115    ///
116    /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
117    /// must be given back to the instance allocator when deallocating each
118    /// memory.
119    memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
120
121    /// WebAssembly table data.
122    ///
123    /// Like memories, this is only for defined tables in the module and
124    /// contains all of their runtime state.
125    ///
126    /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
127    /// must be given back to the instance allocator when deallocating each
128    /// table.
129    tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
130
131    /// Stores the dropped passive element segments in this instantiation by index.
132    /// If the index is present in the set, the segment has been dropped.
133    dropped_elements: EntitySet<ElemIndex>,
134
135    /// Stores the dropped passive data segments in this instantiation by index.
136    /// If the index is present in the set, the segment has been dropped.
137    dropped_data: EntitySet<DataIndex>,
138
139    // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
140    // memory 0.
141    #[cfg(feature = "wmemcheck")]
142    pub(crate) wmemcheck_state: Option<Wmemcheck>,
143
144    /// Self-pointer back to `Store<T>` and its functions. Not present for
145    /// the brief time that `Store<T>` is itself being created. Also not
146    /// present for some niche uses that are disconnected from stores (e.g.
147    /// cross-thread stuff used in `InstancePre`)
148    store: Option<VMStoreRawPtr>,
149
150    /// Additional context used by compiled wasm code. This field is last, and
151    /// represents a dynamically-sized array that extends beyond the nominal
152    /// end of the struct (similar to a flexible array member).
153    vmctx: OwnedVMContext<VMContext>,
154}
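
// Layout note (informal): an `Instance` allocation is the fixed-size header
// above followed immediately by its dynamically-sized `VMContext`, whose size
// comes from the module's `VMOffsets`:
//
//     [ Instance fields ............ ][ VMContext ..................... ]
//     ^ allocation start              ^ `Instance::vmctx()` points here
//
// `Instance::from_vmctx` below recovers the header from a `VMContext` pointer
// by subtracting `mem::size_of::<Instance>()`, and `alloc_layout` sizes the
// whole allocation accordingly.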
155
156impl Instance {
157    /// Create an instance, allocating backing memory for it.
158    ///
159    /// The allocation is sized and aligned for this `Instance` plus the
160    /// trailing `VMContext` described by `req.runtime_info`.
161    ///
162    /// # Safety
163    ///
164    /// The `req.imports` field must be appropriately sized/typed for the module
165    /// being allocated according to `req.runtime_info`. Additionally `memories`
166    /// and `tables` must have been allocated for `req.store`.
167    unsafe fn new(
168        req: InstanceAllocationRequest,
169        memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
170        tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
171    ) -> Result<InstanceHandle, OutOfMemory> {
172        let module = req.runtime_info.env_module();
173        let memory_tys = &module.memories;
174        let dropped_elements = EntitySet::with_capacity(module.passive_elements.len())?;
175        let dropped_data = EntitySet::with_capacity(module.passive_data_map.len())?;
176
177        #[cfg(feature = "wmemcheck")]
178        let wmemcheck_state = if req.store.engine().config().wmemcheck {
179            let size = memory_tys
180                .iter()
181                .next()
182                .map(|memory| memory.1.limits.min)
183                .unwrap_or(0)
184                * 64
185                * 1024;
186            Some(Wmemcheck::new(size.try_into().unwrap()))
187        } else {
188            None
189        };
190        #[cfg(not(feature = "wmemcheck"))]
191        let _ = memory_tys;
192
193        let mut ret = OwnedInstance::new(Instance {
194            id: req.id,
195            runtime_info: req.runtime_info.clone(),
196            memories,
197            tables,
198            dropped_elements,
199            dropped_data,
200            #[cfg(feature = "wmemcheck")]
201            wmemcheck_state,
202            store: None,
203            vmctx: OwnedVMContext::new(),
204        })?;
205
206        // SAFETY: this vmctx was allocated with the same layout above, so it
207        // should be safe to initialize with the same values here.
208        unsafe {
209            ret.get_mut().initialize_vmctx(req.store, req.imports);
210        }
211        Ok(ret)
212    }
213
214    /// Converts a raw `VMContext` pointer into a raw `Instance` pointer.
215    ///
216    /// # Safety
217    ///
218    /// Calling this function safely requires that `vmctx` is a valid allocation
219    /// of a `VMContext` which is derived from `Instance::new`. To safely
220    /// convert the returned raw pointer into a safe instance pointer callers
221    /// will also want to uphold guarantees such as:
222    ///
223    /// * The instance should not be in use elsewhere. For example you can't
224    ///   call this function twice, turn both raw pointers into safe pointers,
225    ///   and then use both safe pointers.
226    /// * There should be no other active mutable borrow to any other instance
227    ///   within the same store. Note that this is not restricted to just this
228    ///   instance pointer, but to all instances in a store. Instances can
229    ///   safely traverse to other instances "laterally" meaning that a mutable
230    ///   borrow on one is a mutable borrow on all.
231    /// * There should be no active mutable borrow on the store accessible at
232    ///   the same time the instance is turned into a safe pointer. Instances are owned by a store
233    ///   and a store can be used to acquire a safe instance borrow at any time.
234    /// * The lifetime of the usage of the instance should not be unnecessarily
235    ///   long, for example it cannot be `'static`.
236    ///
237    /// Other entrypoints exist for converting from a raw `VMContext` to a safe
238    /// pointer such as:
239    ///
240    /// * `Instance::enter_host_from_wasm`
241    /// * `Instance::sibling_vmctx{,_mut}`
242    ///
243    /// These place further restrictions on the API signature to satisfy some of
244    /// the above points.
245    #[inline]
246    pub(crate) unsafe fn from_vmctx(vmctx: NonNull<VMContext>) -> NonNull<Instance> {
247        // SAFETY: The validity of `byte_sub` relies on `vmctx` being a valid
248        // allocation.
249        unsafe {
250            vmctx
251                .byte_sub(mem::size_of::<Instance>())
252                .cast::<Instance>()
253        }
254    }
255
256    /// Encapsulated entrypoint to the host from WebAssembly, converting a raw
257    /// `VMContext` pointer into a `VMStore` plus an `InstanceId`.
258    ///
259    /// This is an entrypoint for core wasm entering back into the host. This is
260    /// used for both host functions and libcalls for example. This will execute
261    /// the closure `f` with safer Internal types than a raw `VMContext`
262    /// pointer.
263    ///
264    /// The closure `f` will have its errors caught, handled, and translated to
265    /// an ABI-safe return value to give back to wasm. This includes both normal
266    /// errors such as traps as well as panics.
267    ///
268    /// # Safety
269    ///
270    /// Callers must ensure that `vmctx` is a valid allocation and is safe to
271    /// dereference at this time. That's generally only true when it's a
272    /// wasm-provided value and this is the first function called after entering
273    /// the host. Otherwise this could unsafely alias the store with a mutable
274    /// pointer, for example.
275    #[inline]
276    pub(crate) unsafe fn enter_host_from_wasm<R>(
277        vmctx: NonNull<VMContext>,
278        f: impl FnOnce(&mut dyn VMStore, InstanceId) -> R,
279    ) -> R::Abi
280    where
281        R: HostResult,
282    {
283        // SAFETY: It's a contract of this function that `vmctx` is a valid
284        // pointer with neither the store nor other instances actively in use
285        // when this is called, so it should be safe to acquire a mutable
286        // pointer to the store and read the instance pointer.
287        let (store, instance) = unsafe {
288            let instance = Instance::from_vmctx(vmctx);
289            let instance = instance.as_ref();
290            let store = &mut *instance.store.unwrap().0.as_ptr();
291            (store, instance.id)
292        };
293
294        // Thread the `store` and `instance` through panic/trap infrastructure
295        // back into `f`.
296        catch_unwind_and_record_trap(store, |store| f(store, instance))
297    }
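
    // For illustration only: a hypothetical libcall shim sketching how
    // `enter_host_from_wasm` is intended to be used as the first call after
    // wasm re-enters the host (the real shims live elsewhere and their exact
    // signatures and ABI return types differ per `HostResult`):
    //
    //     unsafe extern "C" fn example_libcall(vmctx: NonNull<VMContext>, arg: u64) -> bool {
    //         // SAFETY: `vmctx` was passed directly by wasm and neither the
    //         // store nor any instance is otherwise borrowed right now.
    //         unsafe {
    //             Instance::enter_host_from_wasm(vmctx, |store, instance| {
    //                 example_libcall_impl(store, instance, arg)
    //             })
    //         }
    //     }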
298
299    /// Converts the provided `VMContext` pointer to an `Instance` reference
300    /// with the same lifetime as `self`.
301    ///
302    /// This function can be used when traversing a `VMContext`, for example
303    /// to reach the instance that an import points to.
304    ///
305    /// # Safety
306    ///
307    /// This function requires that the `vmctx` pointer is indeed valid and
308    /// from the store that `self` belongs to.
309    #[inline]
310    unsafe fn sibling_vmctx<'a>(&'a self, vmctx: NonNull<VMContext>) -> &'a Instance {
311        // SAFETY: it's a contract of this function itself that `vmctx` is a
312        // valid pointer such that this pointer arithmetic is valid.
313        let ptr = unsafe { Instance::from_vmctx(vmctx) };
314        // SAFETY: it's a contract of this function itself that `vmctx` is a
315        // valid pointer to dereference. Additionally the lifetime of the return
316        // value is constrained to be the same as `self` to avoid granting a
317        // too-long lifetime.
318        unsafe { ptr.as_ref() }
319    }
320
321    /// Same as [`Self::sibling_vmctx`], but the mutable version.
322    ///
323    /// # Safety
324    ///
325    /// This function requires that the `vmctx` pointer is indeed valid and
326    /// from the store that `self` belongs to.
327    ///
328    /// (Note that it is *NOT* required that `vmctx` be distinct from this
329    /// instance's `vmctx`, or that usage of the resulting instance is limited
330    /// to its defined items! The returned borrow has the same lifetime as
331    /// `self`, which means that this instance cannot be used while the
332    /// resulting instance is in use, and we therefore do not need to worry
333    /// about mutable aliasing between this instance and the resulting
334    /// instance.)
335    #[inline]
336    unsafe fn sibling_vmctx_mut<'a>(
337        self: Pin<&'a mut Self>,
338        vmctx: NonNull<VMContext>,
339    ) -> Pin<&'a mut Instance> {
340        // SAFETY: it's a contract of this function itself that `vmctx` is a
341        // valid pointer such that this pointer arithmetic is valid.
342        let mut ptr = unsafe { Instance::from_vmctx(vmctx) };
343
344        // SAFETY: it's a contract of this function itself that `vmctx` is a
345        // valid pointer to dereference. Additionally the lifetime of the return
346        // value is constrained to be the same as `self` to avoid granting a
347        // too-long lifetime. Finally mutable references to an instance are
348        // always through `Pin`, so it's safe to create a pin-pointer here.
349        unsafe { Pin::new_unchecked(ptr.as_mut()) }
350    }
351
352    pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
353        self.runtime_info.env_module()
354    }
355
356    pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
357        match &self.runtime_info {
358            ModuleRuntimeInfo::Module(m) => Some(m),
359            ModuleRuntimeInfo::Bare(_) => None,
360        }
361    }
362
363    /// Translate a module-level interned type index into an engine-level
364    /// interned type index.
365    #[cfg(feature = "gc")]
366    pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
367        self.runtime_info.engine_type_index(module_index)
368    }
369
370    #[inline]
371    fn offsets(&self) -> &VMOffsets<HostPtr> {
372        self.runtime_info.offsets()
373    }
374
375    /// Return the indexed `VMFunctionImport`.
376    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
377        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
378    }
379
380    /// Return the indexed `VMTableImport`.
381    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
382        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
383    }
384
385    /// Return the indexed `VMMemoryImport`.
386    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
387        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
388    }
389
390    /// Return the indexed `VMGlobalImport`.
391    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
392        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
393    }
394
395    /// Return the indexed `VMTagImport`.
396    fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
397        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
398    }
399
400    /// Return the indexed `VMTagDefinition`.
401    pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
402        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
403    }
404
405    /// Return the indexed `VMTableDefinition`.
406    pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
407        unsafe { self.table_ptr(index).read() }
408    }
409
410    /// Updates the `VMTableDefinition` for the given defined table index.
411    fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
412        unsafe {
413            self.table_ptr(index).write(table);
414        }
415    }
416
417    /// Return a pointer to the `index`'th table within this instance, stored
418    /// in vmctx memory.
419    pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
420        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
421    }
422
423    /// Get a locally defined or imported memory.
424    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
425        if let Some(defined_index) = self.env_module().defined_memory_index(index) {
426            self.memory(defined_index)
427        } else {
428            let import = self.imported_memory(index);
429            unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
430        }
431    }
432
433    /// Return the indexed `VMMemoryDefinition`, loaded from vmctx memory
434    /// already.
435    #[inline]
436    pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
437        unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
438    }
439
440    /// Set the `VMMemoryDefinition` for the indexed memory.
441    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
442        unsafe {
443            self.memory_ptr(index).write(mem);
444        }
445    }
446
447    /// Return the address of the specified memory at `index` within this vmctx.
448    ///
449    /// Note that the returned pointer resides in wasm-code-readable-memory in
450    /// the vmctx.
451    #[inline]
452    pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
453        unsafe {
454            self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
455                .as_non_null()
456        }
457    }
458
459    /// Return the indexed `VMGlobalDefinition`.
460    pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
461        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
462    }
463
464    /// Get all globals within this instance.
465    ///
466    /// Returns both import and defined globals.
467    ///
468    /// Returns both exported and non-exported globals.
469    ///
470    /// Gives access to the full globals space.
471    pub fn all_globals(
472        &self,
473        store: StoreId,
474    ) -> impl ExactSizeIterator<Item = (GlobalIndex, crate::Global)> + '_ {
475        let module = self.env_module();
476        module
477            .globals
478            .keys()
479            .map(move |idx| (idx, self.get_exported_global(store, idx)))
480    }
481
482    /// Get the globals defined in this instance (not imported).
483    pub fn defined_globals(
484        &self,
485        store: StoreId,
486    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, crate::Global)> + '_ {
487        let module = self.env_module();
488        self.all_globals(store)
489            .skip(module.num_imported_globals)
490            .map(move |(i, global)| (module.defined_global_index(i).unwrap(), global))
491    }
492
493    /// Return a pointer to the `VMStoreContext` pointer stored in this vmctx.
494    #[inline]
495    pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
496        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
497    }
498
499    /// Return a pointer to the global epoch counter used by this instance.
500    #[cfg(target_has_atomic = "64")]
501    pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
502        let offset = self.offsets().ptr.vmctx_epoch_ptr();
503        unsafe { self.vmctx_plus_offset_mut(offset) }
504    }
505
506    /// Return a pointer to the collector-specific heap data.
507    pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
508        let offset = self.offsets().ptr.vmctx_gc_heap_data();
509        unsafe { self.vmctx_plus_offset_mut(offset) }
510    }
511
512    pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: &StoreOpaque) {
513        // FIXME: should be more targeted ideally with the `unsafe` than just
514        // throwing this entire function in a large `unsafe` block.
515        unsafe {
516            *self.as_mut().store_mut() = Some(VMStoreRawPtr(store.traitobj()));
517            self.vm_store_context()
518                .write(Some(store.vm_store_context_ptr().into()));
519            #[cfg(target_has_atomic = "64")]
520            {
521                *self.as_mut().epoch_ptr() =
522                    Some(NonNull::from(store.engine().epoch_counter()).into());
523            }
524
525            if self.env_module().needs_gc_heap {
526                self.as_mut().set_gc_heap(Some(store.unwrap_gc_store()));
527            } else {
528                self.as_mut().set_gc_heap(None);
529            }
530        }
531    }
532
533    unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
534        if let Some(gc_store) = gc_store {
535            *self.gc_heap_data() = Some(unsafe { gc_store.gc_heap.vmctx_gc_heap_data().into() });
536        } else {
537            *self.gc_heap_data() = None;
538        }
539    }
540
541    /// Return a reference to the vmctx used by compiled wasm code.
542    #[inline]
543    pub fn vmctx(&self) -> NonNull<VMContext> {
544        InstanceLayout::vmctx(self)
545    }
546
547    /// Lookup a function by index.
548    ///
549    /// # Panics
550    ///
551    /// Panics if `index` is out of bounds for this instance.
552    ///
553    /// # Safety
554    ///
555    /// The `store` parameter must be the store that owns this instance and the
556    /// functions that this instance can reference.
557    pub unsafe fn get_exported_func(
558        self: Pin<&mut Self>,
559        registry: &ModuleRegistry,
560        store: StoreId,
561        index: FuncIndex,
562    ) -> crate::Func {
563        let func_ref = self.get_func_ref(registry, index).unwrap();
564
565        // SAFETY: the validity of `func_ref` is guaranteed by the validity of
566        // `self`, and the contract that `store` must own `func_ref` is a
567        // contract of this function itself.
568        unsafe { crate::Func::from_vm_func_ref(store, func_ref) }
569    }
570
571    /// Lookup a table by index.
572    ///
573    /// # Panics
574    ///
575    /// Panics if `index` is out of bounds for this instance.
576    pub fn get_exported_table(&self, store: StoreId, index: TableIndex) -> crate::Table {
577        let (id, def_index) = if let Some(def_index) = self.env_module().defined_table_index(index)
578        {
579            (self.id, def_index)
580        } else {
581            let import = self.imported_table(index);
582            // SAFETY: validity of this `Instance` guarantees validity of the
583            // `vmctx` pointer being read here to find the transitive
584            // `InstanceId` that the import is associated with.
585            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
586            (id, import.index)
587        };
588        crate::Table::from_raw(StoreInstanceId::new(store, id), def_index)
589    }
590
591    /// Lookup a memory by index.
592    ///
593    /// # Panics
594    ///
595    /// Panics if `index` is out-of-bounds for this instance.
596    #[cfg_attr(
597        not(feature = "threads"),
598        expect(unused_variables, reason = "definitions cfg'd to dummy",)
599    )]
600    pub fn get_exported_memory(&self, store: StoreId, index: MemoryIndex) -> ExportMemory {
601        let module = self.env_module();
602        if module.memories[index].shared {
603            let (memory, import) =
604                if let Some(def_index) = self.env_module().defined_memory_index(index) {
605                    (
606                        self.get_defined_memory(def_index),
607                        self.get_defined_memory_vmimport(def_index),
608                    )
609                } else {
610                    let import = self.imported_memory(index);
611                    // SAFETY: validity of this `Instance` guarantees validity of
612                    // the `vmctx` pointer being read here to find the transitive
613                    // `InstanceId` that the import is associated with.
614                    let instance = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()) };
615                    (instance.get_defined_memory(import.index), *import)
616                };
617
618            let vm = memory.as_shared_memory().unwrap().clone();
619            ExportMemory::Shared(vm, import)
620        } else {
621            let (id, def_index) =
622                if let Some(def_index) = self.env_module().defined_memory_index(index) {
623                    (self.id, def_index)
624                } else {
625                    let import = self.imported_memory(index);
626                    // SAFETY: validity of this `Instance` guarantees validity of the
627                    // `vmctx` pointer being read here to find the transitive
628                    // `InstanceId` that the import is associated with.
629                    let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
630                    (id, import.index)
631                };
632
633            // SAFETY: `from_raw` requires that the memory is not shared, which
634            // was tested above in this if/else.
635            let store_id = StoreInstanceId::new(store, id);
636            ExportMemory::Unshared(unsafe { crate::Memory::from_raw(store_id, def_index) })
637        }
638    }
639
640    /// Lookup a global by index.
641    ///
642    /// # Panics
643    ///
644    /// Panics if `index` is out-of-bounds for this instance.
645    pub(crate) fn get_exported_global(&self, store: StoreId, index: GlobalIndex) -> crate::Global {
646        // If this global is defined within this instance then it's easy to
647        // construct the `Global` directly.
648        if let Some(def_index) = self.env_module().defined_global_index(index) {
649            let instance = StoreInstanceId::new(store, self.id);
650            return crate::Global::from_core(instance, def_index);
651        }
652
653        // For imported globals it's required to match on the `kind` to
654        // determine which `Global` constructor is going to be invoked.
655        let import = self.imported_global(index);
656        match import.kind {
657            VMGlobalKind::Host(index) => crate::Global::from_host(store, index),
658            VMGlobalKind::Instance(index) => {
659                // SAFETY: validity of this `&Instance` means validity of its
660                // imports meaning we can read the id of the vmctx within.
661                let id = unsafe {
662                    let vmctx = VMContext::from_opaque(import.vmctx.unwrap().as_non_null());
663                    self.sibling_vmctx(vmctx).id
664                };
665                crate::Global::from_core(StoreInstanceId::new(store, id), index)
666            }
667            #[cfg(feature = "component-model")]
668            VMGlobalKind::ComponentFlags(index) => {
669                // SAFETY: validity of this `&Instance` means validity of its
670                // imports meaning we can read the id of the vmctx within.
671                let id = unsafe {
672                    let vmctx = super::component::VMComponentContext::from_opaque(
673                        import.vmctx.unwrap().as_non_null(),
674                    );
675                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
676                };
677                crate::Global::from_component_flags(
678                    crate::component::store::StoreComponentInstanceId::new(store, id),
679                    index,
680                )
681            }
682            #[cfg(feature = "component-model")]
683            VMGlobalKind::TaskMayBlock => {
684                // SAFETY: validity of this `&Instance` means validity of its
685                // imports meaning we can read the id of the vmctx within.
686                let id = unsafe {
687                    let vmctx = super::component::VMComponentContext::from_opaque(
688                        import.vmctx.unwrap().as_non_null(),
689                    );
690                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
691                };
692                crate::Global::from_task_may_block(
693                    crate::component::store::StoreComponentInstanceId::new(store, id),
694                )
695            }
696        }
697    }
698
699    /// Get an exported tag by index.
700    ///
701    /// # Panics
702    ///
703    /// Panics if the index is out-of-range.
704    pub fn get_exported_tag(&self, store: StoreId, index: TagIndex) -> crate::Tag {
705        let (id, def_index) = if let Some(def_index) = self.env_module().defined_tag_index(index) {
706            (self.id, def_index)
707        } else {
708            let import = self.imported_tag(index);
709            // SAFETY: validity of this `Instance` guarantees validity of the
710            // `vmctx` pointer being read here to find the transitive
711            // `InstanceId` that the import is associated with.
712            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
713            (id, import.index)
714        };
715        crate::Tag::from_raw(StoreInstanceId::new(store, id), def_index)
716    }
717
718    /// Return an iterator over the exports of this instance.
719    ///
720    /// Specifically, it provides access to the key-value pairs, where the keys
721    /// are export names, and the values are export declarations which can be
722    /// resolved with `lookup_by_declaration`.
723    pub fn exports(&self) -> wasmparser::collections::index_map::Iter<'_, String, EntityIndex> {
724        self.env_module().exports.iter()
725    }
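
    // Illustrative only (hypothetical snippet, not used in this module): each
    // yielded `EntityIndex` can be resolved through the `get_exported_*`
    // helpers above, e.g.:
    //
    //     for (_name, index) in instance.exports() {
    //         if let EntityIndex::Memory(idx) = *index {
    //             let _mem = instance.get_exported_memory(store_id, idx);
    //         }
    //     }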
726
727    /// Grow memory by the specified number of pages.
728    ///
729    /// Returns `None` if memory can't be grown by the specified number
730    /// of pages. Returns `Some` with the old size in bytes if growth was
731    /// successful.
732    pub(crate) async fn memory_grow(
733        mut self: Pin<&mut Self>,
734        limiter: Option<&mut StoreResourceLimiter<'_>>,
735        idx: DefinedMemoryIndex,
736        delta: u64,
737    ) -> Result<Option<usize>, Error> {
738        let memory = &mut self.as_mut().memories_mut()[idx].1;
739
740        // SAFETY: this is the safe wrapper around `Memory::grow` because it
741        // automatically updates the `VMMemoryDefinition` in this instance after
742        // a growth operation below.
743        let result = unsafe { memory.grow(delta, limiter).await };
744
745        // Update the state used by a non-shared Wasm memory in case the base
746        // pointer and/or the length changed.
747        if memory.as_shared_memory().is_none() {
748            let vmmemory = memory.vmmemory();
749            self.set_memory(idx, vmmemory);
750        }
751
752        result
753    }
754
755    pub(crate) fn table_element_type(
756        self: Pin<&mut Self>,
757        table_index: TableIndex,
758    ) -> TableElementType {
759        self.get_table(table_index).element_type()
760    }
761
762    /// Performs a grow operation on the `table_index` specified using `grow`.
763    ///
764    /// This will handle updating the VMTableDefinition internally as necessary.
765    pub(crate) async fn defined_table_grow(
766        mut self: Pin<&mut Self>,
767        table_index: DefinedTableIndex,
768        grow: impl AsyncFnOnce(&mut Table) -> Result<Option<usize>>,
769    ) -> Result<Option<usize>> {
770        let table = self.as_mut().get_defined_table(table_index);
771        let result = grow(table).await;
772        let element = table.vmtable();
773        self.set_table(table_index, element);
774        result
775    }
776
777    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
778        let size = mem::size_of::<Self>()
779            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
780            .unwrap();
781        let align = mem::align_of::<Self>();
782        Layout::from_size_align(size, align).unwrap()
783    }
784
785    fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
786        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
787    }
788
789    /// Construct a new VMFuncRef for the given function
790    /// (imported or defined in this module) and store it into the given
791    /// location. Used during lazy initialization.
792    ///
793    /// Note that our current lazy-init scheme actually calls this every
794    /// time the funcref pointer is fetched; this turns out to be better
795    /// than tracking state related to whether it's been initialized
796    /// before, because resetting that state on (re)instantiation is
797    /// very expensive if there are many funcrefs.
798    ///
799    /// # Safety
800    ///
801    /// This function requires that `into` is a valid pointer.
802    unsafe fn construct_func_ref(
803        self: Pin<&mut Self>,
804        registry: &ModuleRegistry,
805        index: FuncIndex,
806        type_index: VMSharedTypeIndex,
807        into: *mut VMFuncRef,
808    ) {
809        let module_with_code = ModuleWithCode::in_store(
810            registry,
811            self.runtime_module()
812                .expect("funcref impossible in fake module"),
813        )
814        .expect("module not in store");
815
816        let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
817            VMFuncRef {
818                array_call: NonNull::from(
819                    module_with_code
820                        .array_to_wasm_trampoline(def_index)
821                        .expect("should have array-to-Wasm trampoline for escaping function"),
822                )
823                .cast()
824                .into(),
825                wasm_call: Some(
826                    NonNull::new(
827                        module_with_code
828                            .finished_function(def_index)
829                            .as_ptr()
830                            .cast::<VMWasmCallFunction>()
831                            .cast_mut(),
832                    )
833                    .unwrap()
834                    .into(),
835                ),
836                vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
837                type_index,
838            }
839        } else {
840            let import = self.imported_function(index);
841            VMFuncRef {
842                array_call: import.array_call,
843                wasm_call: Some(import.wasm_call),
844                vmctx: import.vmctx,
845                type_index,
846            }
847        };
848
849        // SAFETY: the unsafe contract here is forwarded to callers of this
850        // function.
851        unsafe {
852            ptr::write(into, func_ref);
853        }
854    }
855
856    /// Get a `&VMFuncRef` for the given `FuncIndex`.
857    ///
858    /// Returns `None` if the index is the reserved index value.
859    ///
860    /// The returned reference is a stable reference that won't be moved and can
861    /// be passed into JIT code.
862    pub(crate) fn get_func_ref(
863        self: Pin<&mut Self>,
864        registry: &ModuleRegistry,
865        index: FuncIndex,
866    ) -> Option<NonNull<VMFuncRef>> {
867        if index == FuncIndex::reserved_value() {
868            return None;
869        }
870
871        // For now, we eagerly initialize a funcref struct in-place
872        // whenever asked for a reference to it. This is mostly
873        // fine, because in practice each funcref is unlikely to be
874        // requested more than a few times: once-ish for funcref
875        // tables used for call_indirect (the usual compilation
876        // strategy places each function in the table at most once),
877        // and once or a few times when fetching exports via API.
878        // Note that for any case driven by table accesses, the lazy
879        // table init behaves like a higher-level cache layer that
880        // protects this initialization from happening multiple
881        // times, via that particular table at least.
882        //
883        // When `ref.func` becomes more commonly used or if we
884        // otherwise see a use-case where this becomes a hotpath,
885        // we can reconsider by using some state to track
886        // "uninitialized" explicitly, for example by zeroing the
887        // funcrefs (perhaps together with other
888        // zeroed-at-instantiate-time state) or using a separate
889        // is-initialized bitmap.
890        //
891        // We arrived at this design because zeroing memory is
892        // expensive, so it's better for instantiation performance
893        // if we don't have to track "is-initialized" state at
894        // all!
895        let func = &self.env_module().functions[index];
896        let sig = func.signature.unwrap_engine_type_index();
897
898        // SAFETY: the offset calculated here should be correct with
899        // `self.offsets`
900        let func_ref = unsafe {
901            self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
902        };
903
904        // SAFETY: the `func_ref` ptr should be valid as it's within our
905        // `VMContext` area.
906        unsafe {
907            self.construct_func_ref(registry, index, sig, func_ref.as_ptr());
908        }
909
910        Some(func_ref)
911    }
912
913    /// Get the passive elements segment at the given index.
914    ///
915    /// Returns an empty segment if the index is out of bounds or if the segment
916    /// has been dropped.
917    ///
918    /// The `storage` parameter should always be `None`; it is a bit of a hack
919    /// to work around lifetime issues.
920    pub(crate) fn passive_element_segment<'a>(
921        &self,
922        storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
923        elem_index: ElemIndex,
924    ) -> &'a TableSegmentElements {
925        debug_assert!(storage.is_none());
926        *storage = Some((
927            // TODO: this `clone()` shouldn't be necessary but is used for now to
928            // inform `rustc` that the lifetime of the elements here are
929            // disconnected from the lifetime of `self`.
930            self.env_module().clone(),
931            // NB: fall back to an expressions-based list of elements which
932            // doesn't have static type information (as opposed to
933            // `TableSegmentElements::Functions`) since we don't know what type
934            // is needed in the caller's context. Let the type be inferred by
935            // how they use the segment.
936            TableSegmentElements::Expressions(Box::new([])),
937        ));
938        let (module, empty) = storage.as_ref().unwrap();
939
940        match module.passive_elements_map.get(&elem_index) {
941            Some(index) if !self.dropped_elements.contains(elem_index) => {
942                &module.passive_elements[*index]
943            }
944            _ => empty,
945        }
946    }
947
948    /// The `table.init` operation: initializes a portion of a table with a
949    /// passive element.
950    ///
951    /// # Errors
952    ///
953    /// Returns a `Trap` error when the range within the table is out of bounds
954    /// or the range within the passive element is out of bounds.
955    pub(crate) async fn table_init(
956        store: &mut StoreOpaque,
957        limiter: Option<&mut StoreResourceLimiter<'_>>,
958        asyncness: Asyncness,
959        instance: InstanceId,
960        table_index: TableIndex,
961        elem_index: ElemIndex,
962        dst: u64,
963        src: u64,
964        len: u64,
965    ) -> Result<()> {
966        let mut storage = None;
967        let elements = store
968            .instance(instance)
969            .passive_element_segment(&mut storage, elem_index);
970        let mut const_evaluator = ConstExprEvaluator::default();
971        Self::table_init_segment(
972            store,
973            limiter,
974            asyncness,
975            instance,
976            &mut const_evaluator,
977            table_index,
978            elements,
979            dst,
980            src,
981            len,
982        )
983        .await
984    }
985
986    pub(crate) async fn table_init_segment(
987        store: &mut StoreOpaque,
988        mut limiter: Option<&mut StoreResourceLimiter<'_>>,
989        asyncness: Asyncness,
990        elements_instance_id: InstanceId,
991        const_evaluator: &mut ConstExprEvaluator,
992        table_index: TableIndex,
993        elements: &TableSegmentElements,
994        dst: u64,
995        src: u64,
996        len: u64,
997    ) -> Result<()> {
998        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
999
1000        let store_id = store.id();
1001        let elements_instance = store.instance_mut(elements_instance_id);
1002        let table = elements_instance.get_exported_table(store_id, table_index);
1003        let table_size = table._size(store);
1004
1005        // Perform a bounds check on the table being written to. This is done by
1006        // ensuring that `dst + len <= table.size()` via checked arithmetic.
1007        //
1008        // Note that the bounds check for the element segment happens below when
1009        // the original segment is sliced via `src` and `len`.
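        //
        // The subtraction form (`size - dst - len`) is used rather than
        // computing `dst + len`, which could overflow `u64` for large operands.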
1010        table_size
1011            .checked_sub(dst)
1012            .and_then(|i| i.checked_sub(len))
1013            .ok_or(Trap::TableOutOfBounds)?;
1014
1015        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
1016        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;
1017
1018        let positions = dst..dst + u64::try_from(len).unwrap();
1019        match elements {
1020            TableSegmentElements::Functions(funcs) => {
1021                let elements = funcs
1022                    .get(src..)
1023                    .and_then(|s| s.get(..len))
1024                    .ok_or(Trap::TableOutOfBounds)?;
1025                for (i, func_idx) in positions.zip(elements) {
1026                    let (instance, registry) =
1027                        store.instance_and_module_registry_mut(elements_instance_id);
1028                    // SAFETY: the `store_id` passed to `get_exported_func` is
1029                    // indeed the store that owns the function.
1030                    let func = unsafe { instance.get_exported_func(registry, store_id, *func_idx) };
1031                    table.set_(store, i, func.into()).unwrap();
1032                }
1033            }
1034            TableSegmentElements::Expressions(exprs) => {
1035                let mut store = OpaqueRootScope::new(store);
1036                let exprs = exprs
1037                    .get(src..)
1038                    .and_then(|s| s.get(..len))
1039                    .ok_or(Trap::TableOutOfBounds)?;
1040                let mut context = ConstEvalContext::new(elements_instance_id, asyncness);
1041                for (i, expr) in positions.zip(exprs) {
1042                    let element = const_evaluator
1043                        .eval(&mut store, limiter.as_deref_mut(), &mut context, expr)
1044                        .await?;
1045                    table.set_(&mut store, i, element.ref_().unwrap()).unwrap();
1046                }
1047            }
1048        }
1049
1050        Ok(())
1051    }
1052
1053    /// Drop an element.
1054    pub(crate) fn elem_drop(
1055        self: Pin<&mut Self>,
1056        elem_index: ElemIndex,
1057    ) -> Result<(), OutOfMemory> {
1058        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop
1059
1060        self.dropped_elements_mut().insert(elem_index)?;
1061
1062        // Note that we don't check that we actually removed a segment because
1063        // dropping a non-passive segment is a no-op (not a trap).
1064
1065        Ok(())
1066    }
1067
1068    /// Get a locally-defined memory.
1069    pub fn get_defined_memory_mut(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> &mut Memory {
1070        &mut self.memories_mut()[index].1
1071    }
1072
1073    /// Get a locally-defined memory.
1074    pub fn get_defined_memory(&self, index: DefinedMemoryIndex) -> &Memory {
1075        &self.memories[index].1
1076    }
1077
1078    pub fn get_defined_memory_vmimport(&self, index: DefinedMemoryIndex) -> VMMemoryImport {
1079        crate::runtime::vm::VMMemoryImport {
1080            from: self.memory_ptr(index).into(),
1081            vmctx: self.vmctx().into(),
1082            index,
1083        }
1084    }
1085
1086    /// Do a `memory.copy`
1087    ///
1088    /// # Errors
1089    ///
1090    /// Returns a `Trap` error when the source or destination ranges are out of
1091    /// bounds.
1092    pub(crate) fn memory_copy(
1093        self: Pin<&mut Self>,
1094        dst_index: MemoryIndex,
1095        dst: u64,
1096        src_index: MemoryIndex,
1097        src: u64,
1098        len: u64,
1099    ) -> Result<(), Trap> {
1100        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
1101
1102        let src_mem = self.get_memory(src_index);
1103        let dst_mem = self.get_memory(dst_index);
1104
1105        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
1106        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
1107        let len = usize::try_from(len).unwrap();
1108
1109        // Bounds and casts are checked above, by this point we know that
1110        // everything is safe.
1111        unsafe {
1112            let dst = dst_mem.base.as_ptr().add(dst);
1113            let src = src_mem.base.as_ptr().add(src);
1114            // FIXME audit whether this is safe in the presence of shared memory
1115            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
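            //
            // `ptr::copy` (memmove semantics) is used rather than
            // `ptr::copy_nonoverlapping` because `memory.copy` permits the
            // source and destination ranges to overlap.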
1116            ptr::copy(src, dst, len);
1117        }
1118
1119        Ok(())
1120    }
1121
1122    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
1123        let oob = || Trap::MemoryOutOfBounds;
1124        let end = ptr
1125            .checked_add(len)
1126            .and_then(|i| usize::try_from(i).ok())
1127            .ok_or_else(oob)?;
1128        if end > max {
1129            Err(oob())
1130        } else {
1131            Ok(ptr.try_into().unwrap())
1132        }
1133    }
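
    // Worked example for `validate_inbounds` (assuming a one-page, 65_536-byte
    // memory): `ptr = 65_530, len = 10` computes `end = 65_540 > 65_536` and
    // fails with `Trap::MemoryOutOfBounds`, while `ptr = 65_530, len = 6` gives
    // `end = 65_536`, which is in bounds and returns `Ok(65_530)`.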
1134
1135    /// Perform the `memory.fill` operation on a locally defined memory.
1136    ///
1137    /// # Errors
1138    ///
1139    /// Returns a `Trap` error if the memory range is out of bounds.
1140    pub(crate) fn memory_fill(
1141        self: Pin<&mut Self>,
1142        memory_index: DefinedMemoryIndex,
1143        dst: u64,
1144        val: u8,
1145        len: u64,
1146    ) -> Result<(), Trap> {
1147        let memory_index = self.env_module().memory_index(memory_index);
1148        let memory = self.get_memory(memory_index);
1149        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
1150        let len = usize::try_from(len).unwrap();
1151
1152        // Bounds and casts are checked above, by this point we know that
1153        // everything is safe.
1154        unsafe {
1155            let dst = memory.base.as_ptr().add(dst);
1156            // FIXME audit whether this is safe in the presence of shared memory
1157            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1158            ptr::write_bytes(dst, val, len);
1159        }
1160
1161        Ok(())
1162    }
1163
1164    /// Get the internal storage range of a particular Wasm data segment.
1165    pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
1166        match self.env_module().passive_data_map.get(&index) {
1167            Some(range) if !self.dropped_data.contains(index) => range.clone(),
1168            _ => 0..0,
1169        }
1170    }
1171
1172    /// Given an internal storage range of a Wasm data segment (or subset of a
1173    /// Wasm data segment), get the data's raw bytes.
1174    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
1175        let start = usize::try_from(range.start).unwrap();
1176        let end = usize::try_from(range.end).unwrap();
1177        &self.runtime_info.wasm_data()[start..end]
1178    }
1179
1180    /// Performs the `memory.init` operation.
1181    ///
1182    /// # Errors
1183    ///
1184    /// Returns a `Trap` error if the destination range is out of this module's
1185    /// memory's bounds or if the source range is outside the data segment's
1186    /// bounds.
1187    pub(crate) fn memory_init(
1188        self: Pin<&mut Self>,
1189        memory_index: MemoryIndex,
1190        data_index: DataIndex,
1191        dst: u64,
1192        src: u32,
1193        len: u32,
1194    ) -> Result<(), Trap> {
1195        let range = self.wasm_data_range(data_index);
1196        self.memory_init_segment(memory_index, range, dst, src, len)
1197    }
1198
1199    pub(crate) fn memory_init_segment(
1200        self: Pin<&mut Self>,
1201        memory_index: MemoryIndex,
1202        range: Range<u32>,
1203        dst: u64,
1204        src: u32,
1205        len: u32,
1206    ) -> Result<(), Trap> {
1207        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init
1208
1209        let memory = self.get_memory(memory_index);
1210        let data = self.wasm_data(range);
1211        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
1212        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
1213        let len = len as usize;
1214
1215        unsafe {
1216            let src_start = data.as_ptr().add(src);
1217            let dst_start = memory.base.as_ptr().add(dst);
1218            // FIXME audit whether this is safe in the presence of shared memory
1219            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
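            //
            // `copy_nonoverlapping` is appropriate here since the source bytes
            // come from the module's wasm data section rather than from linear
            // memory, so the two ranges cannot alias.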
1220            ptr::copy_nonoverlapping(src_start, dst_start, len);
1221        }
1222
1223        Ok(())
1224    }
1225
1226    /// Drop the given data segment, truncating its length to zero.
1227    pub(crate) fn data_drop(
1228        self: Pin<&mut Self>,
1229        data_index: DataIndex,
1230    ) -> Result<(), OutOfMemory> {
1231        self.dropped_data_mut().insert(data_index)?;
1232
1233        // Note that we don't check that we actually removed a segment because
1234        // dropping a non-passive segment is a no-op (not a trap).
1235
1236        Ok(())
1237    }
1238
1239    /// Get a table by index regardless of whether it is locally-defined
1240    /// or an imported, foreign table. Ensure that the given range of
1241    /// elements in the table is lazily initialized.  We define this
1242    /// operation all-in-one for safety, to ensure the lazy-init
1243    /// happens.
1244    ///
1245    /// Takes an `Iterator` for the index-range to lazy-initialize,
1246    /// for flexibility. This can be a range, single item, or empty
1247    /// sequence, for example. The iterator should return indices in
1248    /// increasing order, so that the break-at-out-of-bounds behavior
1249    /// works correctly.
1250    pub(crate) fn get_table_with_lazy_init(
1251        self: Pin<&mut Self>,
1252        registry: &ModuleRegistry,
1253        table_index: TableIndex,
1254        range: impl Iterator<Item = u64>,
1255    ) -> &mut Table {
1256        let (idx, instance) = self.defined_table_index_and_instance(table_index);
1257        instance.get_defined_table_with_lazy_init(registry, idx, range)
1258    }
1259
1260    /// Gets the raw runtime table data structure owned by this instance
1261    /// given the provided `idx`.
1262    ///
1263    /// The `range` specified is eagerly initialized for funcref tables.
1264    pub fn get_defined_table_with_lazy_init(
1265        mut self: Pin<&mut Self>,
1266        registry: &ModuleRegistry,
1267        idx: DefinedTableIndex,
1268        range: impl IntoIterator<Item = u64>,
1269    ) -> &mut Table {
1270        let elt_ty = self.tables[idx].1.element_type();
1271
1272        if elt_ty == TableElementType::Func {
1273            for i in range {
1274                match self.tables[idx].1.get_func_maybe_init(i) {
1275                    // Uninitialized table element.
1276                    Ok(None) => {}
1277                    // Initialized table element, move on to the next.
1278                    Ok(Some(_)) => continue,
1279                    // Out-of-bounds; caller will handle by likely
1280                    // throwing a trap. No work to do to lazy-init
1281                    // beyond the end.
1282                    Err(_) => break,
1283                };
1284
1285                // The table element `i` is uninitialized and is now being
1286                // initialized. This must imply that a `precompiled` list of
1287                // function indices is available for this table. The precompiled
1288                // list is extracted and then it is consulted with `i` to
1289                // determine the function that is going to be initialized. Note
1290                // that `i` may be outside the limits of the static
1291                // initialization so it's a fallible `get` instead of an index.
1292                let module = self.env_module();
1293                let precomputed = match &module.table_initialization.initial_values[idx] {
1294                    TableInitialValue::Null { precomputed } => precomputed,
1295                    TableInitialValue::Expr(_) => unreachable!(),
1296                };
1297                // Panicking here helps catch bugs rather than silently truncating by accident.
1298                let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
1299                let func_ref = func_index
1300                    .and_then(|func_index| self.as_mut().get_func_ref(registry, func_index));
1301                self.as_mut().tables_mut()[idx]
1302                    .1
1303                    .set_func(i, func_ref)
1304                    .expect("Table type should match and index should be in-bounds");
1305            }
1306        }
1307
1308        self.get_defined_table(idx)
1309    }
1310
1311    /// Get a table by index regardless of whether it is locally-defined or an
1312    /// imported, foreign table.
1313    pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> &mut Table {
1314        let (idx, instance) = self.defined_table_index_and_instance(table_index);
1315        instance.get_defined_table(idx)
1316    }
1317
1318    /// Get a locally-defined table.
1319    pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> &mut Table {
1320        &mut self.tables_mut()[index].1
1321    }
1322
1323    pub(crate) fn defined_table_index_and_instance<'a>(
1324        self: Pin<&'a mut Self>,
1325        index: TableIndex,
1326    ) -> (DefinedTableIndex, Pin<&'a mut Instance>) {
1327        if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
1328            (defined_table_index, self)
1329        } else {
1330            let import = self.imported_table(index);
1331            let index = import.index;
1332            let vmctx = import.vmctx.as_non_null();
1333            // SAFETY: the validity of `self` means that the reachable instances
1334            // should also all be owned by the same store and fully initialized,
1335            // so it's safe to laterally move from a mutable borrow of this
1336            // instance to a mutable borrow of a sibling instance.
1337            let foreign_instance = unsafe { self.sibling_vmctx_mut(vmctx) };
1338            (index, foreign_instance)
1339        }
1340    }
1341
1342    /// Same as `self.runtime_info.env_module()` but additionally returns the
1343    /// `Pin<&mut Self>` with the same original lifetime.
1344    pub fn module_and_self(self: Pin<&mut Self>) -> (&wasmtime_environ::Module, Pin<&mut Self>) {
1345        // SAFETY: this function projects both a `&Module` and the original
1346        // `Pin<&mut Self>`, both tied to the same lifetime. This is safe because
1347        // it's a contract of `Pin<&mut Self>` that the `runtime_info` field is
1348        // never written, meaning a `&mut Module` is never safely projected
1349        // from `Pin<&mut Self>`. Consequently it's safe to have a
1350        // read-only view of the field while still retaining mutable access to
1351        // all other fields.
1352        let module = self.runtime_info.env_module();
1353        let module = &raw const *module;
1354        let module = unsafe { &*module };
1355        (module, self)
1356    }
1357
1358    /// Initialize the VMContext data associated with this Instance.
1359    ///
1360    /// The `VMContext` memory is assumed to be uninitialized; any field
1361    /// that we need in a certain state will be explicitly written by this
1362    /// function.
1363    unsafe fn initialize_vmctx(self: Pin<&mut Self>, store: &StoreOpaque, imports: Imports) {
1364        let (module, mut instance) = self.module_and_self();
1365
1366        // SAFETY: the type of the magic field is indeed `u32` and this function
1367        // is initializing its value.
1368        unsafe {
1369            let offsets = instance.runtime_info.offsets();
1370            instance
1371                .vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
1372                .write(VMCONTEXT_MAGIC);
1373        }
1374
1375        // SAFETY: it's up to the caller to provide a valid store pointer here.
1376        unsafe {
1377            instance.as_mut().set_store(store);
1378        }
1379
1380        // Initialize shared types
1381        //
1382        // SAFETY: validity of the vmctx means it should be safe to write to it
1383        // here.
1384        unsafe {
1385            let types = NonNull::from(instance.runtime_info.type_ids());
1386            instance.type_ids_array().write(types.cast().into());
1387        }
1388
1389        // Initialize the built-in functions
1390        //
1391        // SAFETY: the type of the builtin-functions field is indeed a pointer,
1392        // the pointer being filled in here is valid, and the vmctx is valid to
1393        // write to during initialization.
1394        unsafe {
1395            static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
1396            let ptr = BUILTINS.expose_provenance();
1397            let offsets = instance.runtime_info.offsets();
1398            instance
1399                .vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
1400                .write(VmPtr::from(ptr));
1401        }
1402
1403        // Initialize the imports
1404        //
1405        // SAFETY: the vmctx is safe to initialize during this function and
1406        // validity of each item itself is a contract the caller must uphold.
1407        debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
1408        unsafe {
1409            let offsets = instance.runtime_info.offsets();
1410            ptr::copy_nonoverlapping(
1411                imports.functions.as_ptr(),
1412                instance
1413                    .vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
1414                    .as_ptr(),
1415                imports.functions.len(),
1416            );
1417            debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
1418            ptr::copy_nonoverlapping(
1419                imports.tables.as_ptr(),
1420                instance
1421                    .vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
1422                    .as_ptr(),
1423                imports.tables.len(),
1424            );
1425            debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
1426            ptr::copy_nonoverlapping(
1427                imports.memories.as_ptr(),
1428                instance
1429                    .vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
1430                    .as_ptr(),
1431                imports.memories.len(),
1432            );
1433            debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
1434            ptr::copy_nonoverlapping(
1435                imports.globals.as_ptr(),
1436                instance
1437                    .vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
1438                    .as_ptr(),
1439                imports.globals.len(),
1440            );
1441            debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
1442            ptr::copy_nonoverlapping(
1443                imports.tags.as_ptr(),
1444                instance
1445                    .vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
1446                    .as_ptr(),
1447                imports.tags.len(),
1448            );
1449        }
1450
1451        // N.B.: there is no need to initialize the funcrefs array because we
1452        // construct each element in it on demand, whenever a reference to that
1453        // element is asked for. In other words, there is no state needed to track
1454        // the lazy-init, so we don't need to initialize any state now.
1455
1456        // Initialize the defined tables
1457        //
1458        // SAFETY: it's safe to initialize these tables during initialization
1459        // here and the various types of pointers and such here should all be
1460        // valid.
1461        unsafe {
1462            let offsets = instance.runtime_info.offsets();
1463            let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
1464            let tables = instance.as_mut().tables_mut();
1465            for i in 0..module.num_defined_tables() {
1466                ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
1467                ptr = ptr.add(1);
1468            }
1469        }
1470
1471        // Initialize the defined memories. This fills in both the
1472        // `defined_memories` table and the `owned_memories` table at the same
1473        // time. Entries in `defined_memories` hold a pointer to a definition
1474        // (all memories) whereas the `owned_memories` hold the actual
1475        // definitions of memories owned (not shared) in the module.
1476        //
1477        // SAFETY: it's safe to initialize these memories during initialization
1478        // here and the various types of pointers and such here should all be
1479        // valid.
1480        unsafe {
1481            let offsets = instance.runtime_info.offsets();
1482            let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
1483            let mut owned_ptr =
1484                instance.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
1485            let memories = instance.as_mut().memories_mut();
1486            for i in 0..module.num_defined_memories() {
1487                let defined_memory_index = DefinedMemoryIndex::new(i);
1488                let memory_index = module.memory_index(defined_memory_index);
1489                if module.memories[memory_index].shared {
1490                    let def_ptr = memories[defined_memory_index]
1491                        .1
1492                        .as_shared_memory()
1493                        .unwrap()
1494                        .vmmemory_ptr();
1495                    ptr.write(VmPtr::from(def_ptr));
1496                } else {
1497                    owned_ptr.write(memories[defined_memory_index].1.vmmemory());
1498                    ptr.write(VmPtr::from(owned_ptr));
1499                    owned_ptr = owned_ptr.add(1);
1500                }
1501                ptr = ptr.add(1);
1502            }
1503        }
1504
1505        // Zero-initialize the globals so that nothing is uninitialized memory
1506        // after this function returns. The globals are actually initialized
1507        // with their const expression initializers after the instance is fully
1508        // allocated.
1509        //
1510        // SAFETY: it's safe to initialize globals during initialization
1511        // here. Note that while the zero value written is not a valid value
1512        // for all types of globals, it puts the memory in a known all-zero
1513        // state rather than an undefined one. It's still unsafe to access
1514        // globals before their real initializers run, but a stray read would
1515        // hopefully fail faster than it would with undefined contents.
1516        unsafe {
1517            for (index, _init) in module.global_initializers.iter() {
1518                instance.global_ptr(index).write(VMGlobalDefinition::new());
1519            }
1520        }
1521
1522        // Initialize the defined tags
1523        //
1524        // SAFETY: it's safe to initialize these tags during initialization
1525        // here and the various types of pointers and such here should all be
1526        // valid.
1527        unsafe {
1528            let offsets = instance.runtime_info.offsets();
1529            let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
1530            for i in 0..module.num_defined_tags() {
1531                let defined_index = DefinedTagIndex::new(i);
1532                let tag_index = module.tag_index(defined_index);
1533                let tag = module.tags[tag_index];
1534                ptr.write(VMTagDefinition::new(
1535                    tag.signature.unwrap_engine_type_index(),
1536                ));
1537                ptr = ptr.add(1);
1538            }
1539        }
1540    }
1541
1542    /// Attempts to convert the host `addr` specified to a WebAssembly-based
1543    /// address recorded in a `WasmFault`.
1544    ///
1545    /// This method will check all linear memories that this instance contains
1546    /// to see if any of them contain `addr`. If one does then `Some` is
1547    /// returned with metadata about the wasm fault. Otherwise `None` is
1548    /// returned and `addr` doesn't belong to this instance.
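    ///
    /// A hedged usage sketch; the surrounding trap-handler context and the
    /// `faulting_host_addr` variable are hypothetical, not code from this
    /// file:
    ///
    /// ```ignore
    /// if let Some(fault) = instance.wasm_fault(faulting_host_addr) {
    ///     // `fault.wasm_address` is the offset within the guest linear
    ///     // memory and `fault.memory_size` is that memory's byte size.
    /// }
    /// ```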
1549    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
1550        let mut fault = None;
1551        for (_, (_, memory)) in self.memories.iter() {
1552            let accessible = memory.wasm_accessible();
1553            if accessible.start <= addr && addr < accessible.end {
1554                // All linear memories should be disjoint so assert that no
1555                // prior fault has been found.
1556                assert!(fault.is_none());
1557                fault = Some(WasmFault {
1558                    memory_size: memory.byte_size(),
1559                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
1560                });
1561            }
1562        }
1563        fault
1564    }
1565
1566    /// Returns the id that this instance is assigned within its store.
1567    pub fn id(&self) -> InstanceId {
1568        self.id
1569    }
1570
1571    /// Get all memories within this instance.
1572    ///
1573    /// Returns both imported and defined memories.
1574    ///
1575    /// Returns both exported and non-exported memories.
1576    ///
1577    /// Gives access to the full index space of memories.
1578    pub fn all_memories(
1579        &self,
1580        store: StoreId,
1581    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + '_ {
1582        self.env_module()
1583            .memories
1584            .iter()
1585            .map(move |(i, _)| (i, self.get_exported_memory(store, i)))
1586    }
1587
1588    /// Return the memories defined in this instance (not imported).
1589    pub fn defined_memories<'a>(
1590        &'a self,
1591        store: StoreId,
1592    ) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
1593        let num_imported = self.env_module().num_imported_memories;
1594        self.all_memories(store)
1595            .skip(num_imported)
1596            .map(|(_i, memory)| memory)
1597    }
1598
1599    /// Look up an item with the given index.
1600    ///
1601    /// # Panics
1602    ///
1603    /// Panics if `export` is not valid for this instance.
1604    ///
1605    /// # Safety
1606    ///
1607    /// This function requires that `store` is the correct store which owns this
1608    /// instance.
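    ///
    /// A minimal sketch of how a caller might dispatch on the result,
    /// assuming a `&ModuleRegistry`, the owning store's `StoreId`, and an
    /// `EntityIndex` are in scope (the names here are illustrative):
    ///
    /// ```ignore
    /// // SAFETY: `store_id` identifies the store that owns `instance`.
    /// match unsafe { instance.get_export_by_index_mut(registry, store_id, entity) } {
    ///     Export::Function(f) => { /* ... */ }
    ///     Export::Memory(m) => { /* ... */ }
    ///     _ => { /* globals, tables, tags, shared memories */ }
    /// }
    /// ```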
1609    pub unsafe fn get_export_by_index_mut(
1610        self: Pin<&mut Self>,
1611        registry: &ModuleRegistry,
1612        store: StoreId,
1613        export: EntityIndex,
1614    ) -> Export {
1615        match export {
1616            // SAFETY: the contract of `store` owning this instance is a
1617            // safety requirement of this function itself.
1618            EntityIndex::Function(i) => {
1619                Export::Function(unsafe { self.get_exported_func(registry, store, i) })
1620            }
1621            EntityIndex::Global(i) => Export::Global(self.get_exported_global(store, i)),
1622            EntityIndex::Table(i) => Export::Table(self.get_exported_table(store, i)),
1623            EntityIndex::Memory(i) => match self.get_exported_memory(store, i) {
1624                ExportMemory::Unshared(m) => Export::Memory(m),
1625                ExportMemory::Shared(m, i) => Export::SharedMemory(m, i),
1626            },
1627            EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(store, i)),
1628        }
1629    }
1630
1631    fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
1632        // SAFETY: this is a pin-projection to get a mutable reference to an
1633        // internal field and is safe so long as the `&mut Self` temporarily
1634        // created is not overwritten, which it isn't here.
1635        unsafe { &mut self.get_unchecked_mut().store }
1636    }
1637
1638    fn dropped_elements_mut(self: Pin<&mut Self>) -> &mut EntitySet<ElemIndex> {
1639        // SAFETY: see `store_mut` above.
1640        unsafe { &mut self.get_unchecked_mut().dropped_elements }
1641    }
1642
1643    fn dropped_data_mut(self: Pin<&mut Self>) -> &mut EntitySet<DataIndex> {
1644        // SAFETY: see `store_mut` above.
1645        unsafe { &mut self.get_unchecked_mut().dropped_data }
1646    }
1647
1648    fn memories_mut(
1649        self: Pin<&mut Self>,
1650    ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
1651        // SAFETY: see `store_mut` above.
1652        unsafe { &mut self.get_unchecked_mut().memories }
1653    }
1654
1655    pub(crate) fn tables_mut(
1656        self: Pin<&mut Self>,
1657    ) -> &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
1658        // SAFETY: see `store_mut` above.
1659        unsafe { &mut self.get_unchecked_mut().tables }
1660    }
1661
1662    #[cfg(feature = "wmemcheck")]
1663    pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
1664        // SAFETY: see `store_mut` above.
1665        unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
1666    }
1667}
1668
1669// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
1670// last field of `Instance`.
1671unsafe impl InstanceLayout for Instance {
1672    const INIT_ZEROED: bool = false;
1673    type VMContext = VMContext;
1674
1675    fn layout(&self) -> Layout {
1676        Self::alloc_layout(self.runtime_info.offsets())
1677    }
1678
1679    fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
1680        &self.vmctx
1681    }
1682
1683    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
1684        &mut self.vmctx
1685    }
1686}
1687
1688pub type InstanceHandle = OwnedInstance<Instance>;
1689
1690/// A handle holding an `Instance` of a WebAssembly module.
1691///
1692/// This structure is an owning handle of the `instance` contained internally.
1693/// When this value goes out of scope it will deallocate the `Instance` and all
1694/// memory associated with it.
1695///
1696/// Note that this lives within a `StoreOpaque` on a list of instances that a
1697/// store is keeping alive.
1698#[derive(Debug)]
1699#[repr(transparent)] // guarantee this is a zero-cost wrapper
1700pub struct OwnedInstance<T: InstanceLayout> {
1701    /// The raw pointer to the instance that was allocated.
1702    ///
1703    /// Note that this is not equivalent to `Box<Instance>` because the
1704    /// allocation here has a `VMContext` trailing after it. Hence the custom
1705    /// destructor, which invokes the `dealloc` function with the appropriate
1706    /// layout.
1707    instance: SendSyncPtr<T>,
1708    _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
1709}
1710
1711/// Structure that must be placed at the end of a type implementing
1712/// `InstanceLayout`.
1713#[repr(align(16))] // match the alignment of VMContext
1714pub struct OwnedVMContext<T> {
1715    /// A pointer to the `vmctx` field at the end of the structure.
1716    ///
1717    /// If you're looking at this, a reasonable question would be "why do we need
1718    /// a pointer to ourselves?" because, after all, the pointer's value is
1719    /// trivially derivable from any `&Instance` pointer. The rationale for this
1720    /// field's existence is subtle, but it's required for correctness. The
1721    /// short version is "this makes miri happy".
1722    ///
1723    /// The long version of why this field exists is that the rules that MIRI
1724    /// uses to ensure pointers are used correctly impose conditions that
1725    /// depend on how pointers are derived. More specifically, if a `*mut T` is
1726    /// derived from a `&mut T`, then that invalidates all prior pointers derived
1727    /// from the `&mut T`. This means that while we liberally want to re-acquire
1728    /// a `*mut VMContext` throughout the implementation of `Instance` the
1729    /// trivial way, a function `fn vmctx(Pin<&mut Instance>) -> *mut VMContext`
1730    /// would effectively invalidate all prior `*mut VMContext` pointers
1731    /// acquired. The purpose of this field is to serve as a sort of
1732    /// source-of-truth for where `*mut VMContext` pointers come from.
1733    ///
1734    /// This field is initialized when the `Instance` is created with the
1735    /// original allocation's pointer. That means that the provenance of this
1736    /// pointer contains the entire allocation (both instance and `VMContext`).
1737    /// This provenance bit is then "carried through" where `fn vmctx` will base
1738    /// all returned pointers on this pointer itself. This provides the means of
1739    /// never invalidating this pointer throughout MIRI and additionally being
1740    /// able to still temporarily have `Pin<&mut Instance>` methods and such.
1741    ///
1742    /// It's important to note, though, that this is not here purely for MIRI.
1743    /// The careful construction of the `fn vmctx` method has ramifications on
1744    /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
1745    /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
1746    /// deriving VMContext pointers from this pointer we specifically hint to
1747    /// LLVM that trickery is afoot, which properly informs `noalias` and similar
1748    /// annotations and analyses. More or less, this pointer is actually loaded
1749    /// in LLVM IR which helps defeat otherwise present aliasing optimizations,
1750    /// which we want, since writes to this should basically never be optimized
1751    /// out.
1752    ///
1753    /// As a final note it's worth pointing out that the machine code generated
1754    /// for accessing `fn vmctx` is still as one would expect. This member isn't
1755    /// actually ever loaded at runtime (or at least shouldn't be). Perhaps in
1756    /// the future if the memory consumption of this field is a problem we could
1757    /// shrink it slightly, but for now one extra pointer per wasm instance
1758    /// seems not too bad.
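    ///
    /// As a rough, self-contained sketch of the pattern with toy types (not
    /// Wasmtime's actual ones): the pointer is captured once from the
    /// original allocation, whose provenance spans both the header and the
    /// trailing data, and every later request for the trailing data
    /// re-derives its pointer from that stored value rather than from a
    /// fresh `&mut` borrow, so previously handed-out pointers stay valid:
    ///
    /// ```ignore
    /// #[repr(C)]
    /// struct Header {
    ///     // Written once at allocation time with a pointer whose
    ///     // provenance covers the whole allocation.
    ///     trailing: *mut u8,
    /// }
    ///
    /// impl Header {
    ///     fn trailing(&self) -> *mut u8 {
    ///         // Derived from the stored pointer, not from `self`, so this
    ///         // call does not invalidate pointers handed out earlier.
    ///         self.trailing
    ///     }
    /// }
    /// ```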
1759    vmctx_self_reference: SendSyncPtr<T>,
1760
1761    /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
1762    /// safe operation.
1763    _marker: core::marker::PhantomPinned,
1764}
1765
1766impl<T> OwnedVMContext<T> {
1767    /// Creates a new blank vmctx to place at the end of an instance.
1768    pub fn new() -> OwnedVMContext<T> {
1769        OwnedVMContext {
1770            vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
1771            _marker: core::marker::PhantomPinned,
1772        }
1773    }
1774}
1775
1776/// Helper trait to plumb both core instances and component instances into
1777/// `OwnedInstance` below.
1778///
1779/// # Safety
1780///
1781/// This trait requires `layout` to correctly describe `Self` and appropriately
1782/// allocate space for `Self::VMContext` afterwards. Additionally the field
1783/// returned by `owned_vmctx()` must be the last field in the structure.
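///
/// As a hedged sketch, an implementor might look roughly like the following,
/// where `ToyInstance`, `ToyVMContext`, and the `vmctx_size` field are all
/// made up for illustration and are not types in this crate:
///
/// ```ignore
/// unsafe impl InstanceLayout for ToyInstance {
///     const INIT_ZEROED: bool = false;
///     type VMContext = ToyVMContext;
///
///     fn layout(&self) -> Layout {
///         // Must describe `ToyInstance` itself plus however many bytes of
///         // trailing vmctx data this particular instance needs.
///         Layout::new::<ToyInstance>()
///             .extend(Layout::from_size_align(self.vmctx_size, 16).unwrap())
///             .unwrap()
///             .0
///     }
///
///     // `vmctx` must be the last field of `ToyInstance`.
///     fn owned_vmctx(&self) -> &OwnedVMContext<ToyVMContext> {
///         &self.vmctx
///     }
///     fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<ToyVMContext> {
///         &mut self.vmctx
///     }
/// }
/// ```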
1784pub unsafe trait InstanceLayout {
1785    /// Whether to allocate this instance with `alloc_zeroed` or with `alloc`.
1786    const INIT_ZEROED: bool;
1787
1788    /// The trailing `VMContext` type at the end of this instance.
1789    type VMContext;
1790
1791    /// The memory layout to use to allocate and deallocate this instance.
1792    fn layout(&self) -> Layout;
1793
1794    fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
1795    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;
1796
1797    /// Returns the `vmctx_self_reference` set above.
1798    #[inline]
1799    fn vmctx(&self) -> NonNull<Self::VMContext> {
1800        // The definition of this method is subtle but intentional. The goal
1801        // here is that effectively this should return `&mut self.vmctx`, but
1802        // it's not quite so simple. Some more documentation is available on the
1803        // `vmctx_self_reference` field, but the general idea is that we're
1804        // creating a pointer to return with proper provenance. Provenance is
1805        // still in the works in Rust at the time of this writing but the load
1806        // of the `self.vmctx_self_reference` field is important here as it
1807        // affects how LLVM thinks about aliasing with respect to the returned
1808        // pointer.
1809        //
1810        // The intention of this method is to codegen to machine code as `&mut
1811        // self.vmctx`, however. While it doesn't show up like this in LLVM IR
1812        // (there's an actual load of the field) it does look like that by the
1813        // time the backend runs. (that's magic to me, the backend removing
1814        // loads...)
1815        let owned_vmctx = self.owned_vmctx();
1816        let owned_vmctx_raw = NonNull::from(owned_vmctx);
1817        // SAFETY: it's part of the contract of `InstanceLayout` and the usage
1818        // with `OwnedInstance` that this indeed points to the vmctx.
1819        let addr = unsafe { owned_vmctx_raw.add(1) };
1820        owned_vmctx
1821            .vmctx_self_reference
1822            .as_non_null()
1823            .with_addr(addr.addr())
1824    }
1825
1826    /// Helper function to access various locations offset from our `*mut
1827    /// VMContext` object.
1828    ///
1829    /// Note that this method takes `&self` as an argument but returns
1830    /// `NonNull<T>` which is frequently used to mutate said memory. This is an
1831    /// intentional design decision where the safety of the modification of
1832    /// memory is placed as a burden onto the caller. The implementation of this
1833    /// method explicitly does not require `&mut self` to acquire mutable
1834    /// provenance to update the `VMContext` region. Instead all pointers into
1835    /// the `VMContext` area have provenance/permissions to write.
1836    ///
1837    /// Also note though that care must be taken to ensure that reads/writes of
1838    /// memory only happen where appropriate; for example a non-atomic
1839    /// write (as most are) should never happen concurrently with another read
1840    /// or write. It's generally the caller's burden to adhere to this.
1841    ///
1842    /// Also of note is that most of the time the usage of this method falls
1843    /// into one of:
1844    ///
1845    /// * Something in the VMContext is being read or written. In that case use
1846    ///   `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due to
1847    ///   that having a safer lifetime.
1848    ///
1849    /// * A pointer is being created to pass to other VM* data structures. In
1850    ///   that situation the lifetime of all VM data structures are typically
1851    ///   tied to the `Store<T>` which is what provides the guarantees around
1852    ///   concurrency/etc.
1853    ///
1854    /// There's quite a lot of unsafety riding on this method, especially
1855    /// related to the ascription `T` of the byte `offset`. It's hoped that in
1856    /// the future we're able to settle on a safer design, at least in theory.
1857    ///
1858    /// # Safety
1859    ///
1860    /// This method is unsafe because the `offset` must be within bounds of the
1861    /// `VMContext` object trailing this instance. Additionally `T` must be a
1862    /// valid ascription of the value that resides at that location.
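    ///
    /// As a hedged sketch, mirroring how `initialize_vmctx` above uses this
    /// method (the `offsets` value is assumed to be in scope):
    ///
    /// ```ignore
    /// // The magic field really is a `u32` and lives within the `VMContext`,
    /// // satisfying both the bounds requirement and the `T` ascription.
    /// unsafe {
    ///     instance
    ///         .vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
    ///         .write(VMCONTEXT_MAGIC);
    /// }
    /// ```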
1863    unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
1864        // SAFETY: the safety requirements of `byte_add` are forwarded to this
1865        // method's caller.
1866        unsafe {
1867            self.vmctx()
1868                .byte_add(usize::try_from(offset.into()).unwrap())
1869                .cast()
1870        }
1871    }
1872
1873    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
1874    /// `&self` to the returned reference `&T`.
1875    ///
1876    /// # Safety
1877    ///
1878    /// See the safety documentation of `vmctx_plus_offset_raw`.
1879    unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
1880        // SAFETY: this method has the same safety requirements as
1881        // `vmctx_plus_offset_raw`.
1882        unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
1883    }
1884
1885    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
1886    /// `&mut self` to the returned reference `&mut T`.
1887    ///
1888    /// # Safety
1889    ///
1890    /// See the safety documentation of `vmctx_plus_offset_raw`.
1891    unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
1892        self: Pin<&mut Self>,
1893        offset: impl Into<u32>,
1894    ) -> &mut T {
1895        // SAFETY: this method has the same safety requirements as
1896        // `vmctx_plus_offset_raw`.
1897        unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
1898    }
1899}
1900
1901impl<T: InstanceLayout> OwnedInstance<T> {
1902    /// Allocates a new `OwnedInstance` and places `instance` inside of it.
1903    ///
1904    /// This will allocate memory for `instance` plus its trailing `VMContext` and move `instance` into it.
1905    pub(super) fn new(mut instance: T) -> Result<OwnedInstance<T>, OutOfMemory> {
1906        let layout = instance.layout();
1907        debug_assert!(layout.size() >= size_of_val(&instance));
1908        debug_assert!(layout.align() >= align_of_val(&instance));
1909
1910        // SAFETY: it's up to us to assert that `layout` has a non-zero size,
1911        // which is asserted here.
1912        let ptr = unsafe {
1913            assert!(layout.size() > 0);
1914            if T::INIT_ZEROED {
1915                alloc::alloc::alloc_zeroed(layout)
1916            } else {
1917                alloc::alloc::alloc(layout)
1918            }
1919        };
1920        let Some(instance_ptr) = NonNull::new(ptr.cast::<T>()) else {
1921            return Err(OutOfMemory::new(layout.size()));
1922        };
1923
1924        // SAFETY: it's part of the unsafe contract of `InstanceLayout` that the
1925        // `add` here is appropriate for the layout allocated.
1926        let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
1927        instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();
1928
1929        // SAFETY: we allocated above and it's an unsafe contract of
1930        // `InstanceLayout` that the layout is suitable for writing the
1931        // instance.
1932        unsafe {
1933            instance_ptr.write(instance);
1934        }
1935
1936        let ret = OwnedInstance {
1937            instance: SendSyncPtr::new(instance_ptr),
1938            _marker: marker::PhantomData,
1939        };
1940
1941        // Double-check various vmctx calculations are correct.
1942        debug_assert_eq!(
1943            vmctx_self_reference.addr(),
1944            // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
1945            // the last field to get a pointer to 1-byte-past-the-end of an
1946            // object, which should be valid.
1947            unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
1948        );
1949        debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());
1950
1951        Ok(ret)
1952    }
1953
1954    /// Gets the raw underlying `&Instance` from this handle.
1955    pub fn get(&self) -> &T {
1956        // SAFETY: this is an owned instance handle that retains exclusive
1957        // ownership of the `Instance` inside. With `&self` given we know
1958        // this pointer is valid and the returned lifetime is connected
1959        // to `self`, so that should also be valid.
1960        unsafe { self.instance.as_non_null().as_ref() }
1961    }
1962
1963    /// Same as [`Self::get`] except for mutability.
1964    pub fn get_mut(&mut self) -> Pin<&mut T> {
1965        // SAFETY: The lifetime concerns here are the same as `get` above.
1966        // Otherwise `new_unchecked` is used here to uphold the contract that
1967        // instances are always pinned in memory.
1968        unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
1969    }
1970}
1971
1972impl<T: InstanceLayout> Drop for OwnedInstance<T> {
1973    fn drop(&mut self) {
1974        unsafe {
1975            let layout = self.get().layout();
1976            ptr::drop_in_place(self.instance.as_ptr());
1977            alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
1978        }
1979    }
1980}