//! An `Instance` contains all the runtime state used by execution of a
//! wasm module (except its callstack and register state). An
//! `InstanceHandle` is a reference-counting handle for an `Instance`.

use crate::prelude::*;
use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
use crate::runtime::vm::export::Export;
use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
use crate::runtime::vm::table::{Table, TableElement, TableElementType};
use crate::runtime::vm::vmcontext::{
    VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
    VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
    VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
};
use crate::runtime::vm::{
    ExportFunction, ExportGlobal, ExportGlobalKind, ExportMemory, ExportTable, ExportTag, GcStore,
    Imports, ModuleRuntimeInfo, SendSyncPtr, VMGcRef, VMStore, VMStoreRawPtr, VmPtr, VmSafe,
    WasmFault,
};
use crate::store::{InstanceId, StoreOpaque};
use alloc::sync::Arc;
use core::alloc::Layout;
use core::marker;
use core::ops::Range;
use core::pin::Pin;
use core::ptr::NonNull;
#[cfg(target_has_atomic = "64")]
use core::sync::atomic::AtomicU64;
use core::{mem, ptr};
#[cfg(feature = "gc")]
use wasmtime_environ::ModuleInternedTypeIndex;
use wasmtime_environ::{
    DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
    ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex, HostPtr, MemoryIndex,
    Module, PrimaryMap, PtrSize, TableIndex, TableInitialValue, TableSegmentElements, TagIndex,
    Trap, VMCONTEXT_MAGIC, VMOffsets, VMSharedTypeIndex, WasmHeapTopType,
    packed_option::ReservedValue,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::Wmemcheck;

mod allocator;
pub use allocator::*;

/// The pair of an instance and a raw pointer to its associated store.
///
/// ### Safety
///
/// > **Note**: it's known that the documentation below is documenting an
/// > unsound pattern and we're in the process of fixing it, but it'll take
/// > some time to refactor. Notably `unpack_mut` is not sound because the
/// > returned store pointer can be used to accidentally alias the instance
/// > pointer returned as well.
///
/// Getting a borrow of a vmctx's store is one of the fundamental bits of unsafe
/// code in Wasmtime. No matter how we architect the runtime, some kind of
/// unsafe conversion from a raw vmctx pointer that Wasm is using into a Rust
/// struct must happen.
///
/// It is our responsibility to ensure that multiple (exclusive) borrows of the
/// vmctx's store never exist at the same time. The distinction between the
/// `Instance` type (which doesn't expose its underlying vmctx pointer or a way
/// to get a borrow of its associated store) and this type (which does) is
/// designed to help with that.
///
/// Going from a `*mut VMContext` to a `&mut StoreInner<T>` is naturally unsafe
/// due to the raw pointer usage, but additionally the `T` type parameter needs
/// to be the same `T` that was used to define the `dyn VMStore` trait object
/// that was stuffed into the vmctx.
///
/// ### Usage
///
/// Usage generally looks like:
///
/// 1. You get a raw `*mut VMContext` from Wasm
///
/// 2. You call `InstanceAndStore::from_vmctx` on that raw pointer
///
/// 3. You then call `InstanceAndStore::unpack_mut` (or another helper) to get
///    the underlying `Pin<&mut Instance>` and `&mut dyn VMStore` (or `&mut
///    StoreInner<T>`).
///
/// 4. You then use whatever `Instance` methods you need to, each of which take
///    a store argument as necessary.
///
/// In step (4) you no longer need to worry about double exclusive borrows of
/// the store, so long as you don't do (1-2) again. Note also that the borrow
/// checker prevents repeating step (3) if you never repeat (1-2). In general,
/// steps (1-3) should be done in a single, common, internally-unsafe,
/// plumbing-code bottleneck and the raw pointer should never be exposed to Rust
/// code that does (4) after the `InstanceAndStore` is created. Follow this
/// pattern, and everything using the resulting `Instance` and `Store` can be
/// safe code (at least, with regards to accessing the store itself).
///
/// As an illustrative example, the common plumbing code for our various
/// libcalls performs steps (1-3) before calling into each actual libcall
/// implementation function that does (4). The plumbing code hides the raw vmctx
/// pointer and never gives out access to it to the libcall implementation
/// functions, nor does an `Instance` expose its internal vmctx pointer, which
/// would allow unsafely repeating steps (1-2).
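///
/// Below is a hedged, minimal sketch of that plumbing shape; the shim and
/// libcall names (and their signatures) are hypothetical, not actual
/// Wasmtime libcalls:
///
/// ```ignore
/// unsafe fn example_libcall_shim(vmctx: NonNull<VMContext>) -> Result<()> {
///     // Steps (1-3): the single, internally-unsafe plumbing bottleneck.
///     InstanceAndStore::from_vmctx(vmctx, |pair| {
///         let (instance, store) = pair.unpack_mut();
///         // Step (4): the raw vmctx pointer is no longer visible here, so
///         // the libcall implementation itself can be safe code.
///         example_libcall_impl(store, instance)
///     })
/// }
/// ```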
#[repr(transparent)]
pub struct InstanceAndStore {
    instance: Instance,
}

impl InstanceAndStore {
    /// Converts the provided `*mut VMContext` to an `InstanceAndStore`
    /// reference and calls the provided closure with it.
    ///
    /// This method will move the `vmctx` pointer backwards to point to the
    /// original `Instance` that precedes it. The closure is provided a
    /// temporary reference to the `InstanceAndStore` with a constrained
    /// lifetime to ensure that it doesn't accidentally escape.
    ///
    /// # Safety
    ///
    /// Callers must validate that the `vmctx` pointer is a valid allocation and
    /// that it's valid to acquire `&mut InstanceAndStore` at this time. For
    /// example this can't be called twice on the same `VMContext` to get two
    /// active mutable borrows to the same `InstanceAndStore`.
    ///
    /// See also the safety discussion in this type's documentation.
    #[inline]
    pub(crate) unsafe fn from_vmctx<R>(
        vmctx: NonNull<VMContext>,
        f: impl for<'a> FnOnce(&'a mut Self) -> R,
    ) -> R {
        const _: () = assert!(mem::size_of::<InstanceAndStore>() == mem::size_of::<Instance>());
        let mut ptr = vmctx
            .byte_sub(mem::size_of::<Instance>())
            .cast::<InstanceAndStore>();

        f(ptr.as_mut())
    }

    /// Unpacks this `InstanceAndStore` into its underlying `Instance` and `dyn
    /// VMStore`.
    #[inline]
    pub(crate) fn unpack_mut(&mut self) -> (Pin<&mut Instance>, &mut dyn VMStore) {
        unsafe {
            let store = &mut *self.store_ptr();
            (Pin::new_unchecked(&mut self.instance), store)
        }
    }

    /// Gets a pointer to this instance's `Store` which was originally
    /// configured on creation.
    ///
    /// # Panics
    ///
    /// Panics if the originally configured store was `None`. That can happen
    /// for host functions: because host functions are shared amongst threads
    /// and don't all share the same store, their original `Store` isn't
    /// recorded and the pointer is retained as null, so it can't be queried
    /// here.
    #[inline]
    fn store_ptr(&self) -> *mut dyn VMStore {
        self.instance.store.unwrap().0.as_ptr()
    }
}

/// A type that roughly corresponds to a WebAssembly instance, but is also used
/// for host-defined objects.
///
/// Instances here can correspond to actual instantiated modules, but they're
/// also used ubiquitously for host-defined objects. For example creating a
/// host-defined memory will have a `module` that looks like it exports a single
/// memory (and similar for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether they were created on the host or through a module.
///
/// # Ownership
///
/// This structure is never allocated directly but is instead managed through
/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
/// dynamic size corresponding to the `module` configured within. Memory
/// management of this structure is always done through `InstanceHandle` as the
/// sole owner of an instance.
///
/// # `Instance` and `Pin`
///
/// Given an instance it is accompanied with trailing memory for the
/// appropriate `VMContext`. The `Instance` also holds `runtime_info` and other
/// information pointing to relevant offsets for the `VMContext`. Thus it is
/// not sound to mutate `runtime_info` after an instance is created. More
/// generally it's also not safe to "swap" instances, for example given two
/// `&mut Instance` values it's not sound to swap them as then the `VMContext`
/// values are inaccurately described.
///
/// To encapsulate this guarantee this type is only ever mutated through Rust's
/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>` which
/// statically disallows safe access to `&mut Instance`. There are assorted
/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
/// individual fields, for example `memories_mut`. More projection methods can
/// be added as necessary, including methods that project multiple fields at a
/// time. The precise ergonomics around getting mutable access to some fields
/// (but notably not `runtime_info`) are likely to evolve over time.
///
/// Note that it is essentially never sound to pass around `&mut Instance`.
/// That should always instead be `Pin<&mut Instance>`. All usage of
/// `Pin::new_unchecked` should be here in this module in just a few `unsafe`
/// locations and it's recommended to use existing helpers if you can.
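///
/// As a hedged sketch of that projection style (the helper below is
/// illustrative, not an existing method):
///
/// ```ignore
/// fn bump_table(mut instance: Pin<&mut Instance>, idx: DefinedTableIndex) {
///     // Project to the field through a dedicated method rather than ever
///     // forming a `&mut Instance`.
///     let (_alloc_idx, table) = &mut instance.as_mut().tables_mut()[idx];
///     // ... mutate `table` in place ...
/// }
/// ```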
#[repr(C)] // ensure that the vmctx field is last.
pub struct Instance {
    /// The index within a `Store` at which this instance lives.
    id: InstanceId,

    /// The runtime info (corresponding to the "compiled module"
    /// abstraction in higher layers) that is retained and needed for
    /// lazy initialization. This provides access to the underlying
    /// Wasm module entities, the compiled JIT code, metadata about
    /// functions, lazy initialization state, etc.
    runtime_info: ModuleRuntimeInfo,

    /// WebAssembly linear memory data.
    ///
    /// This is where all runtime information about defined linear memories in
    /// this module lives.
    ///
    /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// memory.
    memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,

    /// WebAssembly table data.
    ///
    /// Like memories, this is only for defined tables in the module and
    /// contains all of their runtime state.
    ///
    /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// table.
    tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,

    /// Stores the dropped passive element segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_elements: EntitySet<ElemIndex>,

    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: EntitySet<DataIndex>,

    // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
    // memory 0.
    #[cfg(feature = "wmemcheck")]
    pub(crate) wmemcheck_state: Option<Wmemcheck>,

    /// Self-pointer back to `Store<T>` and its functions. Not present for
    /// the brief time that `Store<T>` is itself being created. Also not
    /// present for some niche uses that are disconnected from stores (e.g.
    /// cross-thread stuff used in `InstancePre`)
    store: Option<VMStoreRawPtr>,

    /// Additional context used by compiled wasm code. This field is last, and
    /// represents a dynamically-sized array that extends beyond the nominal
    /// end of the struct (similar to a flexible array member).
    vmctx: OwnedVMContext<VMContext>,
}

impl Instance {
    /// Create an instance at the given memory address.
    ///
    /// It is assumed the memory was properly aligned and the
    /// allocation was `alloc_size` in bytes.
    fn new(
        req: InstanceAllocationRequest,
        memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
        tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
        memory_tys: &PrimaryMap<MemoryIndex, wasmtime_environ::Memory>,
    ) -> InstanceHandle {
        let module = req.runtime_info.env_module();
        let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
        let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());

        #[cfg(not(feature = "wmemcheck"))]
        let _ = memory_tys;

        let mut ret = OwnedInstance::new(Instance {
            id: req.id,
            runtime_info: req.runtime_info.clone(),
            memories,
            tables,
            dropped_elements,
            dropped_data,
            #[cfg(feature = "wmemcheck")]
            wmemcheck_state: {
                if req.wmemcheck {
                    let size = memory_tys
                        .iter()
                        .next()
                        .map(|memory| memory.1.limits.min)
                        .unwrap_or(0)
                        * 64
                        * 1024;
                    Some(Wmemcheck::new(size.try_into().unwrap()))
                } else {
                    None
                }
            },
            store: None,
            vmctx: OwnedVMContext::new(),
        });

        // SAFETY: this vmctx was allocated with the same layout above, so it
        // should be safe to initialize with the same values here.
        unsafe {
            ret.get_mut().initialize_vmctx(
                module,
                req.runtime_info.offsets(),
                req.store,
                req.imports,
            );
        }
        ret
    }

    /// Converts the provided `*mut VMContext` to an `Instance` pointer and runs
    /// the provided closure with the instance.
    ///
    /// This method will move the `vmctx` pointer backwards to point to the
    /// original `Instance` that precedes it. The closure is provided a
    /// temporary `Pin<&mut Instance>` whose lifetime is constrained to the
    /// closure to ensure it doesn't accidentally escape.
    ///
    /// # Safety
    ///
    /// Callers must validate that the `vmctx` pointer is a valid allocation
    /// and that it's valid to acquire `Pin<&mut Instance>` at this time. For
    /// example this can't be called twice on the same `VMContext` to get two
    /// active pointers to the same `Instance`.
    #[inline]
    pub unsafe fn from_vmctx<R>(
        vmctx: NonNull<VMContext>,
        f: impl FnOnce(Pin<&mut Instance>) -> R,
    ) -> R {
        let mut ptr = vmctx
            .byte_sub(mem::size_of::<Instance>())
            .cast::<Instance>();
        f(Pin::new_unchecked(ptr.as_mut()))
    }

    pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
        self.runtime_info.env_module()
    }

    #[cfg(feature = "gc")]
    pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
        match &self.runtime_info {
            ModuleRuntimeInfo::Module(m) => Some(m),
            ModuleRuntimeInfo::Bare(_) => None,
        }
    }

    /// Translate a module-level interned type index into an engine-level
    /// interned type index.
    #[cfg(feature = "gc")]
    pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
        self.runtime_info.engine_type_index(module_index)
    }

    #[inline]
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }

    /// Return the indexed `VMFunctionImport`.
    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
    }

    /// Return the indexed `VMTableImport`.
    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
    }

    /// Return the indexed `VMMemoryImport`.
    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
    }

    /// Return the indexed `VMGlobalImport`.
    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
    }

    /// Return the indexed `VMTagImport`.
    fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
    }

    /// Return the indexed `VMTagDefinition`.
    pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
    }

    /// Return the indexed `VMTableDefinition`.
    pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
        unsafe { self.table_ptr(index).read() }
    }

    /// Update the `VMTableDefinition` for the given defined table index.
    fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
        unsafe {
            self.table_ptr(index).write(table);
        }
    }

    /// Return a pointer to the `index`'th table within this instance, stored
    /// in vmctx memory.
    pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
    }

    /// Get a locally defined or imported memory.
    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
        if let Some(defined_index) = self.env_module().defined_memory_index(index) {
            self.memory(defined_index)
        } else {
            let import = self.imported_memory(index);
            unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
        }
    }

    /// Get a locally defined or imported memory.
    #[cfg(feature = "threads")]
    pub(crate) fn get_runtime_memory(self: Pin<&mut Self>, index: MemoryIndex) -> &mut Memory {
        if let Some(defined_index) = self.env_module().defined_memory_index(index) {
            unsafe { &mut *self.get_defined_memory(defined_index) }
        } else {
            let import = self.imported_memory(index);
            unsafe {
                let ptr = Instance::from_vmctx(import.vmctx.as_non_null(), |i| {
                    i.get_defined_memory(import.index)
                });
                &mut *ptr
            }
        }
    }

    /// Return the indexed `VMMemoryDefinition`, loaded from vmctx memory
    /// already.
    pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
        unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
    }

    /// Set the indexed memory to `VMMemoryDefinition`.
    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
        unsafe {
            self.memory_ptr(index).write(mem);
        }
    }

    /// Return the address of the specified memory at `index` within this vmctx.
    ///
    /// Note that the returned pointer resides in wasm-code-readable-memory in
    /// the vmctx.
    pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
        unsafe {
            self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
                .as_non_null()
        }
    }

    /// Return the indexed `VMGlobalDefinition`.
    pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
    }

    /// Get a raw pointer to the global at the given index, regardless of
    /// whether it is defined locally or imported from another module.
    ///
    /// Panics if the index is out of bounds or is the reserved value.
    pub(crate) fn defined_or_imported_global_ptr(
        self: Pin<&mut Self>,
        index: GlobalIndex,
    ) -> NonNull<VMGlobalDefinition> {
        if let Some(index) = self.env_module().defined_global_index(index) {
            self.global_ptr(index)
        } else {
            self.imported_global(index).from.as_non_null()
        }
    }

    /// Get all globals within this instance.
    ///
    /// Returns both imported and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full globals space.
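    ///
    /// A hedged usage sketch (iterating every global in the index space):
    ///
    /// ```ignore
    /// for (idx, export) in instance.all_globals() {
    ///     // `export.definition` points at the live `VMGlobalDefinition`,
    ///     // whether `idx` names an imported or a defined global.
    /// }
    /// ```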
    pub fn all_globals(&self) -> impl ExactSizeIterator<Item = (GlobalIndex, ExportGlobal)> + '_ {
        let module = self.env_module().clone();
        module
            .globals
            .keys()
            .map(move |idx| (idx, self.get_exported_global(idx)))
    }

    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals(
        &self,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, ExportGlobal)> + '_ {
        let module = self.env_module().clone();
        module
            .globals
            .keys()
            .skip(module.num_imported_globals)
            .map(move |global_idx| {
                let def_idx = module.defined_global_index(global_idx).unwrap();
                let global = ExportGlobal {
                    definition: self.global_ptr(def_idx),
                    kind: ExportGlobalKind::Instance(self.vmctx(), def_idx),
                    global: self.env_module().globals[global_idx],
                };
                (def_idx, global)
            })
    }

    /// Return a pointer to the `VMStoreContext` pointer stored in this vmctx.
    #[inline]
    pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
    }

    /// Return a pointer to the global epoch counter used by this instance.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
        let offset = self.offsets().ptr.vmctx_epoch_ptr();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    /// Return a pointer to the collector-specific heap data.
    pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
        let offset = self.offsets().ptr.vmctx_gc_heap_data();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: Option<NonNull<dyn VMStore>>) {
        *self.as_mut().store_mut() = store.map(VMStoreRawPtr);
        if let Some(mut store) = store {
            let store = store.as_mut();
            self.vm_store_context()
                .write(Some(store.vm_store_context_ptr().into()));
            #[cfg(target_has_atomic = "64")]
            {
                *self.as_mut().epoch_ptr() =
                    Some(NonNull::from(store.engine().epoch_counter()).into());
            }

            if self.env_module().needs_gc_heap {
                self.as_mut().set_gc_heap(Some(store.gc_store().expect(
                    "if we need a GC heap, then `Instance::new_raw` should have already \
                     allocated it for us",
                )));
            } else {
                self.as_mut().set_gc_heap(None);
            }
        } else {
            self.vm_store_context().write(None);
            #[cfg(target_has_atomic = "64")]
            {
                *self.as_mut().epoch_ptr() = None;
            }
            self.as_mut().set_gc_heap(None);
        }
    }

    unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
        if let Some(gc_store) = gc_store {
            *self.gc_heap_data() = Some(gc_store.gc_heap.vmctx_gc_heap_data().into());
        } else {
            *self.gc_heap_data() = None;
        }
    }

    /// Return a reference to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> NonNull<VMContext> {
        InstanceLayout::vmctx(self)
    }

    /// Lookup a function by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    pub fn get_exported_func(self: Pin<&mut Self>, index: FuncIndex) -> ExportFunction {
        let func_ref = self.get_func_ref(index).unwrap();
        ExportFunction { func_ref }
    }

    /// Lookup a table by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    pub fn get_exported_table(&self, index: TableIndex) -> ExportTable {
        let ty = self.env_module().tables[index];
        let (definition, vmctx, index) =
            if let Some(def_index) = self.env_module().defined_table_index(index) {
                (self.table_ptr(def_index), self.vmctx(), def_index)
            } else {
                let import = self.imported_table(index);
                (
                    import.from.as_non_null(),
                    import.vmctx.as_non_null(),
                    import.index,
                )
            };
        ExportTable {
            definition,
            vmctx,
            table: ty,
            index,
        }
    }

    /// Lookup a memory by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    pub fn get_exported_memory(&self, index: MemoryIndex) -> ExportMemory {
        let (definition, vmctx, def_index) =
            if let Some(def_index) = self.env_module().defined_memory_index(index) {
                (self.memory_ptr(def_index), self.vmctx(), def_index)
            } else {
                let import = self.imported_memory(index);
                (
                    import.from.as_non_null(),
                    import.vmctx.as_non_null(),
                    import.index,
                )
            };
        ExportMemory {
            definition,
            vmctx,
            memory: self.env_module().memories[index],
            index: def_index,
        }
    }

    fn get_exported_global(&self, index: GlobalIndex) -> ExportGlobal {
        let global = self.env_module().globals[index];
        if let Some(def_index) = self.env_module().defined_global_index(index) {
            ExportGlobal {
                definition: self.global_ptr(def_index),
                kind: ExportGlobalKind::Instance(self.vmctx(), def_index),
                global,
            }
        } else {
            ExportGlobal::from_vmimport(self.imported_global(index), global)
        }
    }

    fn get_exported_tag(&self, index: TagIndex) -> ExportTag {
        let tag = self.env_module().tags[index];
        let (vmctx, definition, index) =
            if let Some(def_index) = self.env_module().defined_tag_index(index) {
                (self.vmctx(), self.tag_ptr(def_index), def_index)
            } else {
                let import = self.imported_tag(index);
                (
                    import.vmctx.as_non_null(),
                    import.from.as_non_null(),
                    import.index,
                )
            };
        ExportTag {
            definition,
            vmctx,
            index,
            tag,
        }
    }

    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `lookup_by_declaration`.
    pub fn exports(&self) -> wasmparser::collections::index_map::Iter<'_, String, EntityIndex> {
        self.env_module().exports.iter()
    }

    /// Return the table index for the given `VMTableDefinition`.
    pub unsafe fn table_index(&self, table: &VMTableDefinition) -> DefinedTableIndex {
        let index = DefinedTableIndex::new(
            usize::try_from(
                (table as *const VMTableDefinition)
                    .offset_from(self.table_ptr(DefinedTableIndex::new(0)).as_ptr()),
            )
            .unwrap(),
        );
        assert!(index.index() < self.tables.len());
        index
    }

    /// Get the given memory's page size, in bytes.
    pub(crate) fn memory_page_size(&self, index: MemoryIndex) -> usize {
        usize::try_from(self.env_module().memories[index].page_size()).unwrap()
    }

    /// Grow memory by the specified number of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number of
    /// pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
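    ///
    /// A hedged caller-side sketch (assumes a `store` and a Wasm-level
    /// `index` are already in hand):
    ///
    /// ```ignore
    /// match instance.memory_grow(store, index, delta)? {
    ///     Some(old_size_in_bytes) => { /* growth succeeded */ }
    ///     None => { /* could not grow; `memory.grow` reports -1 to Wasm */ }
    /// }
    /// ```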
    pub(crate) fn memory_grow(
        self: Pin<&mut Self>,
        store: &mut dyn VMStore,
        index: MemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        match self.env_module().defined_memory_index(index) {
            Some(idx) => self.defined_memory_grow(store, idx, delta),
            None => {
                let import = self.imported_memory(index);
                unsafe {
                    Instance::from_vmctx(import.vmctx.as_non_null(), |i| {
                        i.defined_memory_grow(store, import.index, delta)
                    })
                }
            }
        }
    }

    fn defined_memory_grow(
        mut self: Pin<&mut Self>,
        store: &mut dyn VMStore,
        idx: DefinedMemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        let memory = &mut self.as_mut().memories_mut()[idx].1;

        let result = unsafe { memory.grow(delta, Some(store)) };

        // Update the state used by a non-shared Wasm memory in case the base
        // pointer and/or the length changed.
        if memory.as_shared_memory().is_none() {
            let vmmemory = memory.vmmemory();
            self.set_memory(idx, vmmemory);
        }

        result
    }

    pub(crate) fn table_element_type(
        self: Pin<&mut Self>,
        table_index: TableIndex,
    ) -> TableElementType {
        unsafe { (*self.get_table(table_index)).element_type() }
    }

    /// Grow the table by the specified number of elements, filling them with
    /// `init_value`.
    ///
    /// Returns `None` if the table can't be grown by the specified number of
    /// elements, or if `init_value` is the wrong type of table element.
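    ///
    /// A hedged caller-side sketch (mirrors `memory_grow`; `init_value` must
    /// match the table's element type):
    ///
    /// ```ignore
    /// if instance.table_grow(store, table_index, delta, init_value)?.is_none() {
    ///     // could not grow; `table.grow` reports -1 to Wasm
    /// }
    /// ```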
    pub(crate) fn table_grow(
        self: Pin<&mut Self>,
        store: &mut dyn VMStore,
        table_index: TableIndex,
        delta: u64,
        init_value: TableElement,
    ) -> Result<Option<usize>, Error> {
        self.with_defined_table_index_and_instance(table_index, |i, instance| {
            instance.defined_table_grow(store, i, delta, init_value)
        })
    }

    fn defined_table_grow(
        mut self: Pin<&mut Self>,
        store: &mut dyn VMStore,
        table_index: DefinedTableIndex,
        delta: u64,
        init_value: TableElement,
    ) -> Result<Option<usize>, Error> {
        let table = &mut self
            .as_mut()
            .tables_mut()
            .get_mut(table_index)
            .unwrap_or_else(|| panic!("no table for index {}", table_index.index()))
            .1;

        let result = unsafe { table.grow(delta, init_value, store) };

        // Keep the `VMContext` pointers used by compiled Wasm code up to
        // date.
        let element = table.vmtable();
        self.set_table(table_index, element);

        result
    }

    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
        let size = mem::size_of::<Self>()
            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
            .unwrap();
        let align = mem::align_of::<Self>();
        Layout::from_size_align(size, align).unwrap()
    }

    fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
    }

    /// Construct a new `VMFuncRef` for the given function (imported or
    /// defined in this module) and store it into the given location. Used
    /// during lazy initialization.
    ///
    /// Note that our current lazy-init scheme actually calls this every
    /// time the funcref pointer is fetched; this turns out to be better
    /// than tracking state related to whether it's been initialized
    /// before, because resetting that state on (re)instantiation is
    /// very expensive if there are many funcrefs.
    ///
    /// # Safety
    ///
    /// This function requires that `into` be a valid pointer.
    unsafe fn construct_func_ref(
        self: Pin<&mut Self>,
        index: FuncIndex,
        type_index: VMSharedTypeIndex,
        into: *mut VMFuncRef,
    ) {
        let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
            VMFuncRef {
                array_call: self
                    .runtime_info
                    .array_to_wasm_trampoline(def_index)
                    .expect("should have array-to-Wasm trampoline for escaping function")
                    .into(),
                wasm_call: Some(self.runtime_info.function(def_index).into()),
                vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
                type_index,
            }
        } else {
            let import = self.imported_function(index);
            VMFuncRef {
                array_call: import.array_call,
                wasm_call: Some(import.wasm_call),
                vmctx: import.vmctx,
                type_index,
            }
        };

        // SAFETY: the unsafe contract here is forwarded to callers of this
        // function.
        unsafe {
            ptr::write(into, func_ref);
        }
    }

    /// Get a `&VMFuncRef` for the given `FuncIndex`.
    ///
    /// Returns `None` if the index is the reserved index value.
    ///
    /// The returned reference is a stable reference that won't be moved and can
    /// be passed into JIT code.
    pub(crate) fn get_func_ref(
        self: Pin<&mut Self>,
        index: FuncIndex,
    ) -> Option<NonNull<VMFuncRef>> {
        if index == FuncIndex::reserved_value() {
            return None;
        }

        // For now, we eagerly initialize a funcref struct in-place
        // whenever asked for a reference to it. This is mostly
        // fine, because in practice each funcref is unlikely to be
        // requested more than a few times: once-ish for funcref
        // tables used for call_indirect (the usual compilation
        // strategy places each function in the table at most once),
        // and once or a few times when fetching exports via API.
        // Note that for any case driven by table accesses, the lazy
        // table init behaves like a higher-level cache layer that
        // protects this initialization from happening multiple
        // times, via that particular table at least.
        //
        // When `ref.func` becomes more commonly used or if we
        // otherwise see a use-case where this becomes a hotpath,
        // we can reconsider by using some state to track
        // "uninitialized" explicitly, for example by zeroing the
        // funcrefs (perhaps together with other
        // zeroed-at-instantiate-time state) or using a separate
        // is-initialized bitmap.
        //
        // We arrived at this design because zeroing memory is
        // expensive, so it's better for instantiation performance
        // if we don't have to track "is-initialized" state at
        // all!
        let func = &self.env_module().functions[index];
        let sig = func.signature.unwrap_engine_type_index();

        // SAFETY: the offset calculated here should be correct with
        // `self.offsets`
        let func_ref = unsafe {
            self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
        };

        // SAFETY: the `func_ref` ptr should be valid as it's within our
        // `VMContext` area.
        unsafe {
            self.construct_func_ref(index, sig, func_ref.as_ptr());
        }

        Some(func_ref)
    }

    /// Get the passive elements segment at the given index.
    ///
    /// Returns an empty segment if the index is out of bounds or if the segment
    /// has been dropped.
    ///
    /// The `storage` parameter should always be `None`; it is a bit of a hack
    /// to work around lifetime issues.
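    ///
    /// A hedged caller-side sketch of the `storage` pattern:
    ///
    /// ```ignore
    /// let mut storage = None;
    /// let elements = instance.passive_element_segment(&mut storage, elem_index);
    /// // `elements` borrows from `storage`, so `storage` must stay alive for
    /// // as long as `elements` is in use.
    /// ```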
    pub(crate) fn passive_element_segment<'a>(
        &self,
        storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
        elem_index: ElemIndex,
    ) -> &'a TableSegmentElements {
        debug_assert!(storage.is_none());
        *storage = Some((
            // TODO: this `clone()` shouldn't be necessary but is used for now to
            // inform `rustc` that the lifetime of the elements here are
            // disconnected from the lifetime of `self`.
            self.env_module().clone(),
            // NB: fall back to an expressions-based list of elements which
            // doesn't have static type information (as opposed to
            // `TableSegmentElements::Functions`) since we don't know what type
            // is needed in the caller's context. Let the type be inferred by
            // how they use the segment.
            TableSegmentElements::Expressions(Box::new([])),
        ));
        let (module, empty) = storage.as_ref().unwrap();

        match module.passive_elements_map.get(&elem_index) {
            Some(index) if !self.dropped_elements.contains(elem_index) => {
                &module.passive_elements[*index]
            }
            _ => empty,
        }
    }

    /// The `table.init` operation: initializes a portion of a table with a
    /// passive element.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the range within the table is out of bounds
    /// or the range within the passive element is out of bounds.
    pub(crate) fn table_init(
        self: Pin<&mut Self>,
        store: &mut StoreOpaque,
        table_index: TableIndex,
        elem_index: ElemIndex,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        let mut storage = None;
        let elements = self.passive_element_segment(&mut storage, elem_index);
        let mut const_evaluator = ConstExprEvaluator::default();
        Self::table_init_segment(
            store,
            self.id,
            &mut const_evaluator,
            table_index,
            elements,
            dst,
            src,
            len,
        )
    }

    pub(crate) fn table_init_segment(
        store: &mut StoreOpaque,
        id: InstanceId,
        const_evaluator: &mut ConstExprEvaluator,
        table_index: TableIndex,
        elements: &TableSegmentElements,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init

        let mut instance = store.instance_mut(id);
        let table = unsafe { &mut *instance.as_mut().get_table(table_index) };
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;
        let module = instance.env_module().clone();

        match elements {
            TableSegmentElements::Functions(funcs) => {
                let elements = funcs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                table.init_func(
                    dst,
                    elements
                        .iter()
                        .map(|idx| instance.as_mut().get_func_ref(*idx)),
                )?;
            }
            TableSegmentElements::Expressions(exprs) => {
                let exprs = exprs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                let top = module.tables[table_index].ref_type.heap_type.top();
                let mut context = ConstEvalContext::new(id);
                match top {
                    WasmHeapTopType::Extern => table.init_gc_refs(
                        dst,
                        exprs.iter().map(|expr| unsafe {
                            let raw = const_evaluator
                                .eval(store, &mut context, expr)
                                .expect("const expr should be valid");
                            VMGcRef::from_raw_u32(raw.get_externref())
                        }),
                    )?,
                    WasmHeapTopType::Any => table.init_gc_refs(
                        dst,
                        exprs.iter().map(|expr| unsafe {
                            let raw = const_evaluator
                                .eval(store, &mut context, expr)
                                .expect("const expr should be valid");
                            VMGcRef::from_raw_u32(raw.get_anyref())
                        }),
                    )?,
                    WasmHeapTopType::Func => table.init_func(
                        dst,
                        exprs.iter().map(|expr| unsafe {
                            NonNull::new(
                                const_evaluator
                                    .eval(store, &mut context, expr)
                                    .expect("const expr should be valid")
                                    .get_funcref()
                                    .cast(),
                            )
                        }),
                    )?,
                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                }
            }
        }

        Ok(())
    }

    /// Drop an element segment.
    pub(crate) fn elem_drop(self: Pin<&mut Self>, elem_index: ElemIndex) {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop

        self.dropped_elements_mut().insert(elem_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> *mut Memory {
        // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
        // `*mut A`, which should be a safe operation to do.
        unsafe { &raw mut (*self.memories_mut().get_raw_mut(index).unwrap()).1 }
    }

    /// Do a `memory.copy`.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the source or destination ranges are out of
    /// bounds.
    pub(crate) fn memory_copy(
        self: Pin<&mut Self>,
        dst_index: MemoryIndex,
        dst: u64,
        src_index: MemoryIndex,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy

        let src_mem = self.get_memory(src_index);
        let dst_mem = self.get_memory(dst_index);

        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = dst_mem.base.as_ptr().add(dst);
            let src = src_mem.base.as_ptr().add(src);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy(src, dst, len);
        }

        Ok(())
    }

    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
        let oob = || Trap::MemoryOutOfBounds;
        let end = ptr
            .checked_add(len)
            .and_then(|i| usize::try_from(i).ok())
            .ok_or_else(oob)?;
        if end > max {
            Err(oob())
        } else {
            Ok(ptr.try_into().unwrap())
        }
    }

    /// Perform the `memory.fill` operation on a locally defined memory.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the memory range is out of bounds.
    pub(crate) fn memory_fill(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        dst: u64,
        val: u8,
        len: u64,
    ) -> Result<(), Trap> {
        let memory = self.get_memory(memory_index);
        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::write_bytes(dst, val, len);
        }

        Ok(())
    }

    /// Get the internal storage range of a particular Wasm data segment.
    pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
        match self.env_module().passive_data_map.get(&index) {
            Some(range) if !self.dropped_data.contains(index) => range.clone(),
            _ => 0..0,
        }
    }

    /// Given an internal storage range of a Wasm data segment (or subset of a
    /// Wasm data segment), get the data's raw bytes.
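    ///
    /// A hedged sketch combining this with `wasm_data_range` to read a
    /// passive data segment's bytes:
    ///
    /// ```ignore
    /// let range = instance.wasm_data_range(data_index);
    /// let bytes: &[u8] = instance.wasm_data(range);
    /// ```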
    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
        let start = usize::try_from(range.start).unwrap();
        let end = usize::try_from(range.end).unwrap();
        &self.runtime_info.wasm_data()[start..end]
    }

    /// Performs the `memory.init` operation.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the destination range is out of this module's
    /// memory's bounds or if the source range is outside the data segment's
    /// bounds.
    pub(crate) fn memory_init(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        data_index: DataIndex,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        let range = self.wasm_data_range(data_index);
        self.memory_init_segment(memory_index, range, dst, src, len)
    }

    pub(crate) fn memory_init_segment(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        range: Range<u32>,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let data = self.wasm_data(range);
        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
        let len = len as usize;

        unsafe {
            let src_start = data.as_ptr().add(src);
            let dst_start = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy_nonoverlapping(src_start, dst_start, len);
        }

        Ok(())
    }

    /// Drop the given data segment, truncating its length to zero.
    pub(crate) fn data_drop(self: Pin<&mut Self>, data_index: DataIndex) {
        self.dropped_data_mut().insert(data_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a table by index regardless of whether it is locally-defined
    /// or an imported, foreign table. Ensure that the given range of
    /// elements in the table is lazily initialized. We define this
    /// operation all-in-one for safety, to ensure the lazy-init
    /// happens.
    ///
    /// Takes an `Iterator` for the index-range to lazy-initialize,
    /// for flexibility. This can be a range, single item, or empty
    /// sequence, for example. The iterator should return indices in
    /// increasing order, so that the break-at-out-of-bounds behavior
    /// works correctly.
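    ///
    /// A hedged caller-side sketch (eagerly initializing the range that a
    /// subsequent operation is about to touch; `dst` and `len` are
    /// illustrative `u64` values):
    ///
    /// ```ignore
    /// let table = instance.get_table_with_lazy_init(table_index, dst..dst + len);
    /// ```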
1231    pub(crate) fn get_table_with_lazy_init(
1232        self: Pin<&mut Self>,
1233        table_index: TableIndex,
1234        range: impl Iterator<Item = u64>,
1235    ) -> *mut Table {
1236        self.with_defined_table_index_and_instance(table_index, |idx, instance| {
1237            instance.get_defined_table_with_lazy_init(idx, range)
1238        })
1239    }
1240
1241    /// Gets the raw runtime table data structure owned by this instance
1242    /// given the provided `idx`.
1243    ///
1244    /// The `range` specified is eagerly initialized for funcref tables.
1245    pub fn get_defined_table_with_lazy_init(
1246        mut self: Pin<&mut Self>,
1247        idx: DefinedTableIndex,
1248        range: impl Iterator<Item = u64>,
1249    ) -> *mut Table {
1250        let elt_ty = self.tables[idx].1.element_type();
1251
1252        if elt_ty == TableElementType::Func {
1253            for i in range {
1254                let value = match self.tables[idx].1.get(None, i) {
1255                    Some(value) => value,
1256                    None => {
1257                        // Out-of-bounds; caller will handle by likely
1258                        // throwing a trap. No work to do to lazy-init
1259                        // beyond the end.
1260                        break;
1261                    }
1262                };
1263
1264                if !value.is_uninit() {
1265                    continue;
1266                }
1267
1268                // The table element `i` is uninitialized and is now being
1269                // initialized. This must imply that a `precompiled` list of
1270                // function indices is available for this table. The precompiled
1271                // list is extracted and then it is consulted with `i` to
1272                // determine the function that is going to be initialized. Note
1273                // that `i` may be outside the limits of the static
1274                // initialization so it's a fallible `get` instead of an index.
1275                let module = self.env_module();
1276                let precomputed = match &module.table_initialization.initial_values[idx] {
1277                    TableInitialValue::Null { precomputed } => precomputed,
1278                    TableInitialValue::Expr(_) => unreachable!(),
1279                };
1280                // Panicking here helps catch bugs rather than silently truncating by accident.
1281                let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
1282                let func_ref =
1283                    func_index.and_then(|func_index| self.as_mut().get_func_ref(func_index));
1284                self.as_mut().tables_mut()[idx]
1285                    .1
1286                    .set(i, TableElement::FuncRef(func_ref))
1287                    .expect("Table type should match and index should be in-bounds");
1288            }
1289        }
1290
1291        // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
1292        // `*mut A`, which should be a safe operation to do.
1293        unsafe { &raw mut (*self.tables_mut().get_raw_mut(idx).unwrap()).1 }
1294    }
1295
1296    /// Get a table by index regardless of whether it is locally-defined or an
1297    /// imported, foreign table.
1298    pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> *mut Table {
1299        self.with_defined_table_index_and_instance(table_index, |idx, instance| unsafe {
1300            // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
1301            // `*mut A`, which should be a safe operation to do.
1302            &raw mut (*instance.tables_mut().get_raw_mut(idx).unwrap()).1
1303        })
1304    }
1305
1306    /// Get a locally-defined table.
1307    pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> *mut Table {
1308        // SAFETY: the `unsafe` here is projecting from `*mut (A, B)` to
1309        // `*mut A`, which should be a safe operation to do.
1310        unsafe { &raw mut (*self.tables_mut().get_raw_mut(index).unwrap()).1 }
1311    }
1312
    pub(crate) fn with_defined_table_index_and_instance<R>(
        self: Pin<&mut Self>,
        index: TableIndex,
        f: impl FnOnce(DefinedTableIndex, Pin<&mut Instance>) -> R,
    ) -> R {
        if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
            f(defined_table_index, self)
        } else {
            let import = self.imported_table(index);
            unsafe {
                Instance::from_vmctx(import.vmctx.as_non_null(), |foreign_instance| {
                    let foreign_table_def = import.from.as_ptr();
                    let foreign_table_index = foreign_instance.table_index(&*foreign_table_def);
                    f(foreign_table_index, foreign_instance)
                })
            }
        }
    }

    /// Initialize the VMContext data associated with this Instance.
    ///
    /// The `VMContext` memory is assumed to be uninitialized; any field
    /// that we need in a certain state will be explicitly written by this
    /// function.
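    ///
    /// In order, this writes: the vmctx magic value, the store pointer, the
    /// shared type-ids array, the builtin-functions array, the imported
    /// function/table/memory/global/tag arrays, the defined table and memory
    /// definitions, zeroed globals (their const-expression initializers run
    /// after the instance is fully allocated), and the defined tag
    /// definitions.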
    unsafe fn initialize_vmctx(
        mut self: Pin<&mut Self>,
        module: &Module,
        offsets: &VMOffsets<HostPtr>,
        store: StorePtr,
        imports: Imports,
    ) {
        assert!(ptr::eq(module, self.env_module().as_ref()));

        self.vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
            .write(VMCONTEXT_MAGIC);
        self.as_mut().set_store(store.as_raw());

        // Initialize shared types
        let types = NonNull::from(self.runtime_info.type_ids());
        self.type_ids_array().write(types.cast().into());

        // Initialize the built-in functions
        static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
        let ptr = BUILTINS.expose_provenance();
        self.vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
            .write(VmPtr::from(ptr));

        // Initialize the imports
        debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
        ptr::copy_nonoverlapping(
            imports.functions.as_ptr(),
            self.vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
                .as_ptr(),
            imports.functions.len(),
        );
        debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
        ptr::copy_nonoverlapping(
            imports.tables.as_ptr(),
            self.vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
                .as_ptr(),
            imports.tables.len(),
        );
        debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
        ptr::copy_nonoverlapping(
            imports.memories.as_ptr(),
            self.vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
                .as_ptr(),
            imports.memories.len(),
        );
        debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
        ptr::copy_nonoverlapping(
            imports.globals.as_ptr(),
            self.vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
                .as_ptr(),
            imports.globals.len(),
        );

        debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
        ptr::copy_nonoverlapping(
            imports.tags.as_ptr(),
            self.vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
                .as_ptr(),
            imports.tags.len(),
        );

        // N.B.: there is no need to initialize the funcrefs array because we
        // eagerly construct each element in it whenever asked for a reference
        // to that element. In other words, there is no state needed to track
        // the lazy-init, so we don't need to initialize any state now.

        // Initialize the defined tables
        let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
        let tables = self.as_mut().tables_mut();
        for i in 0..module.num_defined_tables() {
            ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
            ptr = ptr.add(1);
        }

        // Initialize the defined memories. This fills in both the
        // `defined_memories` table and the `owned_memories` table at the same
        // time. Entries in `defined_memories` hold a pointer to a definition
        // (for all memories) whereas `owned_memories` holds the actual
        // definitions of memories owned (not shared) by the module.
        let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
        let mut owned_ptr = self.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
        let memories = self.as_mut().memories_mut();
        for i in 0..module.num_defined_memories() {
            let defined_memory_index = DefinedMemoryIndex::new(i);
            let memory_index = module.memory_index(defined_memory_index);
            if module.memories[memory_index].shared {
                let def_ptr = memories[defined_memory_index]
                    .1
                    .as_shared_memory()
                    .unwrap()
                    .vmmemory_ptr();
                ptr.write(VmPtr::from(def_ptr));
            } else {
                owned_ptr.write(memories[defined_memory_index].1.vmmemory());
                ptr.write(VmPtr::from(owned_ptr));
                owned_ptr = owned_ptr.add(1);
            }
            ptr = ptr.add(1);
        }

        // Zero-initialize the globals so that nothing is uninitialized memory
        // after this function returns. The globals are actually initialized
        // with their const expression initializers after the instance is fully
        // allocated.
        for (index, _init) in module.global_initializers.iter() {
            self.global_ptr(index).write(VMGlobalDefinition::new());
        }

        // Initialize the defined tags
        let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
        for i in 0..module.num_defined_tags() {
            let defined_index = DefinedTagIndex::new(i);
            let tag_index = module.tag_index(defined_index);
            let tag = module.tags[tag_index];
            ptr.write(VMTagDefinition::new(
                tag.signature.unwrap_engine_type_index(),
            ));
            ptr = ptr.add(1);
        }
    }

    /// Attempts to convert the host address `addr` to a WebAssembly-based
    /// address recorded in a `WasmFault`.
    ///
    /// This method will check all linear memories that this instance contains
    /// to see if any of them contain `addr`. If one does then `Some` is
    /// returned with metadata about the wasm fault. Otherwise `None` is
    /// returned and `addr` doesn't belong to this instance.
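    ///
    /// A minimal usage sketch (hypothetical caller, e.g. a signal handler
    /// that caught a fault at host address `host_addr`):
    ///
    /// ```ignore
    /// if let Some(fault) = instance.wasm_fault(host_addr) {
    ///     // `wasm_address` is `host_addr` minus the start of the faulting
    ///     // linear memory's accessible range.
    ///     eprintln!("guest fault at {:#x}", fault.wasm_address);
    /// }
    /// ```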
    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        let mut fault = None;
        for (_, (_, memory)) in self.memories.iter() {
            let accessible = memory.wasm_accessible();
            if accessible.start <= addr && addr < accessible.end {
                // All linear memories should be disjoint so assert that no
                // prior fault has been found.
                assert!(fault.is_none());
                fault = Some(WasmFault {
                    memory_size: memory.byte_size(),
                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
                });
            }
        }
        fault
    }

    /// Returns the id that this instance is assigned within its store.
    pub fn id(&self) -> InstanceId {
        self.id
    }

    /// Get all memories within this instance.
    ///
    /// Returns both imported and defined memories.
    ///
    /// Returns both exported and non-exported memories.
    ///
    /// Gives access to the full memories space.
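    ///
    /// A minimal usage sketch (hypothetical caller):
    ///
    /// ```ignore
    /// // Enumerate every memory, imported and defined alike.
    /// for (index, _export) in instance.all_memories() {
    ///     println!("memory {}", index.as_u32());
    /// }
    /// ```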
    pub fn all_memories<'a>(
        &'a self,
    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + 'a {
        let indices = (0..self.env_module().memories.len())
            .map(|i| MemoryIndex::new(i))
            .collect::<Vec<_>>();
        indices
            .into_iter()
            .map(|i| (i, self.get_exported_memory(i)))
    }

    /// Return the memories defined in this instance (not imported).
    pub fn defined_memories<'a>(&'a self) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
        let num_imported = self.env_module().num_imported_memories;
        self.all_memories()
            .skip(num_imported)
            .map(|(_i, memory)| memory)
    }

    /// Lookup an item with the given index.
    ///
    /// # Panics
    ///
    /// Panics if `export` is not valid for this instance.
    pub fn get_export_by_index_mut(self: Pin<&mut Self>, export: EntityIndex) -> Export {
        match export {
            EntityIndex::Function(i) => Export::Function(self.get_exported_func(i)),
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(i)),
            EntityIndex::Memory(i) => Export::Memory(self.get_exported_memory(i)),
            EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(i)),
        }
    }

    fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
        // SAFETY: this is a pin-projection to get a mutable reference to an
        // internal field and is safe so long as the `&mut Self` temporarily
        // created is not overwritten, which it isn't here.
        unsafe { &mut self.get_unchecked_mut().store }
    }

    fn dropped_elements_mut(self: Pin<&mut Self>) -> &mut EntitySet<ElemIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_elements }
    }

    fn dropped_data_mut(self: Pin<&mut Self>) -> &mut EntitySet<DataIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_data }
    }

    fn memories_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().memories }
    }

    fn tables_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().tables }
    }

    #[cfg(feature = "wmemcheck")]
    pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
    }
}

// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
// last field of `Instance`.
unsafe impl InstanceLayout for Instance {
    const INIT_ZEROED: bool = false;
    type VMContext = VMContext;

    fn layout(&self) -> Layout {
        Self::alloc_layout(self.runtime_info.offsets())
    }

    fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
        &self.vmctx
    }

    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
        &mut self.vmctx
    }
}

pub type InstanceHandle = OwnedInstance<Instance>;

/// A handle holding an `Instance` of a WebAssembly module.
///
/// This structure is an owning handle of the `instance` contained internally.
/// When this value goes out of scope it will deallocate the `Instance` and all
/// memory associated with it.
///
/// Note that this lives within a `StoreOpaque` on a list of instances that a
/// store is keeping alive.
#[derive(Debug)]
#[repr(transparent)] // guarantee this is a zero-cost wrapper
pub struct OwnedInstance<T: InstanceLayout> {
    /// The raw pointer to the instance that was allocated.
    ///
    /// Note that this is not equivalent to `Box<Instance>` because the
    /// allocation here has a `VMContext` trailing after it. Thus the custom
    /// destructor to invoke the `dealloc` function with the appropriate
    /// layout.
    instance: SendSyncPtr<T>,
    _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
}
/// Structure that must be placed at the end of a type implementing
/// `InstanceLayout`.
#[repr(align(16))] // match the alignment of VMContext
pub struct OwnedVMContext<T> {
    /// A pointer to the `vmctx` field at the end of the structure.
    ///
    /// If you're looking at this, a reasonable question would be "why do we
    /// need a pointer to ourselves?" because after all the pointer's value is
    /// trivially derivable from any `&Instance` pointer. The rationale for this
    /// field's existence is subtle, but it's required for correctness. The
    /// short version is "this makes Miri happy".
    ///
    /// The long version of why this field exists is that the rules Miri uses
    /// to ensure pointers are used correctly have various conditions on them
    /// that depend on how pointers are used. More specifically if `*mut T` is
    /// derived from `&mut T`, then that invalidates all prior pointers derived
    /// from the `&mut T`. This means that while we liberally want to re-acquire
    /// a `*mut VMContext` throughout the implementation of `Instance`, the
    /// trivial approach of a function
    /// `fn vmctx(Pin<&mut Instance>) -> *mut VMContext` would effectively
    /// invalidate all prior `*mut VMContext` pointers acquired. The purpose of
    /// this field is to serve as a sort of source-of-truth for where
    /// `*mut VMContext` pointers come from.
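    ///
    /// A minimal sketch of the hazard (hypothetical code, under Miri's
    /// aliasing rules; `vmctx_via_refmut` is an illustrative name, not a real
    /// method):
    ///
    /// ```ignore
    /// // Re-deriving the pointer from `&mut self` each time invalidates
    /// // pointers returned by earlier calls:
    /// let a = instance.vmctx_via_refmut(); // derives from `&mut Instance`
    /// let b = instance.vmctx_via_refmut(); // invalidates `a`
    ///
    /// // Loading a stored pointer whose provenance covers the whole
    /// // allocation keeps previously returned pointers valid:
    /// let a = instance.vmctx(); // reads `vmctx_self_reference`
    /// let b = instance.vmctx(); // `a` remains valid
    /// ```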
    ///
    /// This field is initialized when the `Instance` is created with the
    /// original allocation's pointer. That means that the provenance of this
    /// pointer contains the entire allocation (both instance and `VMContext`).
    /// This provenance bit is then "carried through" where `fn vmctx` will base
    /// all returned pointers on this pointer itself. This provides the means of
    /// never invalidating this pointer throughout Miri and additionally being
    /// able to still temporarily have `Pin<&mut Instance>` methods and such.
    ///
    /// It's important to note, though, that this is not here purely for Miri.
    /// The careful construction of the `fn vmctx` method has ramifications on
    /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
    /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
    /// deriving VMContext pointers from this pointer, LLVM is specifically
    /// hinted that trickery is afoot, which properly informs its `noalias`
    /// annotations and analyses. More-or-less this pointer is actually loaded
    /// in LLVM IR which helps defeat otherwise-present aliasing optimizations,
    /// which we want, since writes to this should basically never be optimized
    /// out.
    ///
    /// As a final note it's worth pointing out that the machine code generated
    /// for `fn vmctx` is still as one would expect. This member isn't actually
    /// ever loaded at runtime (or at least shouldn't be). Perhaps in the
    /// future if the memory consumption of this field is a problem we could
    /// shrink it slightly, but for now one extra pointer per wasm instance
    /// seems not too bad.
    vmctx_self_reference: SendSyncPtr<T>,

    /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
    /// safe operation.
    _marker: core::marker::PhantomPinned,
}

impl<T> OwnedVMContext<T> {
    /// Creates a new blank vmctx to place at the end of an instance.
    pub fn new() -> OwnedVMContext<T> {
        OwnedVMContext {
            vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
            _marker: core::marker::PhantomPinned,
        }
    }
}

/// Helper trait to plumb both core instances and component instances into
/// `OwnedInstance` below.
///
/// # Safety
///
/// This trait requires `layout` to correctly describe `Self` and appropriately
/// allocate space for `Self::VMContext` afterwards. Additionally the field
/// returned by `owned_vmctx()` must be the last field in the structure.
pub unsafe trait InstanceLayout {
    /// Whether or not to allocate this instance with `alloc_zeroed` or `alloc`.
    const INIT_ZEROED: bool;

    /// The trailing `VMContext` type at the end of this instance.
    type VMContext;

    /// The memory layout to use to allocate and deallocate this instance.
    fn layout(&self) -> Layout;

    fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;

    /// Returns the `vmctx_self_reference` set above.
    #[inline]
    fn vmctx(&self) -> NonNull<Self::VMContext> {
        // The definition of this method is subtle but intentional. The goal
        // here is that effectively this should return `&mut self.vmctx`, but
        // it's not quite so simple. Some more documentation is available on the
        // `vmctx_self_reference` field, but the general idea is that we're
        // creating a pointer to return with proper provenance. Provenance is
        // still in the works in Rust at the time of this writing but the load
        // of the `self.vmctx_self_reference` field is important here as it
        // affects how LLVM thinks about aliasing with respect to the returned
        // pointer.
        //
        // The intention of this method is to codegen to machine code as `&mut
        // self.vmctx`, however. While it doesn't show up like this in LLVM IR
        // (there's an actual load of the field) it does look like that by the
        // time the backend runs. (that's magic to me, the backend removing
        // loads...)
        let owned_vmctx = self.owned_vmctx();
        let owned_vmctx_raw = NonNull::from(owned_vmctx);
        // SAFETY: it's part of the contract of `InstanceLayout` and the usage
        // with `OwnedInstance` that this indeed points to the vmctx.
        let addr = unsafe { owned_vmctx_raw.add(1) };
        owned_vmctx
            .vmctx_self_reference
            .as_non_null()
            .with_addr(addr.addr())
    }

    /// Helper function to access various locations offset from our `*mut
    /// VMContext` object.
    ///
    /// Note that this method takes `&self` as an argument but returns
    /// `NonNull<T>` which is frequently used to mutate said memory. This is an
    /// intentional design decision where the safety of the modification of
    /// memory is placed as a burden onto the caller. The implementation of this
    /// method explicitly does not require `&mut self` to acquire mutable
    /// provenance to update the `VMContext` region. Instead all pointers into
    /// the `VMContext` area have provenance/permissions to write.
    ///
    /// Note, though, that care must be taken to ensure that reads/writes of
    /// memory only happen where appropriate; for example a non-atomic write
    /// (as most are) should never happen concurrently with another read or
    /// write. It's generally the caller's burden to adhere to this.
    ///
    /// Also of note is that most of the time the usage of this method falls
    /// into one of:
    ///
    /// * Something in the VMContext is being read or written. In that case use
    ///   `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due to
    ///   that having a safer lifetime.
    ///
    /// * A pointer is being created to pass to other VM* data structures. In
    ///   that situation the lifetime of all VM data structures is typically
    ///   tied to the `Store<T>` which is what provides the guarantees around
    ///   concurrency/etc.
    ///
    /// There's quite a lot of unsafety riding on this method, especially
    /// related to the ascription `T` of the byte `offset`. It's hoped that in
    /// the future we're able to settle on a design that is safer in theory.
    ///
    /// # Safety
    ///
    /// This method is unsafe because the `offset` must be within bounds of the
    /// `VMContext` object trailing this instance. Additionally `T` must be a
    /// valid ascription of the value that resides at that location.
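    ///
    /// A minimal sketch of a valid use (hypothetical caller; `offsets` is
    /// assumed to be this instance's `VMOffsets`):
    ///
    /// ```ignore
    /// // Read the 32-bit magic value written at instantiation time.
    /// let magic: u32 = unsafe {
    ///     instance
    ///         .vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
    ///         .read()
    /// };
    /// debug_assert_eq!(magic, VMCONTEXT_MAGIC);
    /// ```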
    unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
        // SAFETY: the safety requirements of `byte_add` are forwarded to this
        // method's caller.
        unsafe {
            self.vmctx()
                .byte_add(usize::try_from(offset.into()).unwrap())
                .cast()
        }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&self` to the returned reference `&T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&mut self` to the returned reference `&mut T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
        self: Pin<&mut Self>,
        offset: impl Into<u32>,
    ) -> &mut T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
    }
}

impl<T: InstanceLayout> OwnedInstance<T> {
    /// Allocates a new `OwnedInstance` and places `instance` inside of it.
    ///
    /// This will allocate memory for `instance` plus its trailing
    /// `VMContext`, then move `instance` into that allocation.
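    ///
    /// The resulting allocation looks roughly like this (the trailing
    /// `VMContext` size comes from `InstanceLayout::layout`):
    ///
    /// ```text
    /// +--------------------+---------------------------+
    /// | T (instance state) | VMContext (variable size) |
    /// +--------------------+---------------------------+
    /// ^ returned pointer   ^ vmctx_self_reference
    /// ```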
    pub(super) fn new(mut instance: T) -> OwnedInstance<T> {
        let layout = instance.layout();
        debug_assert!(layout.size() >= size_of_val(&instance));
        debug_assert!(layout.align() >= align_of_val(&instance));

        // SAFETY: it's up to us to assert that `layout` has a non-zero size,
        // which is asserted here.
        let ptr = unsafe {
            assert!(layout.size() > 0);
            if T::INIT_ZEROED {
                alloc::alloc::alloc_zeroed(layout)
            } else {
                alloc::alloc::alloc(layout)
            }
        };
        if ptr.is_null() {
            alloc::alloc::handle_alloc_error(layout);
        }
        let instance_ptr = NonNull::new(ptr.cast::<T>()).unwrap();

        // SAFETY: it's part of the unsafe contract of `InstanceLayout` that the
        // `add` here is appropriate for the layout allocated.
        let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
        instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();

        // SAFETY: we allocated above and it's an unsafe contract of
        // `InstanceLayout` that the layout is suitable for writing the
        // instance.
        unsafe {
            instance_ptr.write(instance);
        }

        let ret = OwnedInstance {
            instance: SendSyncPtr::new(instance_ptr),
            _marker: marker::PhantomData,
        };

        // Double-check various vmctx calculations are correct.
        debug_assert_eq!(
            vmctx_self_reference.addr(),
            // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
            // the last field to get a pointer to 1-byte-past-the-end of an
            // object, which should be valid.
            unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
        );
        debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());

        ret
    }

    /// Gets the raw underlying `&Instance` from this handle.
    pub fn get(&self) -> &T {
        // SAFETY: this is an owned instance handle that retains exclusive
        // ownership of the `Instance` inside. With `&self` given we know
        // this pointer is valid and the returned lifetime is connected
        // to `self` so that should also be valid.
        unsafe { self.instance.as_non_null().as_ref() }
    }

    /// Same as [`Self::get`] except for mutability.
    pub fn get_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: The lifetime concerns here are the same as `get` above.
        // Otherwise `new_unchecked` is used here to uphold the contract that
        // instances are always pinned in memory.
        unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
    }
}

impl<T: InstanceLayout> Drop for OwnedInstance<T> {
    fn drop(&mut self) {
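        // SAFETY: `instance` was allocated in `OwnedInstance::new` with this
        // same layout and is uniquely owned by this handle, so it's sound to
        // drop it in place and then deallocate with the matching layout.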
        unsafe {
            let layout = self.get().layout();
            ptr::drop_in_place(self.instance.as_ptr());
            alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
        }
    }
}