wasmtime/runtime/vm/instance.rs
//! An `Instance` contains all the runtime state used by execution of a
//! wasm module (except its callstack and register state). An
//! `InstanceHandle` is a reference-counting handle for an `Instance`.

use crate::code::ModuleWithCode;
use crate::module::ModuleRegistry;
use crate::runtime::vm::export::{Export, ExportMemory};
use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
use crate::runtime::vm::table::{Table, TableElementType};
use crate::runtime::vm::vmcontext::{
    VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
    VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
    VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
};
use crate::runtime::vm::{
    GcStore, HostResult, Imports, ModuleRuntimeInfo, SendSyncPtr, VMGlobalKind, VMStore,
    VMStoreRawPtr, VmPtr, VmSafe, WasmFault, catch_unwind_and_record_trap,
};
use crate::store::{
    AutoAssertNoGc, InstanceId, StoreId, StoreInstanceId, StoreOpaque, StoreResourceLimiter,
};
use crate::vm::{VMWasmCallFunction, ValRaw};
use crate::{OpaqueRootScope, Val};
use crate::{ValType, prelude::*};
use alloc::sync::Arc;
use core::alloc::Layout;
use core::marker;
use core::ops::Range;
use core::pin::Pin;
use core::ptr::NonNull;
#[cfg(target_has_atomic = "64")]
use core::sync::atomic::AtomicU64;
use core::{mem, ptr};
#[cfg(feature = "gc")]
use wasmtime_environ::ModuleInternedTypeIndex;
use wasmtime_environ::error::OutOfMemory;
use wasmtime_environ::{
    DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
    ElemIndex, EntityIndex, EntityRef, FuncIndex, GlobalIndex, HostPtr, MemoryIndex,
    NeedsGcRooting, PtrSize, TableIndex, TableInitialValue, TagIndex, Trap, VMCONTEXT_MAGIC,
    VMOffsets, VMSharedTypeIndex, packed_option::ReservedValue,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::Wmemcheck;

mod allocator;
pub use allocator::*;
/// A type that roughly corresponds to a WebAssembly instance, but is also used
/// for host-defined objects.
///
/// Instances here can correspond to actual instantiated modules, but this type
/// is also used ubiquitously for host-defined objects. For example, creating a
/// host-defined memory will have a `module` that looks like it exports a single
/// memory (and similar for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether they were created on the host or through a module.
///
/// # Ownership
///
/// This structure is never allocated directly but is instead managed through
/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
/// dynamic size corresponding to the `module` configured within. Memory
/// management of this structure is always done through `InstanceHandle` as the
/// sole owner of an instance.
///
/// # `Instance` and `Pin`
///
/// Each instance is accompanied by trailing memory for the appropriate
/// `VMContext`. The `Instance` also holds `runtime_info` and other
/// information pointing to relevant offsets for the `VMContext`. Thus it is
/// not sound to mutate `runtime_info` after an instance is created. More
/// generally it's also not safe to "swap" instances; for example, given two
/// `&mut Instance` values it's not sound to swap them as then the `VMContext`
/// values are inaccurately described.
///
/// To encapsulate this guarantee this type is only ever mutated through Rust's
/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>`, which
/// statically disallows safe access to `&mut Instance`. There are assorted
/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
/// individual fields, for example `memories_mut`. More methods can be added as
/// necessary, and methods may also be added to project multiple fields at a
/// time if needed. The precise ergonomics around getting mutable access to
/// some fields (but notably not `runtime_info`) are likely to evolve over
/// time.
///
/// Note that it is essentially never sound to pass around `&mut Instance`.
/// That should always instead be `Pin<&mut Instance>`. All usage of
/// `Pin::new_unchecked` should be here in this module in just a few `unsafe`
/// locations, and it's recommended to use existing helpers if you can.
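///
/// As an illustrative sketch (the method name here is hypothetical; the real
/// helpers such as `memories_mut` live below), a pin-projection method looks
/// roughly like this:
///
/// ```ignore
/// impl Instance {
///     fn example_field_mut(self: Pin<&mut Self>) -> &mut TryEntitySet<DataIndex> {
///         // SAFETY: the temporary `&mut Instance` is used only to borrow a
///         // single field and never to move or overwrite the instance itself.
///         unsafe { &mut self.get_unchecked_mut().dropped_data }
///     }
/// }
/// ```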
#[repr(C)] // ensure that the vmctx field is last.
pub struct Instance {
    /// The index within a `Store` at which this instance lives.
    id: InstanceId,

    /// The runtime info (corresponding to the "compiled module"
    /// abstraction in higher layers) that is retained and needed for
    /// lazy initialization. This provides access to the underlying
    /// Wasm module entities, the compiled JIT code, metadata about
    /// functions, lazy initialization state, etc.
    //
    // SAFETY: this field cannot be overwritten after an instance is created. It
    // must contain this exact same value for the entire lifetime of this
    // instance. This enables borrowing the info's `Module` and this instance at
    // the same time (instance mutably, module not). Additionally it enables
    // borrowing a store mutably at the same time as a contained instance.
    runtime_info: ModuleRuntimeInfo,

    /// WebAssembly linear memory data.
    ///
    /// This is where all runtime information about defined linear memories in
    /// this module lives.
    ///
    /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// memory.
    memories: TryPrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,

    /// WebAssembly table data.
    ///
    /// Like memories, this is only for defined tables in the module and
    /// contains all of their runtime state.
    ///
    /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// table.
    tables: TryPrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,

    /// Evaluated passive element segments.
    ///
    /// If an entry is `None`, then it has been dropped.
    //
    // TODO(#12621): This should be a `TrySecondaryMap<PassiveElemIndex, _>`
    // but that type is currently footgun-y / isn't actually OOM-safe yet.
    passive_elements: TryVec<Option<(NeedsGcRooting, TryVec<ValRaw>)>>,

    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: TryEntitySet<DataIndex>,

    // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
    // memory 0.
    #[cfg(feature = "wmemcheck")]
    pub(crate) wmemcheck_state: Option<Wmemcheck>,

    /// Self-pointer back to `Store<T>` and its functions. Not present for
    /// the brief time that `Store<T>` is itself being created. Also not
    /// present for some niche uses that are disconnected from stores (e.g.
    /// cross-thread stuff used in `InstancePre`).
    store: Option<VMStoreRawPtr>,

    /// Additional context used by compiled wasm code. This field is last, and
    /// represents a dynamically-sized array that extends beyond the nominal
    /// end of the struct (similar to a flexible array member).
    vmctx: OwnedVMContext<VMContext>,
}

impl Instance {
    /// Create an instance at the given memory address.
    ///
    /// It is assumed the memory was properly aligned and the
    /// allocation was `alloc_size` in bytes.
    ///
    /// # Safety
    ///
    /// The `req.imports` field must be appropriately sized/typed for the module
    /// being allocated according to `req.runtime_info`. Additionally `memories`
    /// and `tables` must have been allocated for `req.store`.
    unsafe fn new(
        req: InstanceAllocationRequest,
        memories: TryPrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
        tables: TryPrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) -> Result<InstanceHandle, OutOfMemory> {
        let module = req.runtime_info.env_module();
        let memory_tys = &module.memories;
        let passive_elements = TryVec::with_capacity(module.passive_elements.len())?;
        let dropped_data = TryEntitySet::with_capacity(module.passive_data_map.len())?;

        #[cfg(feature = "wmemcheck")]
        let wmemcheck_state = if req.store.engine().config().wmemcheck {
            let size = memory_tys
                .iter()
                .next()
                .map(|memory| memory.1.limits.min)
                .unwrap_or(0)
                * 64
                * 1024;
            Some(Wmemcheck::new(size.try_into().unwrap()))
        } else {
            None
        };
        #[cfg(not(feature = "wmemcheck"))]
        let _ = memory_tys;

        let mut ret = OwnedInstance::new(Instance {
            id: req.id,
            runtime_info: req.runtime_info.clone(),
            memories,
            tables,
            passive_elements,
            dropped_data,
            #[cfg(feature = "wmemcheck")]
            wmemcheck_state,
            store: None,
            vmctx: OwnedVMContext::new(),
        })?;

        // SAFETY: this vmctx was allocated with the same layout above, so it
        // should be safe to initialize with the same values here.
        unsafe {
            ret.get_mut().initialize_vmctx(req.store, req.imports);
        }
        Ok(ret)
    }

    /// Trace element segment GC roots inside this `Instance`.
    ///
    /// # Safety
    ///
    /// This instance must live for the duration of the associated GC cycle.
    #[cfg(feature = "gc")]
    pub(crate) unsafe fn trace_element_segment_roots(
        self: Pin<&mut Self>,
        gc_roots: &mut crate::vm::GcRootsList,
    ) {
        for segment in self.passive_elements_mut().iter_mut() {
            if let Some((wasmtime_environ::NeedsGcRooting::Yes, elems)) = segment {
                for e in elems {
                    let Some(root) = e.as_vmgc_ref_ptr() else {
                        continue;
                    };
                    let root: SendSyncPtr<super::VMGcRef> = root.into();

                    // Safety: We know this is a type that needs GC rooting and
                    // the lifetime is implied by our safety contract.
                    unsafe {
                        gc_roots.add_root(root, "passive element segment");
                    }
                }
            }
        }
    }

    /// Converts a raw `VMContext` pointer into a raw `Instance` pointer.
    ///
    /// # Safety
    ///
    /// Calling this function safely requires that `vmctx` is a valid allocation
    /// of a `VMContext` which is derived from `Instance::new`. To safely
    /// convert the returned raw pointer into a safe instance pointer callers
    /// will also want to uphold guarantees such as:
    ///
    /// * The instance should not be in use elsewhere. For example you can't
    ///   call this function twice, turn both raw pointers into safe pointers,
    ///   and then use both safe pointers.
    /// * There should be no other active mutable borrow of any other instance
    ///   within the same store. Note that this is not restricted to just this
    ///   instance pointer, but to all instances in a store. Instances can
    ///   safely traverse to other instances "laterally", meaning that a mutable
    ///   borrow on one is a mutable borrow on all.
    /// * There should be no active mutable borrow of the store accessible at
    ///   the same time the instance is turned into a safe borrow. Instances
    ///   are owned by a store and a store can be used to acquire a safe
    ///   instance borrow at any time.
    /// * The lifetime of the usage of the instance should not be unnecessarily
    ///   long; for example it cannot be `'static`.
    ///
    /// Other entrypoints exist for converting from a raw `VMContext` to a safe
    /// pointer such as:
    ///
    /// * `Instance::enter_host_from_wasm`
    /// * `Instance::sibling_vmctx{,_mut}`
    ///
    /// These place further restrictions on the API signature to satisfy some of
    /// the above points.
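    ///
    /// As a rough layout sketch (sizes illustrative): an `Instance` is
    /// immediately followed in memory by its trailing `VMContext`, so the two
    /// pointers differ by exactly `mem::size_of::<Instance>()`:
    ///
    /// ```text
    /// [ Instance (fixed size) ][ VMContext (dynamically sized) ... ]
    /// ^ returned pointer       ^ `vmctx` argument
    /// ```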
    #[inline]
    pub(crate) unsafe fn from_vmctx(vmctx: NonNull<VMContext>) -> NonNull<Instance> {
        // SAFETY: The validity of `byte_sub` relies on `vmctx` being a valid
        // allocation.
        unsafe {
            vmctx
                .byte_sub(mem::size_of::<Instance>())
                .cast::<Instance>()
        }
    }

    /// Encapsulated entrypoint to the host from WebAssembly, converting a raw
    /// `VMContext` pointer into a `VMStore` plus an `InstanceId`.
    ///
    /// This is an entrypoint for core wasm entering back into the host. This is
    /// used for both host functions and libcalls, for example. This will execute
    /// the closure `f` with safer internal types than a raw `VMContext`
    /// pointer.
    ///
    /// The closure `f` will have its errors caught, handled, and translated to
    /// an ABI-safe return value to give back to wasm. This includes both normal
    /// errors such as traps as well as panics.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vmctx` is a valid allocation and is safe to
    /// dereference at this time. That's generally only true when it's a
    /// wasm-provided value and this is the first function called after entering
    /// the host. Otherwise this could unsafely alias the store with a mutable
    /// pointer, for example.
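    ///
    /// A hedged usage sketch (the libcall name, signature, and return type
    /// here are hypothetical, not a real Wasmtime libcall):
    ///
    /// ```ignore
    /// unsafe extern "C" fn example_libcall(vmctx: NonNull<VMContext>) -> u32 {
    ///     // First call after entering the host: hand the raw vmctx to the
    ///     // encapsulated entrypoint; `f` receives a store and an instance id
    ///     // instead of a raw pointer, and its result is translated into the
    ///     // ABI-safe value handed back to wasm.
    ///     unsafe { Instance::enter_host_from_wasm(vmctx, |_store, _instance_id| 0u32) }
    /// }
    /// ```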
    #[inline]
    pub(crate) unsafe fn enter_host_from_wasm<R>(
        vmctx: NonNull<VMContext>,
        f: impl FnOnce(&mut dyn VMStore, InstanceId) -> R,
    ) -> R::Abi
    where
        R: HostResult,
    {
        // SAFETY: It's a contract of this function that `vmctx` is a valid
        // pointer with neither the store nor other instances actively in use
        // when this is called, so it should be safe to acquire a mutable
        // pointer to the store and read the instance pointer.
        let (store, instance) = unsafe {
            let instance = Instance::from_vmctx(vmctx);
            let instance = instance.as_ref();
            let store = &mut *instance.store.unwrap().0.as_ptr();
            (store, instance.id)
        };

        // Thread the `store` and `instance` through panic/trap infrastructure
        // back into `f`.
        catch_unwind_and_record_trap(store, |store| f(store, instance))
    }

    /// Converts the provided `*mut VMContext` to an `Instance` pointer and
    /// returns it with the same lifetime as `self`.
    ///
    /// This function can be used when traversing a `VMContext` to optionally
    /// reach into the context needed for imports.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    #[inline]
    unsafe fn sibling_vmctx<'a>(&'a self, vmctx: NonNull<VMContext>) -> &'a Instance {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer, so the pointer arithmetic performed here stays within
        // a valid allocation.
        let ptr = unsafe { Instance::from_vmctx(vmctx) };
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime.
        unsafe { ptr.as_ref() }
    }

    /// Same as [`Self::sibling_vmctx`], but the mutable version.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    ///
    /// (Note that it is *NOT* required that `vmctx` be distinct from this
    /// instance's `vmctx`, or that usage of the resulting instance is limited
    /// to its defined items! The returned borrow has the same lifetime as
    /// `self`, which means that this instance cannot be used while the
    /// resulting instance is in use, and we therefore do not need to worry
    /// about mutable aliasing between this instance and the resulting
    /// instance.)
    #[inline]
    unsafe fn sibling_vmctx_mut<'a>(
        self: Pin<&'a mut Self>,
        vmctx: NonNull<VMContext>,
    ) -> Pin<&'a mut Instance> {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer such that this pointer arithmetic is valid.
        let mut ptr = unsafe { Instance::from_vmctx(vmctx) };

        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime. Finally mutable references to an instance are
        // always through `Pin`, so it's safe to create a pin-pointer here.
        unsafe { Pin::new_unchecked(ptr.as_mut()) }
    }

    pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
        self.runtime_info.env_module()
    }

    pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
        match &self.runtime_info {
            ModuleRuntimeInfo::Module(m) => Some(m),
            ModuleRuntimeInfo::Bare(_) => None,
        }
    }

    /// Translate a module-level interned type index into an engine-level
    /// interned type index.
    #[cfg(feature = "gc")]
    pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
        self.runtime_info.engine_type_index(module_index)
    }

    #[inline]
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }

    /// Return the indexed `VMFunctionImport`.
    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
    }
    /// Return the indexed `VMTableImport`.
    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
    }

    /// Return the indexed `VMMemoryImport`.
    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
    }

    /// Return the indexed `VMGlobalImport`.
    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
    }

    /// Return the indexed `VMTagImport`.
    fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
    }

    /// Return the indexed `VMTagDefinition`.
    pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
    }

    /// Return the indexed `VMTableDefinition`.
    pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
        unsafe { self.table_ptr(index).read() }
    }

    /// Updates the value for a defined table to `VMTableDefinition`.
    fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
        unsafe {
            self.table_ptr(index).write(table);
        }
    }

    /// Return a pointer to the `index`'th table within this instance, stored
    /// in vmctx memory.
    pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
    }

    /// Get a locally defined or imported memory.
    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
        if let Some(defined_index) = self.env_module().defined_memory_index(index) {
            self.memory(defined_index)
        } else {
            let import = self.imported_memory(index);
            unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
        }
    }

    /// Return the indexed `VMMemoryDefinition`, loaded from vmctx memory
    /// already.
    #[inline]
    pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
        unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
    }

    /// Set the indexed memory to `VMMemoryDefinition`.
    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
        unsafe {
            self.memory_ptr(index).write(mem);
        }
    }

    /// Return the address of the specified memory at `index` within this vmctx.
    ///
    /// Note that the returned pointer resides in wasm-code-readable memory in
    /// the vmctx.
    #[inline]
    pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
        unsafe {
            self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
                .as_non_null()
        }
    }

    /// Return the indexed `VMGlobalDefinition`.
    pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
    }

    /// Get all globals within this instance.
    ///
    /// Returns both imported and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full globals space.
    pub fn all_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (GlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        module
            .globals
            .keys()
            .map(move |idx| (idx, self.get_exported_global(store, idx)))
    }

    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        self.all_globals(store)
            .skip(module.num_imported_globals)
            .map(move |(i, global)| (module.defined_global_index(i).unwrap(), global))
    }

    /// Return a pointer to the interrupts structure.
    #[inline]
    pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
    }

    /// Return a pointer to the global epoch counter used by this instance.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
        let offset = self.offsets().ptr.vmctx_epoch_ptr();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    /// Return a pointer to the collector-specific heap data.
    pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
        let offset = self.offsets().ptr.vmctx_gc_heap_data();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: &StoreOpaque) {
        // FIXME: should be more targeted ideally with the `unsafe` than just
        // throwing this entire function in a large `unsafe` block.
        unsafe {
            *self.as_mut().store_mut() = Some(VMStoreRawPtr(store.traitobj()));
            self.vm_store_context()
                .write(Some(store.vm_store_context_ptr().into()));
            #[cfg(target_has_atomic = "64")]
            {
                *self.as_mut().epoch_ptr() =
                    Some(NonNull::from(store.engine().epoch_counter()).into());
            }

            if self.env_module().needs_gc_heap {
                self.as_mut().set_gc_heap(Some(store.unwrap_gc_store()));
            } else {
                self.as_mut().set_gc_heap(None);
            }
        }
    }

    unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
        if let Some(gc_store) = gc_store {
            *self.gc_heap_data() = Some(unsafe { gc_store.gc_heap.vmctx_gc_heap_data().into() });
        } else {
            *self.gc_heap_data() = None;
        }
    }

    /// Return a reference to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> NonNull<VMContext> {
        InstanceLayout::vmctx(self)
    }

    /// Lookup a function by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    ///
    /// # Safety
    ///
    /// The `store` parameter must be the store that owns this instance and the
    /// functions that this instance can reference.
    pub unsafe fn get_exported_func(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        index: FuncIndex,
    ) -> crate::Func {
        let func_ref = self.get_func_ref(registry, index).unwrap();

        // SAFETY: the validity of `func_ref` is guaranteed by the validity of
        // `self`, and the contract that `store` must own `func_ref` is a
        // contract of this function itself.
        unsafe { crate::Func::from_vm_func_ref(store, func_ref) }
    }

    /// Lookup a table by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    pub fn get_exported_table(&self, store: StoreId, index: TableIndex) -> crate::Table {
        let (id, def_index) = if let Some(def_index) = self.env_module().defined_table_index(index)
        {
            (self.id, def_index)
        } else {
            let import = self.imported_table(index);
            // SAFETY: validity of this `Instance` guarantees validity of the
            // `vmctx` pointer being read here to find the transitive
            // `InstanceId` that the import is associated with.
            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
            (id, import.index)
        };
        crate::Table::from_raw(StoreInstanceId::new(store, id), def_index)
    }

    /// Lookup a memory by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    #[cfg_attr(
        not(feature = "threads"),
        expect(unused_variables, reason = "definitions cfg'd to dummy",)
    )]
    pub fn get_exported_memory(&self, store: StoreId, index: MemoryIndex) -> ExportMemory {
        let module = self.env_module();
        if module.memories[index].shared {
            let (memory, import) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (
                        self.get_defined_memory(def_index),
                        self.get_defined_memory_vmimport(def_index),
                    )
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of
                    // the `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let instance = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()) };
                    (instance.get_defined_memory(import.index), *import)
                };

            let vm = memory.as_shared_memory().unwrap().clone();
            ExportMemory::Shared(vm, import)
        } else {
            let (id, def_index) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (self.id, def_index)
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of the
                    // `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
                    (id, import.index)
                };

            // SAFETY: `from_raw` requires that the memory is not shared, which
            // was tested above in this if/else.
            let store_id = StoreInstanceId::new(store, id);
            ExportMemory::Unshared(unsafe { crate::Memory::from_raw(store_id, def_index) })
        }
    }

    /// Lookup a global by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    pub(crate) fn get_exported_global(&self, store: StoreId, index: GlobalIndex) -> crate::Global {
        // If this global is defined within this instance, then it's easy to
        // calculate the `Global`.
        if let Some(def_index) = self.env_module().defined_global_index(index) {
            let instance = StoreInstanceId::new(store, self.id);
            return crate::Global::from_core(instance, def_index);
        }

        // For imported globals it's required to match on the `kind` to
        // determine which `Global` constructor is going to be invoked.
        let import = self.imported_global(index);
        match import.kind {
            VMGlobalKind::Host(index) => crate::Global::from_host(store, index),
            VMGlobalKind::Instance(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports, meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = VMContext::from_opaque(import.vmctx.unwrap().as_non_null());
                    self.sibling_vmctx(vmctx).id
                };
                crate::Global::from_core(StoreInstanceId::new(store, id), index)
            }
            #[cfg(feature = "component-model")]
            VMGlobalKind::ComponentFlags(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports, meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = super::component::VMComponentContext::from_opaque(
                        import.vmctx.unwrap().as_non_null(),
                    );
                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
                };
                crate::Global::from_component_flags(
                    crate::component::store::StoreComponentInstanceId::new(store, id),
                    index,
                )
            }
            #[cfg(feature = "component-model")]
            VMGlobalKind::TaskMayBlock => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports, meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = super::component::VMComponentContext::from_opaque(
                        import.vmctx.unwrap().as_non_null(),
                    );
                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
                };
                crate::Global::from_task_may_block(
                    crate::component::store::StoreComponentInstanceId::new(store, id),
                )
            }
        }
    }

    /// Get an exported tag by index.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range.
    pub fn get_exported_tag(&self, store: StoreId, index: TagIndex) -> crate::Tag {
        let (id, def_index) = if let Some(def_index) = self.env_module().defined_tag_index(index) {
            (self.id, def_index)
        } else {
            let import = self.imported_tag(index);
            // SAFETY: validity of this `Instance` guarantees validity of the
            // `vmctx` pointer being read here to find the transitive
            // `InstanceId` that the import is associated with.
            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
            (id, import.index)
        };
        crate::Tag::from_raw(StoreInstanceId::new(store, id), def_index)
    }

    /// Grow memory by the specified number of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number
    /// of pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
    pub(crate) async fn memory_grow(
        mut self: Pin<&mut Self>,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        idx: DefinedMemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        let memory = &mut self.as_mut().memories_mut()[idx].1;

        // SAFETY: this is the safe wrapper around `Memory::grow` because it
        // automatically updates the `VMMemoryDefinition` in this instance after
        // a growth operation below.
        let result = unsafe { memory.grow(delta, limiter).await };

        // Update the state used by a non-shared Wasm memory in case the base
        // pointer and/or the length changed.
        if memory.as_shared_memory().is_none() {
            let vmmemory = memory.vmmemory();
            self.set_memory(idx, vmmemory);
        }

        result
    }

    pub(crate) fn table_element_type(
        self: Pin<&mut Self>,
        table_index: TableIndex,
    ) -> TableElementType {
        self.get_table(table_index).element_type()
    }

    /// Performs a grow operation on the `table_index` specified using `grow`.
    ///
    /// This will handle updating the `VMTableDefinition` internally as necessary.
    pub(crate) async fn defined_table_grow(
        mut self: Pin<&mut Self>,
        table_index: DefinedTableIndex,
        grow: impl AsyncFnOnce(&mut Table) -> Result<Option<usize>>,
    ) -> Result<Option<usize>> {
        let table = self.as_mut().get_defined_table(table_index);
        let result = grow(table).await;
        let element = table.vmtable();
        self.set_table(table_index, element);
        result
    }

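    /// Compute the allocation layout for an instance: the fixed-size
    /// `Instance` header immediately followed by the module's
    /// dynamically-sized `VMContext`.
    ///
    /// For example (sizes hypothetical): with `size_of::<Instance>() == 0xc0`
    /// and `offsets.size_of_vmctx() == 0x140`, the resulting layout is
    /// `0x200` bytes at `align_of::<Instance>()`.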
    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
        let size = mem::size_of::<Self>()
            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
            .unwrap();
        let align = mem::align_of::<Self>();
        Layout::from_size_align(size, align).unwrap()
    }

    fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
    }

    /// Get a `&VMFuncRef` for the given `FuncIndex`.
    ///
    /// Returns `None` if the index is the reserved index value.
    ///
    /// The returned reference is a stable reference that won't be moved and can
    /// be passed into JIT code.
    pub(crate) fn get_func_ref(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        index: FuncIndex,
    ) -> Option<NonNull<VMFuncRef>> {
        if index == FuncIndex::reserved_value() {
            return None;
        }

        let Some(def_index) = self.env_module().defined_func_index(index) else {
            debug_assert!(self.env_module().is_imported_function(index));
            return Some(self.imported_function(index).as_func_ref().into());
        };

        // For now, we eagerly initialize a funcref struct in-place whenever
        // asked for a reference to it. This is mostly fine, because in practice
        // each funcref is unlikely to be requested more than a few times:
        // once-ish for funcref tables used for call_indirect (the usual
        // compilation strategy places each function in the table at most once),
        // and once or a few times when fetching exports via the API. Note that
        // for any case driven by table accesses, the lazy table init behaves
        // like a higher-level cache layer that protects this initialization
        // from happening multiple times, via that particular table at least.
        //
        // When `ref.func` becomes more commonly used or if we otherwise see a
        // use-case where this becomes a hot path, we can reconsider by using
        // some state to track "uninitialized" explicitly, for example by
        // zeroing the funcrefs (perhaps together with other
        // zeroed-at-instantiate-time state) or using a separate is-initialized
        // bitmap.
        //
        // We arrived at this design because zeroing memory is expensive, so
        // it's better for instantiation performance if we don't have to track
        // "is-initialized" state at all!

        let func = &self.env_module().functions[index];
        let type_index = func.signature.unwrap_engine_type_index();

        let module_with_code = ModuleWithCode::in_store(
            registry,
            self.runtime_module()
                .expect("funcref impossible in fake module"),
        )
        .expect("module not in store");

        let array_call = VmPtr::from(
            NonNull::from(
                module_with_code
                    .array_to_wasm_trampoline(def_index)
                    .expect("should have array-to-Wasm trampoline for escaping function"),
            )
            .cast(),
        );

        let wasm_call = Some(VmPtr::from(
            NonNull::new(
                module_with_code
                    .finished_function(def_index)
                    .as_ptr()
                    .cast::<VMWasmCallFunction>()
                    .cast_mut(),
            )
            .unwrap(),
        ));

        let vmctx = VMOpaqueContext::from_vmcontext(self.vmctx()).into();

        // SAFETY: the offset calculated here should be correct with
        // `self.offsets`
        let func_ref_ptr = unsafe {
            self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
        };

        // SAFETY: the `func_ref_ptr` should be valid as it's within our
        // `VMContext` area.
        unsafe {
            func_ref_ptr.write(VMFuncRef {
                array_call,
                wasm_call,
                vmctx,
                type_index,
            });
        }

        Some(func_ref_ptr)
    }

    /// Get the passive elements segment at the given index.
    pub(crate) fn passive_element_segment(&self, elem_index: ElemIndex) -> &[ValRaw] {
        let Some(passive) = self
            .env_module()
            .passive_elements_map
            .get(&elem_index)
            .copied()
        else {
            return &[];
        };

        let Some((_, seg)) = &self.passive_elements[passive.index()] else {
            return &[];
        };

        &**seg
    }

    pub(crate) fn passive_elements_mut(
        self: Pin<&mut Self>,
    ) -> Pin<&mut TryVec<Option<(NeedsGcRooting, TryVec<ValRaw>)>>> {
        // SAFETY: Not moving data out of `self`.
        Pin::new(&mut unsafe { self.get_unchecked_mut() }.passive_elements)
    }

    /// The `table.init` operation: initializes a portion of a table with a
    /// passive element.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the range within the table is out of bounds
    /// or the range within the passive element is out of bounds.
    pub(crate) fn table_init(
        store: &mut StoreOpaque,
        instance_id: InstanceId,
        table_index: TableIndex,
        elem_index: ElemIndex,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<()> {
        let mut store = OpaqueRootScope::new(store);
        let store_id = store.id();
        let instance = store.instance(instance_id);
        let elements = instance.passive_element_segment(elem_index);

        let end = dst.checked_add(len).ok_or_else(|| Trap::TableOutOfBounds)?;
        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        let table = instance.get_exported_table(store_id, table_index);
        if end > table.size_(&store) {
            return Err(Trap::TableOutOfBounds.into());
        }

        // Subslice into just the target elements.
        let elements = elements
            .get(src..)
            .and_then(|elements| elements.get(..len))
            .ok_or_else(|| Trap::TableOutOfBounds)?
            .iter()
            .copied()
            .try_collect::<TryVec<_>, OutOfMemory>()?;

        let elem_ty = ValType::from(table.ty_(&store).element().clone());

        let refs = {
            let mut store = AutoAssertNoGc::new(&mut store);
            elements
                .into_iter()
                // SAFETY: the raw elements are valid because we got them from
                // this instance.
                .map(|raw| unsafe { Val::_from_raw(&mut store, raw, &elem_ty) })
                .map(|v| v.ref_().expect("due to validation"))
                .try_collect::<TryVec<_>, OutOfMemory>()?
        };

        let instance = store.instance(instance_id);
        let table = instance.get_exported_table(store_id, table_index);

        for (i, r) in refs.into_iter().enumerate() {
            let i = u64::try_from(i)
                .expect("okay because of `src` and `len` conversions to `usize` up above");
            let j = i
                .checked_add(dst)
                .expect("okay because of `checked_add` up above");
            table.set_(&mut store, j, r)?;
        }

        Ok(())
    }

    /// Drop an element.
    pub(crate) fn elem_drop(
        self: Pin<&mut Self>,
        elem_index: ElemIndex,
    ) -> Result<(), OutOfMemory> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop

        let Some(passive_index) = self
            .env_module()
            .passive_elements_map
            .get(&elem_index)
            .copied()
        else {
            // Note: dropping a non-passive segment is a no-op (not a trap).
            return Ok(());
        };

        self.passive_elements_mut()[passive_index.index()] = None;
        Ok(())
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory_mut(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> &mut Memory {
        &mut self.memories_mut()[index].1
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory(&self, index: DefinedMemoryIndex) -> &Memory {
        &self.memories[index].1
    }

    pub fn get_defined_memory_vmimport(&self, index: DefinedMemoryIndex) -> VMMemoryImport {
        crate::runtime::vm::VMMemoryImport {
            from: self.memory_ptr(index).into(),
            vmctx: self.vmctx().into(),
            index,
        }
    }

    /// Do a `memory.copy`.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the source or destination ranges are out of
    /// bounds.
    pub(crate) fn memory_copy(
        self: Pin<&mut Self>,
        dst_index: MemoryIndex,
        dst: u64,
        src_index: MemoryIndex,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy

        let src_mem = self.get_memory(src_index);
        let dst_mem = self.get_memory(dst_index);

        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = dst_mem.base.as_ptr().add(dst);
            let src = src_mem.base.as_ptr().add(src);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
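            // `ptr::copy` has `memmove` semantics and tolerates overlapping
            // ranges, which matches `memory.copy`: its source and destination
            // ranges may overlap (unlike `memory.init` below, which uses the
            // non-overlapping variant).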
            ptr::copy(src, dst, len);
        }

        Ok(())
    }

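    /// Check that `ptr..ptr + len` lies within a region of `max` bytes,
    /// returning `ptr` as a `usize` on success.
    ///
    /// A hedged worked example: for a one-page memory (`max == 0x10000`),
    /// `ptr = 0xfffe, len = 2` gives `end = 0x10000 <= max` and is accepted,
    /// while `ptr = 0xffff, len = 2` gives `end = 0x10001 > max` and traps
    /// with `MemoryOutOfBounds`.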
    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
        let oob = || Trap::MemoryOutOfBounds;
        let end = ptr
            .checked_add(len)
            .and_then(|i| usize::try_from(i).ok())
            .ok_or_else(oob)?;
        if end > max {
            Err(oob())
        } else {
            Ok(ptr.try_into().unwrap())
        }
    }

    /// Perform the `memory.fill` operation on a locally defined memory.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the memory range is out of bounds.
    pub(crate) fn memory_fill(
        self: Pin<&mut Self>,
        memory_index: DefinedMemoryIndex,
        dst: u64,
        val: u8,
        len: u64,
    ) -> Result<(), Trap> {
        let memory_index = self.env_module().memory_index(memory_index);
        let memory = self.get_memory(memory_index);
        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::write_bytes(dst, val, len);
        }

        Ok(())
    }

    /// Get the internal storage range of a particular Wasm data segment.
    pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
        match self.env_module().passive_data_map.get(&index) {
            Some(range) if !self.dropped_data.contains(index) => range.clone(),
            _ => 0..0,
        }
    }

    /// Given an internal storage range of a Wasm data segment (or subset of a
    /// Wasm data segment), get the data's raw bytes.
    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
        let start = usize::try_from(range.start).unwrap();
        let end = usize::try_from(range.end).unwrap();
        &self.runtime_info.wasm_data()[start..end]
    }

    /// Performs the `memory.init` operation.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the destination range is out of this module's
    /// memory's bounds or if the source range is outside the data segment's
    /// bounds.
    pub(crate) fn memory_init(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        data_index: DataIndex,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        let range = self.wasm_data_range(data_index);
        self.memory_init_segment(memory_index, range, dst, src, len)
    }

    pub(crate) fn memory_init_segment(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        range: Range<u32>,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let data = self.wasm_data(range);
        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
        let len = len as usize;

        unsafe {
            let src_start = data.as_ptr().add(src);
            let dst_start = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy_nonoverlapping(src_start, dst_start, len);
        }

        Ok(())
    }

    /// Drop the given data segment, truncating its length to zero.
    pub(crate) fn data_drop(
        self: Pin<&mut Self>,
        data_index: DataIndex,
    ) -> Result<(), OutOfMemory> {
        self.dropped_data_mut().insert(data_index)?;

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).

        Ok(())
    }

    /// Get a table by index regardless of whether it is locally-defined
    /// or an imported, foreign table. Ensure that the given range of
    /// elements in the table is lazily initialized. We define this
    /// operation all-in-one for safety, to ensure the lazy-init
    /// happens.
    ///
    /// Takes an `Iterator` for the index-range to lazy-initialize,
    /// for flexibility. This can be a range, single item, or empty
    /// sequence, for example. The iterator should return indices in
    /// increasing order, so that the break-at-out-of-bounds behavior
    /// works correctly.
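    ///
    /// For example (illustrative call shapes, not specific real call sites):
    /// a single access at index `i` can pass `core::iter::once(i)`, while a
    /// bulk operation over `dst..dst + len` can pass that range directly.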
1199 pub(crate) fn get_table_with_lazy_init(
1200 self: Pin<&mut Self>,
1201 registry: &ModuleRegistry,
1202 table_index: TableIndex,
1203 range: impl Iterator<Item = u64>,
1204 ) -> &mut Table {
1205 let (idx, instance) = self.defined_table_index_and_instance(table_index);
1206 instance.get_defined_table_with_lazy_init(registry, idx, range)
1207 }
1208
1209 /// Gets the raw runtime table data structure owned by this instance
1210 /// given the provided `idx`.
1211 ///
1212 /// The `range` specified is eagerly initialized for funcref tables.
1213 pub fn get_defined_table_with_lazy_init(
1214 mut self: Pin<&mut Self>,
1215 registry: &ModuleRegistry,
1216 idx: DefinedTableIndex,
1217 range: impl IntoIterator<Item = u64>,
1218 ) -> &mut Table {
1219 let elt_ty = self.tables[idx].1.element_type();
1220
1221 if elt_ty == TableElementType::Func {
1222 for i in range {
1223 match self.tables[idx].1.get_func_maybe_init(i) {
1224 // Uninitialized table element.
1225 Ok(None) => {}
1226 // Initialized table element, move on to the next.
1227 Ok(Some(_)) => continue,
1228 // Out-of-bounds; caller will handle by likely
1229 // throwing a trap. No work to do to lazy-init
1230 // beyond the end.
1231 Err(_) => break,
1232 };
1233
1234 // The table element `i` is uninitialized and is now being
1235 // initialized. This must imply that a `precompiled` list of
1236 // function indices is available for this table. The precompiled
1237 // list is extracted and then it is consulted with `i` to
1238 // determine the function that is going to be initialized. Note
1239 // that `i` may be outside the limits of the static
1240 // initialization so it's a fallible `get` instead of an index.
1241 let module = self.env_module();
1242 let precomputed = match &module.table_initialization.initial_values[idx] {
1243 TableInitialValue::Null { precomputed } => precomputed,
1244 TableInitialValue::Expr(_) => unreachable!(),
1245 };
1246 // Panicking here helps catch bugs rather than silently truncating by accident.
1247 let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
1248 let func_ref = func_index
1249 .and_then(|func_index| self.as_mut().get_func_ref(registry, func_index));
1250 self.as_mut().tables_mut()[idx]
1251 .1
1252 .set_func(i, func_ref)
1253 .expect("Table type should match and index should be in-bounds");
1254 }
1255 }
1256
1257 self.get_defined_table(idx)
1258 }
1259
1260 /// Get a table by index regardless of whether it is locally-defined or an
1261 /// imported, foreign table.
1262 pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> &mut Table {
1263 let (idx, instance) = self.defined_table_index_and_instance(table_index);
1264 instance.get_defined_table(idx)
1265 }
1266
1267 /// Get a locally-defined table.
1268 pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> &mut Table {
1269 &mut self.tables_mut()[index].1
1270 }
1271
1272 pub(crate) fn defined_table_index_and_instance<'a>(
1273 self: Pin<&'a mut Self>,
1274 index: TableIndex,
1275 ) -> (DefinedTableIndex, Pin<&'a mut Instance>) {
1276 if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
1277 (defined_table_index, self)
1278 } else {
1279 let import = self.imported_table(index);
1280 let index = import.index;
1281 let vmctx = import.vmctx.as_non_null();
1282 // SAFETY: the validity of `self` means that the reachable instances
1283 // should also all be owned by the same store and fully initialized,
1284 // so it's safe to laterally move from a mutable borrow of this
1285 // instance to a mutable borrow of a sibling instance.
1286 let foreign_instance = unsafe { self.sibling_vmctx_mut(vmctx) };
1287 (index, foreign_instance)
1288 }
1289 }
1290
1291 /// Same as `self.runtime_info.env_module()` but additionally returns the
1292 /// `Pin<&mut Self>` with the same original lifetime.
1293 pub fn module_and_self(self: Pin<&mut Self>) -> (&wasmtime_environ::Module, Pin<&mut Self>) {
1294 // SAFETY: this function is projecting both `&Module` and the same
1295 // pointer both connected to the same lifetime. This is safe because
1296 // it's a contract of `Pin<&mut Self>` that the `runtime_info` field is
1297 // never written, meaning it's effectively unsafe to have `&mut Module`
1298 // projected from `Pin<&mut Self>`. Consequently it's safe to have a
1299 // read-only view of the field while still retaining mutable access to
1300 // all other fields.
1301 let module = self.runtime_info.env_module();
1302 let module = &raw const *module;
1303 let module = unsafe { &*module };
1304 (module, self)
1305 }
1306
1307 /// Initialize the VMContext data associated with this Instance.
1308 ///
1309 /// The `VMContext` memory is assumed to be uninitialized; any field
1310 /// that we need in a certain state will be explicitly written by this
1311 /// function.
1312 unsafe fn initialize_vmctx(self: Pin<&mut Self>, store: &StoreOpaque, imports: Imports) {
1313 let (module, mut instance) = self.module_and_self();
1314
1315 // SAFETY: the type of the magic field is indeed `u32` and this function
1316 // is initializing its value.
1317 unsafe {
1318 let offsets = instance.runtime_info.offsets();
1319 instance
1320 .vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
1321 .write(VMCONTEXT_MAGIC);
1322 }
1323
1324 // SAFETY: it's up to the caller to provide a valid store pointer here.
1325 unsafe {
1326 instance.as_mut().set_store(store);
1327 }
1328
1329 // Initialize shared types
1330 //
1331 // SAFETY: validity of the vmctx means it should be safe to write to it
1332 // here.
1333 unsafe {
1334 let types = NonNull::from(instance.runtime_info.type_ids());
1335 instance.type_ids_array().write(types.cast().into());
1336 }
1337
1338 // Initialize the built-in functions
1339 //
1340 // SAFETY: the type of the builtin functions field is indeed a pointer
1341 // and the pointer being filled in here, plus the vmctx is valid to
1342 // write to during initialization.
1343 unsafe {
1344 static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
1345 let ptr = BUILTINS.expose_provenance();
1346 let offsets = instance.runtime_info.offsets();
1347 instance
1348 .vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
1349 .write(VmPtr::from(ptr));
1350 }
1351
1352 // Initialize the imports
1353 //
1354 // SAFETY: the vmctx is safe to initialize during this function and
1355 // validity of each item itself is a contract the caller must uphold.
1356 debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
1357 unsafe {
1358 let offsets = instance.runtime_info.offsets();
1359 ptr::copy_nonoverlapping(
1360 imports.functions.as_ptr(),
1361 instance
1362 .vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
1363 .as_ptr(),
1364 imports.functions.len(),
1365 );
1366 debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
1367 ptr::copy_nonoverlapping(
1368 imports.tables.as_ptr(),
1369 instance
1370 .vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
1371 .as_ptr(),
1372 imports.tables.len(),
1373 );
1374 debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
1375 ptr::copy_nonoverlapping(
1376 imports.memories.as_ptr(),
1377 instance
1378 .vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
1379 .as_ptr(),
1380 imports.memories.len(),
1381 );
1382 debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
1383 ptr::copy_nonoverlapping(
1384 imports.globals.as_ptr(),
1385 instance
1386 .vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
1387 .as_ptr(),
1388 imports.globals.len(),
1389 );
1390 debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
1391 ptr::copy_nonoverlapping(
1392 imports.tags.as_ptr(),
1393 instance
1394 .vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
1395 .as_ptr(),
1396 imports.tags.len(),
1397 );
1398 }
1399
1400 // N.B.: there is no need to initialize the funcrefs array because we
1401 // eagerly construct each element in it whenever asked for a reference
1402 // to that element. In other words, there is no state needed to track
1403 // the lazy-init, so we don't need to initialize any state now.
1404
1405 // Initialize the defined tables
1406 //
1407 // SAFETY: it's safe to initialize these tables during initialization
1408 // here and the various types of pointers and such here should all be
1409 // valid.
1410 unsafe {
1411 let offsets = instance.runtime_info.offsets();
1412 let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
1413 let tables = instance.as_mut().tables_mut();
1414 for i in 0..module.num_defined_tables() {
1415 ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
1416 ptr = ptr.add(1);
1417 }
1418 }
1419
1420 // Initialize the defined memories. This fills in both the
1421 // `defined_memories` table and the `owned_memories` table at the same
1422 // time. Entries in `defined_memories` hold a pointer to a definition
1423 // (all memories) whereas the `owned_memories` hold the actual
1424 // definitions of memories owned (not shared) in the module.
1425 //
1426 // SAFETY: it's safe to initialize these memories during initialization
1427 // here and the various types of pointers and such here should all be
1428 // valid.
1429 unsafe {
1430 let offsets = instance.runtime_info.offsets();
1431 let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
1432 let mut owned_ptr =
1433 instance.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
1434 let memories = instance.as_mut().memories_mut();
1435 for i in 0..module.num_defined_memories() {
1436 let defined_memory_index = DefinedMemoryIndex::new(i);
1437 let memory_index = module.memory_index(defined_memory_index);
1438 if module.memories[memory_index].shared {
1439 let def_ptr = memories[defined_memory_index]
1440 .1
1441 .as_shared_memory()
1442 .unwrap()
1443 .vmmemory_ptr();
1444 ptr.write(VmPtr::from(def_ptr));
1445 } else {
1446 owned_ptr.write(memories[defined_memory_index].1.vmmemory());
1447 ptr.write(VmPtr::from(owned_ptr));
1448 owned_ptr = owned_ptr.add(1);
1449 }
1450 ptr = ptr.add(1);
1451 }
1452 }
1453
1454 // Zero-initialize the globals so that nothing is uninitialized memory
1455 // after this function returns. The globals are actually initialized
1456 // with their const expression initializers after the instance is fully
1457 // allocated.
1458 //
1459 // SAFETY: it's safe to initialize globals during initialization
1460 // here. Note that while the value being written is not valid for all
1461 // types of globals it's initializing the memory to zero instead of
1462 // being in an undefined state. So it's still unsafe to access globals
1463 // after this, but if it's read then it'd hopefully crash faster than
1464 // leaving this undefined.
1465 unsafe {
1466 for (index, _init) in module.global_initializers.iter() {
1467 instance.global_ptr(index).write(VMGlobalDefinition::new());
1468 }
1469 }

        // Initialize the defined tags
        //
        // SAFETY: it's safe to initialize these tags during initialization
        // here and the various types of pointers and such here should all be
        // valid.
        unsafe {
            let offsets = instance.runtime_info.offsets();
            let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
            for i in 0..module.num_defined_tags() {
                let defined_index = DefinedTagIndex::new(i);
                let tag_index = module.tag_index(defined_index);
                let tag = module.tags[tag_index];
                ptr.write(VMTagDefinition::new(
                    tag.signature.unwrap_engine_type_index(),
                ));
                ptr = ptr.add(1);
            }
        }
    }

    /// Attempts to convert from the host `addr` specified to a WebAssembly
    /// based address recorded in `WasmFault`.
    ///
    /// This method will check all linear memories that this instance contains
    /// to see if any of them contain `addr`. If one does then `Some` is
    /// returned with metadata about the wasm fault. Otherwise `None` is
    /// returned and `addr` doesn't belong to this instance.
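    ///
    /// For example (illustrative addresses only): if a memory's accessible
    /// range starts at host address `0x7f00_0000_0000` and the fault happened
    /// at host address `0x7f00_0000_1000`, the reported wasm address is the
    /// offset into that memory:
    ///
    /// ```ignore
    /// let accessible_start = 0x7f00_0000_0000_u64;
    /// let faulting_addr = 0x7f00_0000_1000_u64;
    /// // `wasm_address` is relative to the start of the linear memory.
    /// assert_eq!(faulting_addr - accessible_start, 0x1000);
    /// ```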
    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        let mut fault = None;
        for (_, (_, memory)) in self.memories.iter() {
            let accessible = memory.wasm_accessible();
            if accessible.start <= addr && addr < accessible.end {
                // All linear memories should be disjoint so assert that no
                // prior fault has been found.
                assert!(fault.is_none());
                fault = Some(WasmFault {
                    memory_size: memory.byte_size(),
                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
                });
            }
        }
        fault
    }

    /// Returns the id that this instance is assigned within its store.
    pub fn id(&self) -> InstanceId {
        self.id
    }

    /// Get all memories within this instance.
    ///
    /// Returns both imported and defined memories.
    ///
    /// Returns both exported and non-exported memories.
    ///
    /// Gives access to the full memory index space.
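    ///
    /// A hedged usage sketch (`store_id` and the surrounding setup are
    /// assumed, not part of this API):
    ///
    /// ```ignore
    /// for (index, export) in instance.all_memories(store_id) {
    ///     // `index` spans imported and defined memories alike.
    ///     let _ = (index, export);
    /// }
    /// ```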
    pub fn all_memories(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + '_ {
        self.env_module()
            .memories
            .iter()
            .map(move |(i, _)| (i, self.get_exported_memory(store, i)))
    }

    /// Return the memories defined in this instance (not imported).
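    ///
    /// Defined memories follow imported ones in the module's memory index
    /// space, hence the `skip` below. For example (illustrative counts), a
    /// module importing one memory and defining two more has memory indices
    /// `0..3`, and this iterator yields the exports for indices `1` and `2`.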
    pub fn defined_memories<'a>(
        &'a self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
        let num_imported = self.env_module().num_imported_memories;
        self.all_memories(store)
            .skip(num_imported)
            .map(|(_i, memory)| memory)
    }

    /// Lookup an item with the given index.
    ///
    /// # Panics
    ///
    /// Panics if `export` is not valid for this instance.
    ///
    /// # Safety
    ///
    /// This function requires that `store` is the correct store which owns
    /// this instance.
    pub unsafe fn get_export_by_index_mut(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        export: EntityIndex,
    ) -> Export {
        match export {
            // SAFETY: the contract of `store` owning this instance is a
            // safety requirement of this function itself.
            EntityIndex::Function(i) => {
                Export::Function(unsafe { self.get_exported_func(registry, store, i) })
            }
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(store, i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(store, i)),
            EntityIndex::Memory(i) => match self.get_exported_memory(store, i) {
                ExportMemory::Unshared(m) => Export::Memory(m),
                ExportMemory::Shared(m, i) => Export::SharedMemory(m, i),
            },
            EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(store, i)),
        }
    }

    fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
        // SAFETY: this is a pin-projection to get a mutable reference to an
        // internal field and is safe so long as the `&mut Self` temporarily
        // created is not overwritten, which it isn't here.
        unsafe { &mut self.get_unchecked_mut().store }
    }

    fn dropped_data_mut(self: Pin<&mut Self>) -> &mut TryEntitySet<DataIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_data }
    }

    fn memories_mut(
        self: Pin<&mut Self>,
    ) -> &mut TryPrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().memories }
    }

    pub(crate) fn tables_mut(
        self: Pin<&mut Self>,
    ) -> &mut TryPrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().tables }
    }

    #[cfg(feature = "wmemcheck")]
    pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
    }
}

// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
// last field of `Instance`.
unsafe impl InstanceLayout for Instance {
    const INIT_ZEROED: bool = false;
    type VMContext = VMContext;

    fn layout(&self) -> Layout {
        Self::alloc_layout(self.runtime_info.offsets())
    }

    fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
        &self.vmctx
    }

    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
        &mut self.vmctx
    }
}

pub type InstanceHandle = OwnedInstance<Instance>;

/// A handle holding an `Instance` of a WebAssembly module.
///
/// This structure is an owning handle of the `instance` contained internally.
/// When this value goes out of scope it will deallocate the `Instance` and all
/// memory associated with it.
///
/// Note that this lives within a `StoreOpaque` on a list of instances that a
/// store is keeping alive.
#[derive(Debug)]
#[repr(transparent)] // guarantee this is a zero-cost wrapper
pub struct OwnedInstance<T: InstanceLayout> {
    /// The raw pointer to the instance that was allocated.
    ///
    /// Note that this is not equivalent to `Box<Instance>` because the
    /// allocation here has a `VMContext` trailing after it. Hence the custom
    /// destructor, which invokes the `dealloc` function with the appropriate
    /// layout.
    instance: SendSyncPtr<T>,
    _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
}

/// Structure that must be placed at the end of a type implementing
/// `InstanceLayout`.
#[repr(align(16))] // match the alignment of VMContext
pub struct OwnedVMContext<T> {
    /// A pointer to the `vmctx` field at the end of the structure.
    ///
    /// If you're looking at this, a reasonable question would be "why do we
    /// need a pointer to ourselves?" because after all the pointer's value is
    /// trivially derivable from any `&Instance` pointer. The rationale for
    /// this field's existence is subtle, but it's required for correctness.
    /// The short version is "this makes Miri happy".
    ///
    /// The long version of why this field exists is that the rules that Miri
    /// uses to ensure pointers are used correctly have various conditions
    /// that depend on how pointers are used. More specifically if `*mut T` is
    /// derived from `&mut T`, then that invalidates all prior pointers
    /// derived from the `&mut T`. This means that while we liberally want to
    /// re-acquire a `*mut VMContext` throughout the implementation of
    /// `Instance`, the trivial way, a function
    /// `fn vmctx(Pin<&mut Instance>) -> *mut VMContext`, would effectively
    /// invalidate all prior `*mut VMContext` pointers acquired. The purpose
    /// of this field is to serve as a sort of source-of-truth for where
    /// `*mut VMContext` pointers come from.
    ///
    /// This field is initialized when the `Instance` is created with the
    /// original allocation's pointer. That means that the provenance of this
    /// pointer contains the entire allocation (both instance and `VMContext`).
    /// This provenance bit is then "carried through" where `fn vmctx` will
    /// base all returned pointers on this pointer itself. This provides the
    /// means of never invalidating this pointer throughout Miri and
    /// additionally being able to still temporarily have `Pin<&mut Instance>`
    /// methods and such.
    ///
    /// It's important to note, though, that this is not here purely for Miri.
    /// The careful construction of the `fn vmctx` method has ramifications on
    /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
    /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
    /// deriving VMContext pointers from this pointer it specifically hints to
    /// LLVM that trickery is afoot and it properly informs `noalias` and such
    /// annotations and analysis. More-or-less this pointer is actually loaded
    /// in LLVM IR which helps defeat otherwise present aliasing
    /// optimizations, which we want, since writes to this should basically
    /// never be optimized out.
    ///
    /// As a final note it's worth pointing out that the machine code
    /// generated for accessing `fn vmctx` is still as one would expect. This
    /// member isn't actually ever loaded at runtime (or at least shouldn't
    /// be). Perhaps in the future if the memory consumption of this field is
    /// a problem we could shrink it slightly, but for now one extra pointer
    /// per wasm instance seems not too bad.
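    ///
    /// A minimal sketch of the provenance pattern (illustrative only; the
    /// names below are not this crate's real types):
    ///
    /// ```ignore
    /// use core::ptr::NonNull;
    ///
    /// struct Header {
    ///     // Stored at allocation time, with provenance over the whole
    ///     // allocation (header plus trailing data).
    ///     self_ref: NonNull<u8>,
    /// }
    ///
    /// impl Header {
    ///     fn trailing_data(&self) -> NonNull<u8> {
    ///         // Compute the *address* from `self`, but take the
    ///         // *provenance* from the stored pointer, so reborrowing
    ///         // `&self` never invalidates previously derived pointers.
    ///         let addr = NonNull::from(self).addr().get() + size_of::<Header>();
    ///         self.self_ref.with_addr(addr.try_into().unwrap())
    ///     }
    /// }
    /// ```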
    vmctx_self_reference: SendSyncPtr<T>,

    /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
    /// safe operation.
    _marker: core::marker::PhantomPinned,
}

impl<T> OwnedVMContext<T> {
    /// Creates a new blank vmctx to place at the end of an instance.
    pub fn new() -> OwnedVMContext<T> {
        OwnedVMContext {
            vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
            _marker: core::marker::PhantomPinned,
        }
    }
}

/// Helper trait to plumb both core instances and component instances into
/// `OwnedInstance` below.
///
/// # Safety
///
/// This trait requires `layout` to correctly describe `Self` and appropriately
/// allocate space for `Self::VMContext` afterwards. Additionally the field
/// returned by `owned_vmctx()` must be the last field in the structure.
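///
/// A conforming type thus looks roughly like this (an illustrative sketch,
/// not a real implementor in this crate):
///
/// ```ignore
/// struct MyInstance {
///     some_state: u32,
///     // Must be the last field; the trailing `VMContext` memory is
///     // allocated immediately after it.
///     vmctx: OwnedVMContext<VMContext>,
/// }
/// ```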
pub unsafe trait InstanceLayout {
    /// Whether or not to allocate this instance with `alloc_zeroed` or `alloc`.
    const INIT_ZEROED: bool;

    /// The trailing `VMContext` type at the end of this instance.
    type VMContext;

    /// The memory layout to use to allocate and deallocate this instance.
    fn layout(&self) -> Layout;

    fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;

    /// Returns the `vmctx_self_reference` set above.
    #[inline]
    fn vmctx(&self) -> NonNull<Self::VMContext> {
        // The definition of this method is subtle but intentional. The goal
        // here is that effectively this should return `&mut self.vmctx`, but
        // it's not quite so simple. Some more documentation is available on
        // the `vmctx_self_reference` field, but the general idea is that we're
        // creating a pointer to return with proper provenance. Provenance is
        // still in the works in Rust at the time of this writing but the load
        // of the `self.vmctx_self_reference` field is important here as it
        // affects how LLVM thinks about aliasing with respect to the returned
        // pointer.
        //
        // The intention of this method is to codegen to machine code as `&mut
        // self.vmctx`, however. While it doesn't show up like this in LLVM IR
        // (there's an actual load of the field) it does look like that by the
        // time the backend runs. (that's magic to me, the backend removing
        // loads...)
        let owned_vmctx = self.owned_vmctx();
        let owned_vmctx_raw = NonNull::from(owned_vmctx);
        // SAFETY: it's part of the contract of `InstanceLayout` and the usage
        // with `OwnedInstance` that this indeed points to the vmctx.
        let addr = unsafe { owned_vmctx_raw.add(1) };
        owned_vmctx
            .vmctx_self_reference
            .as_non_null()
            .with_addr(addr.addr())
    }

    /// Helper function to access various locations offset from our `*mut
    /// VMContext` object.
    ///
    /// Note that this method takes `&self` as an argument but returns
    /// `NonNull<T>` which is frequently used to mutate said memory. This is an
    /// intentional design decision where the safety of the modification of
    /// memory is placed as a burden onto the caller. The implementation of
    /// this method explicitly does not require `&mut self` to acquire mutable
    /// provenance to update the `VMContext` region. Instead all pointers into
    /// the `VMContext` area have provenance/permissions to write.
    ///
    /// Also note though that care must be taken to ensure that reads/writes
    /// of memory must only happen where appropriate, for example a non-atomic
    /// write (as most are) should never happen concurrently with another read
    /// or write. It's generally on the burden of the caller to adhere to
    /// this.
    ///
    /// Also of note is that most of the time the usage of this method falls
    /// into one of:
    ///
    /// * Something in the VMContext is being read or written. In that case
    ///   use `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due
    ///   to that having a safer lifetime.
    ///
    /// * A pointer is being created to pass to other VM* data structures. In
    ///   that situation the lifetime of all VM data structures is typically
    ///   tied to the `Store<T>` which is what provides the guarantees around
    ///   concurrency/etc.
    ///
    /// There's quite a lot of unsafety riding on this method, especially
    /// related to the ascription `T` of the byte `offset`. It's hoped that in
    /// the future we're able to settle on a design that is safer in theory.
    ///
    /// # Safety
    ///
    /// This method is unsafe because the `offset` must be within bounds of
    /// the `VMContext` object trailing this instance. Additionally `T` must
    /// be a valid ascription of the value that resides at that location.
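    ///
    /// A hedged usage sketch (the `instance` and `offsets` bindings are
    /// assumed from surrounding code; the offset/type pairing must match
    /// what `VMOffsets` actually lays out):
    ///
    /// ```ignore
    /// // Read the pointer to the first defined memory's definition, which
    /// // the layout in `VMOffsets` says lives at `vmctx_memories_begin()`.
    /// let ptr: NonNull<VmPtr<VMMemoryDefinition>> =
    ///     unsafe { instance.vmctx_plus_offset_raw(offsets.vmctx_memories_begin()) };
    /// ```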
    unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
        // SAFETY: the safety requirements of `byte_add` are forwarded to this
        // method's caller.
        unsafe {
            self.vmctx()
                .byte_add(usize::try_from(offset.into()).unwrap())
                .cast()
        }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&self` to the returned reference `&T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&mut self` to the returned reference `&mut T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
        self: Pin<&mut Self>,
        offset: impl Into<u32>,
    ) -> &mut T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
    }
}

impl<T: InstanceLayout> OwnedInstance<T> {
    /// Allocates a new `OwnedInstance` and places `instance` inside of it.
    ///
    /// This will allocate the memory described by `instance.layout()`, which
    /// includes the trailing `VMContext`, and move `instance` into it.
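    ///
    /// A hedged usage sketch (error handling and the `instance` value are
    /// assumed from surrounding code):
    ///
    /// ```ignore
    /// let handle = OwnedInstance::new(instance)?;
    /// // The instance is now pinned at a stable address; `vmctx` pointers
    /// // derived from it remain valid until `handle` is dropped.
    /// let vmctx = handle.get().vmctx();
    /// ```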
    pub(super) fn new(mut instance: T) -> Result<OwnedInstance<T>, OutOfMemory> {
        let layout = instance.layout();
        debug_assert!(layout.size() >= size_of_val(&instance));
        debug_assert!(layout.align() >= align_of_val(&instance));

        // SAFETY: it's up to us to assert that `layout` has a non-zero size,
        // which is asserted here.
        let ptr = unsafe {
            assert!(layout.size() > 0);
            if T::INIT_ZEROED {
                alloc::alloc::alloc_zeroed(layout)
            } else {
                alloc::alloc::alloc(layout)
            }
        };
        let Some(instance_ptr) = NonNull::new(ptr.cast::<T>()) else {
            return Err(OutOfMemory::new(layout.size()));
        };

        // SAFETY: it's part of the unsafe contract of `InstanceLayout` that
        // the `add` here is appropriate for the layout allocated.
        let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
        instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();

        // SAFETY: we allocated above and it's an unsafe contract of
        // `InstanceLayout` that the layout is suitable for writing the
        // instance.
        unsafe {
            instance_ptr.write(instance);
        }

        let ret = OwnedInstance {
            instance: SendSyncPtr::new(instance_ptr),
            _marker: marker::PhantomData,
        };

        // Double-check various vmctx calculations are correct.
        debug_assert_eq!(
            vmctx_self_reference.addr(),
            // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
            // the last field to get a pointer to 1-byte-past-the-end of an
            // object, which should be valid.
            unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
        );
        debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());

        Ok(ret)
    }

    /// Gets the raw underlying `&Instance` from this handle.
    pub fn get(&self) -> &T {
        // SAFETY: this is an owned instance handle that retains exclusive
        // ownership of the `Instance` inside. With `&self` given we know
        // this pointer is valid and the returned lifetime is connected
        // to `self` so that should also be valid.
        unsafe { self.instance.as_non_null().as_ref() }
    }

    /// Same as [`Self::get`] except for mutability.
    pub fn get_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: The lifetime concerns here are the same as `get` above.
        // Otherwise `new_unchecked` is used here to uphold the contract that
        // instances are always pinned in memory.
        unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
    }
}

impl<T: InstanceLayout> Drop for OwnedInstance<T> {
    fn drop(&mut self) {
        // SAFETY: this handle retains exclusive ownership of the allocation,
        // so it's safe to run the destructor in place and then free the
        // backing memory with the same layout it was allocated with.
        unsafe {
            let layout = self.get().layout();
            ptr::drop_in_place(self.instance.as_ptr());
            alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
        }
    }
}