wasmtime/runtime/vm/instance.rs
1//! An `Instance` contains all the runtime state used by execution of a
2//! wasm module (except its callstack and register state). An
3//! `InstanceHandle` is a reference-counting handle for an `Instance`.
4
5use crate::OpaqueRootScope;
6use crate::code::ModuleWithCode;
7use crate::module::ModuleRegistry;
8use crate::prelude::*;
9use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
10use crate::runtime::vm::export::{Export, ExportMemory};
11use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
12use crate::runtime::vm::table::{Table, TableElementType};
13use crate::runtime::vm::vmcontext::{
14 VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
15 VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
16 VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
17};
18use crate::runtime::vm::{
19 GcStore, HostResult, Imports, ModuleRuntimeInfo, SendSyncPtr, VMGlobalKind, VMStore,
20 VMStoreRawPtr, VmPtr, VmSafe, WasmFault, catch_unwind_and_record_trap,
21};
22use crate::store::{
23 Asyncness, InstanceId, StoreId, StoreInstanceId, StoreOpaque, StoreResourceLimiter,
24};
25use crate::vm::VMWasmCallFunction;
26use alloc::sync::Arc;
27use core::alloc::Layout;
28use core::marker;
29use core::ops::Range;
30use core::pin::Pin;
31use core::ptr::NonNull;
32#[cfg(target_has_atomic = "64")]
33use core::sync::atomic::AtomicU64;
34use core::{mem, ptr};
35#[cfg(feature = "gc")]
36use wasmtime_environ::ModuleInternedTypeIndex;
37use wasmtime_environ::error::OutOfMemory;
38use wasmtime_environ::{
39 DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
40 ElemIndex, EntityIndex, EntityRef, FuncIndex, GlobalIndex, HostPtr, MemoryIndex, PrimaryMap,
41 PtrSize, TableIndex, TableInitialValue, TableSegmentElements, TagIndex, Trap, VMCONTEXT_MAGIC,
42 VMOffsets, VMSharedTypeIndex, packed_option::ReservedValue,
43};
44#[cfg(feature = "wmemcheck")]
45use wasmtime_wmemcheck::Wmemcheck;
46
47mod allocator;
48pub use allocator::*;
49
50/// A type that roughly corresponds to a WebAssembly instance, but is also used
51/// for host-defined objects.
52///
/// Instances here can correspond to actual instantiated modules, but this type
/// is also used ubiquitously for host-defined objects. For example, creating a
/// host-defined memory produces an instance whose `module` looks like it
/// exports a single memory (and similarly for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether they were created on the host or through a module.
60///
61/// # Ownership
62///
63/// This structure is never allocated directly but is instead managed through
64/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
65/// dynamic size corresponding to the `module` configured within. Memory
66/// management of this structure is always done through `InstanceHandle` as the
67/// sole owner of an instance.
68///
69/// # `Instance` and `Pin`
70///
/// Each instance is accompanied by trailing memory for its `VMContext`. The
/// `Instance` also holds `runtime_info` and other information pointing to
/// relevant offsets within the `VMContext`. Thus it is not sound to mutate
/// `runtime_info` after an instance is created. More generally it's also not
/// safe to "swap" instances: given two `&mut Instance` values it's not sound
/// to swap them, as the trailing `VMContext` data would then be inaccurately
/// described.
78///
79/// To encapsulate this guarantee this type is only ever mutated through Rust's
80/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>` which
81/// statically disallows safe access to `&mut Instance`. There are assorted
82/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
/// individual fields, for example `memories_mut`. More methods can be added as
/// necessary, and methods may also be added to project multiple fields at a
/// time if needed. The precise ergonomics around getting mutable access to
/// some fields (but notably not `runtime_info`) are likely to evolve over
/// time.
88///
/// Note that it is essentially never sound to pass around `&mut Instance`;
/// that should always be `Pin<&mut Instance>` instead. All usage of
/// `Pin::new_unchecked` should be confined to just a few `unsafe` locations in
/// this module, and it's recommended to use the existing helpers if you can.
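//
// A minimal sketch of the projection pattern described above, under the
// assumption that field projection is done with `Pin::get_unchecked_mut` (the
// real projection methods live elsewhere in this module and may differ in
// detail):
//
//     fn memories_mut(
//         self: Pin<&mut Self>,
//     ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
//         // SAFETY: `memories` is an ordinary field; mutating it never moves
//         // the instance and never invalidates the trailing `VMContext`.
//         unsafe { &mut self.get_unchecked_mut().memories }
//     }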
93#[repr(C)] // ensure that the vmctx field is last.
94pub struct Instance {
95 /// The index, within a `Store` that this instance lives at
96 id: InstanceId,
97
98 /// The runtime info (corresponding to the "compiled module"
99 /// abstraction in higher layers) that is retained and needed for
100 /// lazy initialization. This provides access to the underlying
101 /// Wasm module entities, the compiled JIT code, metadata about
102 /// functions, lazy initialization state, etc.
103 //
104 // SAFETY: this field cannot be overwritten after an instance is created. It
105 // must contain this exact same value for the entire lifetime of this
106 // instance. This enables borrowing the info's `Module` and this instance at
107 // the same time (instance mutably, module not). Additionally it enables
108 // borrowing a store mutably at the same time as a contained instance.
109 runtime_info: ModuleRuntimeInfo,
110
111 /// WebAssembly linear memory data.
112 ///
113 /// This is where all runtime information about defined linear memories in
114 /// this module lives.
115 ///
116 /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
117 /// must be given back to the instance allocator when deallocating each
118 /// memory.
119 memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
120
121 /// WebAssembly table data.
122 ///
123 /// Like memories, this is only for defined tables in the module and
124 /// contains all of their runtime state.
125 ///
126 /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
127 /// must be given back to the instance allocator when deallocating each
128 /// table.
129 tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
130
131 /// Stores the dropped passive element segments in this instantiation by index.
132 /// If the index is present in the set, the segment has been dropped.
133 dropped_elements: EntitySet<ElemIndex>,
134
135 /// Stores the dropped passive data segments in this instantiation by index.
136 /// If the index is present in the set, the segment has been dropped.
137 dropped_data: EntitySet<DataIndex>,
138
139 // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
140 // memory 0.
141 #[cfg(feature = "wmemcheck")]
142 pub(crate) wmemcheck_state: Option<Wmemcheck>,
143
144 /// Self-pointer back to `Store<T>` and its functions. Not present for
145 /// the brief time that `Store<T>` is itself being created. Also not
146 /// present for some niche uses that are disconnected from stores (e.g.
    /// cross-thread uses within `InstancePre`).
148 store: Option<VMStoreRawPtr>,
149
150 /// Additional context used by compiled wasm code. This field is last, and
151 /// represents a dynamically-sized array that extends beyond the nominal
152 /// end of the struct (similar to a flexible array member).
153 vmctx: OwnedVMContext<VMContext>,
154}
155
156impl Instance {
    /// Create a new `Instance`, allocating space for its trailing `VMContext`
    /// and returning an `InstanceHandle` to it.
161 ///
162 /// # Safety
163 ///
164 /// The `req.imports` field must be appropriately sized/typed for the module
165 /// being allocated according to `req.runtime_info`. Additionally `memories`
166 /// and `tables` must have been allocated for `req.store`.
167 unsafe fn new(
168 req: InstanceAllocationRequest,
169 memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
170 tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
171 ) -> Result<InstanceHandle, OutOfMemory> {
172 let module = req.runtime_info.env_module();
173 let memory_tys = &module.memories;
174 let dropped_elements = EntitySet::with_capacity(module.passive_elements.len())?;
175 let dropped_data = EntitySet::with_capacity(module.passive_data_map.len())?;
176
177 #[cfg(feature = "wmemcheck")]
178 let wmemcheck_state = if req.store.engine().config().wmemcheck {
179 let size = memory_tys
180 .iter()
181 .next()
182 .map(|memory| memory.1.limits.min)
183 .unwrap_or(0)
184 * 64
185 * 1024;
186 Some(Wmemcheck::new(size.try_into().unwrap()))
187 } else {
188 None
189 };
190 #[cfg(not(feature = "wmemcheck"))]
191 let _ = memory_tys;
192
193 let mut ret = OwnedInstance::new(Instance {
194 id: req.id,
195 runtime_info: req.runtime_info.clone(),
196 memories,
197 tables,
198 dropped_elements,
199 dropped_data,
200 #[cfg(feature = "wmemcheck")]
201 wmemcheck_state,
202 store: None,
203 vmctx: OwnedVMContext::new(),
204 })?;
205
206 // SAFETY: this vmctx was allocated with the same layout above, so it
207 // should be safe to initialize with the same values here.
208 unsafe {
209 ret.get_mut().initialize_vmctx(req.store, req.imports);
210 }
211 Ok(ret)
212 }
213
214 /// Converts a raw `VMContext` pointer into a raw `Instance` pointer.
215 ///
216 /// # Safety
217 ///
218 /// Calling this function safely requires that `vmctx` is a valid allocation
219 /// of a `VMContext` which is derived from `Instance::new`. To safely
220 /// convert the returned raw pointer into a safe instance pointer callers
221 /// will also want to uphold guarantees such as:
222 ///
223 /// * The instance should not be in use elsewhere. For example you can't
224 /// call this function twice, turn both raw pointers into safe pointers,
225 /// and then use both safe pointers.
226 /// * There should be no other active mutable borrow to any other instance
227 /// within the same store. Note that this is not restricted to just this
228 /// instance pointer, but to all instances in a store. Instances can
229 /// safely traverse to other instances "laterally" meaning that a mutable
230 /// borrow on one is a mutable borrow on all.
    /// * There should be no active mutable borrow of the store accessible at
    ///   the same time the returned pointer is turned into a safe borrow.
    ///   Instances are owned by a store and a store can be used to acquire a
    ///   safe instance borrow at any time.
234 /// * The lifetime of the usage of the instance should not be unnecessarily
235 /// long, for example it cannot be `'static`.
236 ///
237 /// Other entrypoints exist for converting from a raw `VMContext` to a safe
238 /// pointer such as:
239 ///
240 /// * `Instance::enter_host_from_wasm`
241 /// * `Instance::sibling_vmctx{,_mut}`
242 ///
243 /// These place further restrictions on the API signature to satisfy some of
244 /// the above points.
245 #[inline]
246 pub(crate) unsafe fn from_vmctx(vmctx: NonNull<VMContext>) -> NonNull<Instance> {
247 // SAFETY: The validity of `byte_sub` relies on `vmctx` being a valid
248 // allocation.
249 unsafe {
250 vmctx
251 .byte_sub(mem::size_of::<Instance>())
252 .cast::<Instance>()
253 }
254 }
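
    // Layout relationship assumed by `from_vmctx` above (illustrative diagram,
    // not literal field offsets):
    //
    //     +------------------------+  <- NonNull<Instance> (returned)
    //     | Instance fields ...    |
    //     +------------------------+  <- NonNull<VMContext> (argument)
    //     | VMContext, dynamically |
    //     | sized per `VMOffsets`  |
    //     +------------------------+
    //
    // Because the struct is `#[repr(C)]` with `vmctx` as its final field, the
    // instance pointer is exactly `vmctx_ptr - size_of::<Instance>()`.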
255
256 /// Encapsulated entrypoint to the host from WebAssembly, converting a raw
257 /// `VMContext` pointer into a `VMStore` plus an `InstanceId`.
258 ///
    /// This is an entrypoint for core wasm entering back into the host, used
    /// for both host functions and libcalls, for example. It will execute the
    /// closure `f` with safer internal types than a raw `VMContext` pointer.
263 ///
264 /// The closure `f` will have its errors caught, handled, and translated to
265 /// an ABI-safe return value to give back to wasm. This includes both normal
266 /// errors such as traps as well as panics.
267 ///
268 /// # Safety
269 ///
270 /// Callers must ensure that `vmctx` is a valid allocation and is safe to
271 /// dereference at this time. That's generally only true when it's a
272 /// wasm-provided value and this is the first function called after entering
273 /// the host. Otherwise this could unsafely alias the store with a mutable
274 /// pointer, for example.
275 #[inline]
276 pub(crate) unsafe fn enter_host_from_wasm<R>(
277 vmctx: NonNull<VMContext>,
278 f: impl FnOnce(&mut dyn VMStore, InstanceId) -> R,
279 ) -> R::Abi
280 where
281 R: HostResult,
282 {
283 // SAFETY: It's a contract of this function that `vmctx` is a valid
284 // pointer with neither the store nor other instances actively in use
285 // when this is called, so it should be safe to acquire a mutable
286 // pointer to the store and read the instance pointer.
287 let (store, instance) = unsafe {
288 let instance = Instance::from_vmctx(vmctx);
289 let instance = instance.as_ref();
290 let store = &mut *instance.store.unwrap().0.as_ptr();
291 (store, instance.id)
292 };
293
294 // Thread the `store` and `instance` through panic/trap infrastructure
295 // back into `f`.
296 catch_unwind_and_record_trap(store, |store| f(store, instance))
297 }
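
    // Illustrative sketch of the intended call pattern for
    // `enter_host_from_wasm`, where it is the very first thing the host does
    // with a wasm-provided `vmctx`. The `Result<(), Trap>` closure type is an
    // assumption made only for this example.
    //
    //     // Returns the ABI-level value to hand back to compiled wasm code.
    //     unsafe {
    //         Instance::enter_host_from_wasm(vmctx, |store, id| -> Result<(), Trap> {
    //             // `store: &mut dyn VMStore`, `id: InstanceId`; traps and
    //             // panics raised here are caught and encoded into the ABI
    //             // return value.
    //             let _ = (store, id);
    //             Ok(())
    //         })
    //     }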
298
299 /// Converts the provided `*mut VMContext` to an `Instance` pointer and
300 /// returns it with the same lifetime as `self`.
301 ///
    /// This function can be used when traversing a `VMContext`, for example to
    /// reach the instance that an import refers to.
304 ///
305 /// # Safety
306 ///
307 /// This function requires that the `vmctx` pointer is indeed valid and
308 /// from the store that `self` belongs to.
309 #[inline]
310 unsafe fn sibling_vmctx<'a>(&'a self, vmctx: NonNull<VMContext>) -> &'a Instance {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer, so the pointer arithmetic in `from_vmctx` is valid.
313 let ptr = unsafe { Instance::from_vmctx(vmctx) };
314 // SAFETY: it's a contract of this function itself that `vmctx` is a
315 // valid pointer to dereference. Additionally the lifetime of the return
316 // value is constrained to be the same as `self` to avoid granting a
317 // too-long lifetime.
318 unsafe { ptr.as_ref() }
319 }
320
321 /// Same as [`Self::sibling_vmctx`], but the mutable version.
322 ///
323 /// # Safety
324 ///
325 /// This function requires that the `vmctx` pointer is indeed valid and
326 /// from the store that `self` belongs to.
327 ///
328 /// (Note that it is *NOT* required that `vmctx` be distinct from this
329 /// instance's `vmctx`, or that usage of the resulting instance is limited
330 /// to its defined items! The returned borrow has the same lifetime as
331 /// `self`, which means that this instance cannot be used while the
332 /// resulting instance is in use, and we therefore do not need to worry
333 /// about mutable aliasing between this instance and the resulting
334 /// instance.)
335 #[inline]
336 unsafe fn sibling_vmctx_mut<'a>(
337 self: Pin<&'a mut Self>,
338 vmctx: NonNull<VMContext>,
339 ) -> Pin<&'a mut Instance> {
340 // SAFETY: it's a contract of this function itself that `vmctx` is a
341 // valid pointer such that this pointer arithmetic is valid.
342 let mut ptr = unsafe { Instance::from_vmctx(vmctx) };
343
344 // SAFETY: it's a contract of this function itself that `vmctx` is a
345 // valid pointer to dereference. Additionally the lifetime of the return
346 // value is constrained to be the same as `self` to avoid granting a
347 // too-long lifetime. Finally mutable references to an instance are
348 // always through `Pin`, so it's safe to create a pin-pointer here.
349 unsafe { Pin::new_unchecked(ptr.as_mut()) }
350 }
351
352 pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
353 self.runtime_info.env_module()
354 }
355
356 pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
357 match &self.runtime_info {
358 ModuleRuntimeInfo::Module(m) => Some(m),
359 ModuleRuntimeInfo::Bare(_) => None,
360 }
361 }
362
363 /// Translate a module-level interned type index into an engine-level
364 /// interned type index.
365 #[cfg(feature = "gc")]
366 pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
367 self.runtime_info.engine_type_index(module_index)
368 }
369
370 #[inline]
371 fn offsets(&self) -> &VMOffsets<HostPtr> {
372 self.runtime_info.offsets()
373 }
374
375 /// Return the indexed `VMFunctionImport`.
376 fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
377 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
378 }
379
    /// Return the indexed `VMTableImport`.
381 fn imported_table(&self, index: TableIndex) -> &VMTableImport {
382 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
383 }
384
385 /// Return the indexed `VMMemoryImport`.
386 fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
387 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
388 }
389
390 /// Return the indexed `VMGlobalImport`.
391 fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
392 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
393 }
394
395 /// Return the indexed `VMTagImport`.
396 fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
397 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
398 }
399
400 /// Return the indexed `VMTagDefinition`.
401 pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
402 unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
403 }
404
405 /// Return the indexed `VMTableDefinition`.
406 pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
407 unsafe { self.table_ptr(index).read() }
408 }
409
    /// Update the `VMTableDefinition` stored in the vmctx for the given defined table.
411 fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
412 unsafe {
413 self.table_ptr(index).write(table);
414 }
415 }
416
417 /// Return a pointer to the `index`'th table within this instance, stored
418 /// in vmctx memory.
419 pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
420 unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
421 }
422
423 /// Get a locally defined or imported memory.
424 pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
425 if let Some(defined_index) = self.env_module().defined_memory_index(index) {
426 self.memory(defined_index)
427 } else {
428 let import = self.imported_memory(index);
429 unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
430 }
431 }
432
433 /// Return the indexed `VMMemoryDefinition`, loaded from vmctx memory
434 /// already.
435 #[inline]
436 pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
437 unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
438 }
439
    /// Write the given `VMMemoryDefinition` for the indexed defined memory.
441 fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
442 unsafe {
443 self.memory_ptr(index).write(mem);
444 }
445 }
446
447 /// Return the address of the specified memory at `index` within this vmctx.
448 ///
449 /// Note that the returned pointer resides in wasm-code-readable-memory in
450 /// the vmctx.
451 #[inline]
452 pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
453 unsafe {
454 self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
455 .as_non_null()
456 }
457 }
458
459 /// Return the indexed `VMGlobalDefinition`.
460 pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
461 unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
462 }
463
464 /// Get all globals within this instance.
465 ///
    /// Returns both imported and defined globals.
467 ///
468 /// Returns both exported and non-exported globals.
469 ///
470 /// Gives access to the full globals space.
471 pub fn all_globals(
472 &self,
473 store: StoreId,
474 ) -> impl ExactSizeIterator<Item = (GlobalIndex, crate::Global)> + '_ {
475 let module = self.env_module();
476 module
477 .globals
478 .keys()
479 .map(move |idx| (idx, self.get_exported_global(store, idx)))
480 }
481
482 /// Get the globals defined in this instance (not imported).
483 pub fn defined_globals(
484 &self,
485 store: StoreId,
486 ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, crate::Global)> + '_ {
487 let module = self.env_module();
488 self.all_globals(store)
489 .skip(module.num_imported_globals)
490 .map(move |(i, global)| (module.defined_global_index(i).unwrap(), global))
491 }
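
    // Worked example of the index mapping above (illustrative numbers): in a
    // module with 2 imported globals and 3 defined globals, `all_globals`
    // yields `GlobalIndex` 0 through 4, while `defined_globals` skips the
    // first 2 and yields `DefinedGlobalIndex` 0 through 2 paired with the
    // `Global` handles for `GlobalIndex` 2 through 4.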
492
    /// Return a pointer to the `VMStoreContext` pointer stored within this vmctx.
494 #[inline]
495 pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
496 unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
497 }
498
499 /// Return a pointer to the global epoch counter used by this instance.
500 #[cfg(target_has_atomic = "64")]
501 pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
502 let offset = self.offsets().ptr.vmctx_epoch_ptr();
503 unsafe { self.vmctx_plus_offset_mut(offset) }
504 }
505
506 /// Return a pointer to the collector-specific heap data.
507 pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
508 let offset = self.offsets().ptr.vmctx_gc_heap_data();
509 unsafe { self.vmctx_plus_offset_mut(offset) }
510 }
511
512 pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: &StoreOpaque) {
513 // FIXME: should be more targeted ideally with the `unsafe` than just
514 // throwing this entire function in a large `unsafe` block.
515 unsafe {
516 *self.as_mut().store_mut() = Some(VMStoreRawPtr(store.traitobj()));
517 self.vm_store_context()
518 .write(Some(store.vm_store_context_ptr().into()));
519 #[cfg(target_has_atomic = "64")]
520 {
521 *self.as_mut().epoch_ptr() =
522 Some(NonNull::from(store.engine().epoch_counter()).into());
523 }
524
525 if self.env_module().needs_gc_heap {
526 self.as_mut().set_gc_heap(Some(store.unwrap_gc_store()));
527 } else {
528 self.as_mut().set_gc_heap(None);
529 }
530 }
531 }
532
533 unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
534 if let Some(gc_store) = gc_store {
535 *self.gc_heap_data() = Some(unsafe { gc_store.gc_heap.vmctx_gc_heap_data().into() });
536 } else {
537 *self.gc_heap_data() = None;
538 }
539 }
540
541 /// Return a reference to the vmctx used by compiled wasm code.
542 #[inline]
543 pub fn vmctx(&self) -> NonNull<VMContext> {
544 InstanceLayout::vmctx(self)
545 }
546
547 /// Lookup a function by index.
548 ///
549 /// # Panics
550 ///
551 /// Panics if `index` is out of bounds for this instance.
552 ///
553 /// # Safety
554 ///
555 /// The `store` parameter must be the store that owns this instance and the
556 /// functions that this instance can reference.
557 pub unsafe fn get_exported_func(
558 self: Pin<&mut Self>,
559 registry: &ModuleRegistry,
560 store: StoreId,
561 index: FuncIndex,
562 ) -> crate::Func {
563 let func_ref = self.get_func_ref(registry, index).unwrap();
564
565 // SAFETY: the validity of `func_ref` is guaranteed by the validity of
566 // `self`, and the contract that `store` must own `func_ref` is a
567 // contract of this function itself.
568 unsafe { crate::Func::from_vm_func_ref(store, func_ref) }
569 }
570
571 /// Lookup a table by index.
572 ///
573 /// # Panics
574 ///
575 /// Panics if `index` is out of bounds for this instance.
576 pub fn get_exported_table(&self, store: StoreId, index: TableIndex) -> crate::Table {
577 let (id, def_index) = if let Some(def_index) = self.env_module().defined_table_index(index)
578 {
579 (self.id, def_index)
580 } else {
581 let import = self.imported_table(index);
582 // SAFETY: validity of this `Instance` guarantees validity of the
583 // `vmctx` pointer being read here to find the transitive
584 // `InstanceId` that the import is associated with.
585 let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
586 (id, import.index)
587 };
588 crate::Table::from_raw(StoreInstanceId::new(store, id), def_index)
589 }
590
591 /// Lookup a memory by index.
592 ///
593 /// # Panics
594 ///
595 /// Panics if `index` is out-of-bounds for this instance.
596 #[cfg_attr(
597 not(feature = "threads"),
598 expect(unused_variables, reason = "definitions cfg'd to dummy",)
599 )]
600 pub fn get_exported_memory(&self, store: StoreId, index: MemoryIndex) -> ExportMemory {
601 let module = self.env_module();
602 if module.memories[index].shared {
603 let (memory, import) =
604 if let Some(def_index) = self.env_module().defined_memory_index(index) {
605 (
606 self.get_defined_memory(def_index),
607 self.get_defined_memory_vmimport(def_index),
608 )
609 } else {
610 let import = self.imported_memory(index);
611 // SAFETY: validity of this `Instance` guarantees validity of
612 // the `vmctx` pointer being read here to find the transitive
613 // `InstanceId` that the import is associated with.
614 let instance = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()) };
615 (instance.get_defined_memory(import.index), *import)
616 };
617
618 let vm = memory.as_shared_memory().unwrap().clone();
619 ExportMemory::Shared(vm, import)
620 } else {
621 let (id, def_index) =
622 if let Some(def_index) = self.env_module().defined_memory_index(index) {
623 (self.id, def_index)
624 } else {
625 let import = self.imported_memory(index);
626 // SAFETY: validity of this `Instance` guarantees validity of the
627 // `vmctx` pointer being read here to find the transitive
628 // `InstanceId` that the import is associated with.
629 let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
630 (id, import.index)
631 };
632
633 // SAFETY: `from_raw` requires that the memory is not shared, which
634 // was tested above in this if/else.
635 let store_id = StoreInstanceId::new(store, id);
636 ExportMemory::Unshared(unsafe { crate::Memory::from_raw(store_id, def_index) })
637 }
638 }
639
640 /// Lookup a global by index.
641 ///
642 /// # Panics
643 ///
644 /// Panics if `index` is out-of-bounds for this instance.
645 pub(crate) fn get_exported_global(&self, store: StoreId, index: GlobalIndex) -> crate::Global {
        // If this global is defined within this instance, then the `Global` is
        // easy to construct.
648 if let Some(def_index) = self.env_module().defined_global_index(index) {
649 let instance = StoreInstanceId::new(store, self.id);
650 return crate::Global::from_core(instance, def_index);
651 }
652
653 // For imported globals it's required to match on the `kind` to
654 // determine which `Global` constructor is going to be invoked.
655 let import = self.imported_global(index);
656 match import.kind {
657 VMGlobalKind::Host(index) => crate::Global::from_host(store, index),
658 VMGlobalKind::Instance(index) => {
659 // SAFETY: validity of this `&Instance` means validity of its
660 // imports meaning we can read the id of the vmctx within.
661 let id = unsafe {
662 let vmctx = VMContext::from_opaque(import.vmctx.unwrap().as_non_null());
663 self.sibling_vmctx(vmctx).id
664 };
665 crate::Global::from_core(StoreInstanceId::new(store, id), index)
666 }
667 #[cfg(feature = "component-model")]
668 VMGlobalKind::ComponentFlags(index) => {
669 // SAFETY: validity of this `&Instance` means validity of its
670 // imports meaning we can read the id of the vmctx within.
671 let id = unsafe {
672 let vmctx = super::component::VMComponentContext::from_opaque(
673 import.vmctx.unwrap().as_non_null(),
674 );
675 super::component::ComponentInstance::vmctx_instance_id(vmctx)
676 };
677 crate::Global::from_component_flags(
678 crate::component::store::StoreComponentInstanceId::new(store, id),
679 index,
680 )
681 }
682 #[cfg(feature = "component-model")]
683 VMGlobalKind::TaskMayBlock => {
684 // SAFETY: validity of this `&Instance` means validity of its
685 // imports meaning we can read the id of the vmctx within.
686 let id = unsafe {
687 let vmctx = super::component::VMComponentContext::from_opaque(
688 import.vmctx.unwrap().as_non_null(),
689 );
690 super::component::ComponentInstance::vmctx_instance_id(vmctx)
691 };
692 crate::Global::from_task_may_block(
693 crate::component::store::StoreComponentInstanceId::new(store, id),
694 )
695 }
696 }
697 }
698
699 /// Get an exported tag by index.
700 ///
701 /// # Panics
702 ///
703 /// Panics if the index is out-of-range.
704 pub fn get_exported_tag(&self, store: StoreId, index: TagIndex) -> crate::Tag {
705 let (id, def_index) = if let Some(def_index) = self.env_module().defined_tag_index(index) {
706 (self.id, def_index)
707 } else {
708 let import = self.imported_tag(index);
709 // SAFETY: validity of this `Instance` guarantees validity of the
710 // `vmctx` pointer being read here to find the transitive
711 // `InstanceId` that the import is associated with.
712 let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
713 (id, import.index)
714 };
715 crate::Tag::from_raw(StoreInstanceId::new(store, id), def_index)
716 }
717
    /// Grow memory by the specified number of pages.
    ///
    /// Returns `None` if the memory can't be grown by the specified number of
    /// pages. Returns `Some` with the old size in bytes if growth succeeded.
723 pub(crate) async fn memory_grow(
724 mut self: Pin<&mut Self>,
725 limiter: Option<&mut StoreResourceLimiter<'_>>,
726 idx: DefinedMemoryIndex,
727 delta: u64,
728 ) -> Result<Option<usize>, Error> {
729 let memory = &mut self.as_mut().memories_mut()[idx].1;
730
        // SAFETY: this method is the designated safe wrapper around
        // `Memory::grow` because it updates the `VMMemoryDefinition` in this
        // instance's vmctx after the growth operation below.
734 let result = unsafe { memory.grow(delta, limiter).await };
735
736 // Update the state used by a non-shared Wasm memory in case the base
737 // pointer and/or the length changed.
738 if memory.as_shared_memory().is_none() {
739 let vmmemory = memory.vmmemory();
740 self.set_memory(idx, vmmemory);
741 }
742
743 result
744 }
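
    // Illustrative usage sketch. The `store.instance_mut(...)` plumbing shown
    // here is an assumption based on how instances are accessed elsewhere in
    // this file; it is not the only way to reach a `Pin<&mut Instance>`.
    //
    //     let old_size: Option<usize> = store
    //         .instance_mut(instance_id)
    //         .memory_grow(None, DefinedMemoryIndex::new(0), 1)
    //         .await?;
    //
    // `old_size` is the previous size in bytes, or `None` if growth was
    // refused (e.g. it would exceed the memory's maximum).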
745
746 pub(crate) fn table_element_type(
747 self: Pin<&mut Self>,
748 table_index: TableIndex,
749 ) -> TableElementType {
750 self.get_table(table_index).element_type()
751 }
752
753 /// Performs a grow operation on the `table_index` specified using `grow`.
754 ///
755 /// This will handle updating the VMTableDefinition internally as necessary.
756 pub(crate) async fn defined_table_grow(
757 mut self: Pin<&mut Self>,
758 table_index: DefinedTableIndex,
759 grow: impl AsyncFnOnce(&mut Table) -> Result<Option<usize>>,
760 ) -> Result<Option<usize>> {
761 let table = self.as_mut().get_defined_table(table_index);
762 let result = grow(table).await;
763 let element = table.vmtable();
764 self.set_table(table_index, element);
765 result
766 }
767
768 fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
769 let size = mem::size_of::<Self>()
770 .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
771 .unwrap();
772 let align = mem::align_of::<Self>();
773 Layout::from_size_align(size, align).unwrap()
774 }
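
    // Worked example of the layout computation above (sizes are illustrative,
    // not real): if `size_of::<Instance>()` were 200 bytes and
    // `offsets.size_of_vmctx()` reported 344 bytes, the allocation would be
    // 544 bytes with `align_of::<Instance>()` alignment, and the `VMContext`
    // would begin at byte offset 200, which is exactly the
    // `size_of::<Instance>()` offset that `from_vmctx` subtracts to recover
    // the `Instance` pointer.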
775
776 fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
777 unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
778 }
779
780 /// Construct a new VMFuncRef for the given function
781 /// (imported or defined in this module) and store into the given
782 /// location. Used during lazy initialization.
783 ///
784 /// Note that our current lazy-init scheme actually calls this every
785 /// time the funcref pointer is fetched; this turns out to be better
786 /// than tracking state related to whether it's been initialized
787 /// before, because resetting that state on (re)instantiation is
788 /// very expensive if there are many funcrefs.
789 ///
790 /// # Safety
791 ///
    /// This function requires that `into` is a valid pointer.
793 unsafe fn construct_func_ref(
794 self: Pin<&mut Self>,
795 registry: &ModuleRegistry,
796 index: FuncIndex,
797 type_index: VMSharedTypeIndex,
798 into: *mut VMFuncRef,
799 ) {
800 let module_with_code = ModuleWithCode::in_store(
801 registry,
802 self.runtime_module()
803 .expect("funcref impossible in fake module"),
804 )
805 .expect("module not in store");
806
807 let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
808 VMFuncRef {
809 array_call: NonNull::from(
810 module_with_code
811 .array_to_wasm_trampoline(def_index)
812 .expect("should have array-to-Wasm trampoline for escaping function"),
813 )
814 .cast()
815 .into(),
816 wasm_call: Some(
817 NonNull::new(
818 module_with_code
819 .finished_function(def_index)
820 .as_ptr()
821 .cast::<VMWasmCallFunction>()
822 .cast_mut(),
823 )
824 .unwrap()
825 .into(),
826 ),
827 vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
828 type_index,
829 }
830 } else {
831 let import = self.imported_function(index);
832 VMFuncRef {
833 array_call: import.array_call,
834 wasm_call: Some(import.wasm_call),
835 vmctx: import.vmctx,
836 type_index,
837 }
838 };
839
840 // SAFETY: the unsafe contract here is forwarded to callers of this
841 // function.
842 unsafe {
843 ptr::write(into, func_ref);
844 }
845 }
846
847 /// Get a `&VMFuncRef` for the given `FuncIndex`.
848 ///
849 /// Returns `None` if the index is the reserved index value.
850 ///
851 /// The returned reference is a stable reference that won't be moved and can
852 /// be passed into JIT code.
853 pub(crate) fn get_func_ref(
854 self: Pin<&mut Self>,
855 registry: &ModuleRegistry,
856 index: FuncIndex,
857 ) -> Option<NonNull<VMFuncRef>> {
858 if index == FuncIndex::reserved_value() {
859 return None;
860 }
861
        // For now, we eagerly initialize a funcref struct in-place
863 // whenever asked for a reference to it. This is mostly
864 // fine, because in practice each funcref is unlikely to be
865 // requested more than a few times: once-ish for funcref
866 // tables used for call_indirect (the usual compilation
867 // strategy places each function in the table at most once),
868 // and once or a few times when fetching exports via API.
869 // Note that for any case driven by table accesses, the lazy
870 // table init behaves like a higher-level cache layer that
871 // protects this initialization from happening multiple
872 // times, via that particular table at least.
873 //
874 // When `ref.func` becomes more commonly used or if we
875 // otherwise see a use-case where this becomes a hotpath,
876 // we can reconsider by using some state to track
877 // "uninitialized" explicitly, for example by zeroing the
878 // funcrefs (perhaps together with other
879 // zeroed-at-instantiate-time state) or using a separate
880 // is-initialized bitmap.
881 //
882 // We arrived at this design because zeroing memory is
883 // expensive, so it's better for instantiation performance
884 // if we don't have to track "is-initialized" state at
885 // all!
886 let func = &self.env_module().functions[index];
887 let sig = func.signature.unwrap_engine_type_index();
888
        // SAFETY: the offset calculated here should be correct with respect to
        // `self.offsets()`.
891 let func_ref = unsafe {
892 self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
893 };
894
895 // SAFETY: the `func_ref` ptr should be valid as it's within our
896 // `VMContext` area.
897 unsafe {
898 self.construct_func_ref(registry, index, sig, func_ref.as_ptr());
899 }
900
901 Some(func_ref)
902 }
903
904 /// Get the passive elements segment at the given index.
905 ///
906 /// Returns an empty segment if the index is out of bounds or if the segment
907 /// has been dropped.
908 ///
909 /// The `storage` parameter should always be `None`; it is a bit of a hack
910 /// to work around lifetime issues.
911 pub(crate) fn passive_element_segment<'a>(
912 &self,
913 storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
914 elem_index: ElemIndex,
915 ) -> &'a TableSegmentElements {
916 debug_assert!(storage.is_none());
917 *storage = Some((
918 // TODO: this `clone()` shouldn't be necessary but is used for now to
            // inform `rustc` that the lifetime of the elements here is
            // disconnected from the lifetime of `self`.
921 self.env_module().clone(),
922 // NB: fall back to an expressions-based list of elements which
923 // doesn't have static type information (as opposed to
924 // `TableSegmentElements::Functions`) since we don't know what type
925 // is needed in the caller's context. Let the type be inferred by
926 // how they use the segment.
927 TableSegmentElements::Expressions(Box::new([])),
928 ));
929 let (module, empty) = storage.as_ref().unwrap();
930
931 match module.passive_elements_map.get(&elem_index) {
932 Some(index) if !self.dropped_elements.contains(elem_index) => {
933 &module.passive_elements[*index]
934 }
935 _ => empty,
936 }
937 }
938
939 /// The `table.init` operation: initializes a portion of a table with a
940 /// passive element.
941 ///
942 /// # Errors
943 ///
944 /// Returns a `Trap` error when the range within the table is out of bounds
945 /// or the range within the passive element is out of bounds.
946 pub(crate) async fn table_init(
947 store: &mut StoreOpaque,
948 limiter: Option<&mut StoreResourceLimiter<'_>>,
949 asyncness: Asyncness,
950 instance: InstanceId,
951 table_index: TableIndex,
952 elem_index: ElemIndex,
953 dst: u64,
954 src: u64,
955 len: u64,
956 ) -> Result<()> {
957 let mut storage = None;
958 let elements = store
959 .instance(instance)
960 .passive_element_segment(&mut storage, elem_index);
961 let mut const_evaluator = ConstExprEvaluator::default();
962 Self::table_init_segment(
963 store,
964 limiter,
965 asyncness,
966 instance,
967 &mut const_evaluator,
968 table_index,
969 elements,
970 dst,
971 src,
972 len,
973 )
974 .await
975 }
976
977 pub(crate) async fn table_init_segment(
978 store: &mut StoreOpaque,
979 mut limiter: Option<&mut StoreResourceLimiter<'_>>,
980 asyncness: Asyncness,
981 elements_instance_id: InstanceId,
982 const_evaluator: &mut ConstExprEvaluator,
983 table_index: TableIndex,
984 elements: &TableSegmentElements,
985 dst: u64,
986 src: u64,
987 len: u64,
988 ) -> Result<()> {
989 // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
990
991 let store_id = store.id();
992 let elements_instance = store.instance_mut(elements_instance_id);
993 let table = elements_instance.get_exported_table(store_id, table_index);
994 let table_size = table._size(store);
995
996 // Perform a bounds check on the table being written to. This is done by
997 // ensuring that `dst + len <= table.size()` via checked arithmetic.
998 //
999 // Note that the bounds check for the element segment happens below when
1000 // the original segment is sliced via `src` and `len`.
1001 table_size
1002 .checked_sub(dst)
1003 .and_then(|i| i.checked_sub(len))
1004 .ok_or(Trap::TableOutOfBounds)?;
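        // Worked example of the check above (illustrative numbers): for a
        // table of size 10, `dst = 7` and `len = 4` computes `10 - 7 = 3` and
        // then `3 - 4` underflows, so the chain yields `None` and we trap,
        // matching the `dst + len <= size` condition without ever risking
        // overflow in a `dst + len` addition.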
1005
1006 let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
1007 let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;
1008
1009 let positions = dst..dst + u64::try_from(len).unwrap();
1010 match elements {
1011 TableSegmentElements::Functions(funcs) => {
1012 let elements = funcs
1013 .get(src..)
1014 .and_then(|s| s.get(..len))
1015 .ok_or(Trap::TableOutOfBounds)?;
1016 for (i, func_idx) in positions.zip(elements) {
1017 let (instance, registry) =
1018 store.instance_and_module_registry_mut(elements_instance_id);
1019 // SAFETY: the `store_id` passed to `get_exported_func` is
1020 // indeed the store that owns the function.
1021 let func = unsafe { instance.get_exported_func(registry, store_id, *func_idx) };
1022 table.set_(store, i, func.into()).unwrap();
1023 }
1024 }
1025 TableSegmentElements::Expressions(exprs) => {
1026 let mut store = OpaqueRootScope::new(store);
1027 let exprs = exprs
1028 .get(src..)
1029 .and_then(|s| s.get(..len))
1030 .ok_or(Trap::TableOutOfBounds)?;
1031 let mut context = ConstEvalContext::new(elements_instance_id, asyncness);
1032 for (i, expr) in positions.zip(exprs) {
1033 let element = const_evaluator
1034 .eval(&mut store, limiter.as_deref_mut(), &mut context, expr)
1035 .await?;
1036 table.set_(&mut store, i, element.ref_().unwrap()).unwrap();
1037 }
1038 }
1039 }
1040
1041 Ok(())
1042 }
1043
1044 /// Drop an element.
1045 pub(crate) fn elem_drop(
1046 self: Pin<&mut Self>,
1047 elem_index: ElemIndex,
1048 ) -> Result<(), OutOfMemory> {
1049 // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop
1050
1051 self.dropped_elements_mut().insert(elem_index)?;
1052
1053 // Note that we don't check that we actually removed a segment because
1054 // dropping a non-passive segment is a no-op (not a trap).
1055
1056 Ok(())
1057 }
1058
1059 /// Get a locally-defined memory.
1060 pub fn get_defined_memory_mut(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> &mut Memory {
1061 &mut self.memories_mut()[index].1
1062 }
1063
1064 /// Get a locally-defined memory.
1065 pub fn get_defined_memory(&self, index: DefinedMemoryIndex) -> &Memory {
1066 &self.memories[index].1
1067 }
1068
1069 pub fn get_defined_memory_vmimport(&self, index: DefinedMemoryIndex) -> VMMemoryImport {
1070 crate::runtime::vm::VMMemoryImport {
1071 from: self.memory_ptr(index).into(),
1072 vmctx: self.vmctx().into(),
1073 index,
1074 }
1075 }
1076
1077 /// Do a `memory.copy`
1078 ///
1079 /// # Errors
1080 ///
1081 /// Returns a `Trap` error when the source or destination ranges are out of
1082 /// bounds.
1083 pub(crate) fn memory_copy(
1084 self: Pin<&mut Self>,
1085 dst_index: MemoryIndex,
1086 dst: u64,
1087 src_index: MemoryIndex,
1088 src: u64,
1089 len: u64,
1090 ) -> Result<(), Trap> {
1091 // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
1092
1093 let src_mem = self.get_memory(src_index);
1094 let dst_mem = self.get_memory(dst_index);
1095
1096 let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
1097 let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
1098 let len = usize::try_from(len).unwrap();
1099
1100 // Bounds and casts are checked above, by this point we know that
1101 // everything is safe.
1102 unsafe {
1103 let dst = dst_mem.base.as_ptr().add(dst);
1104 let src = src_mem.base.as_ptr().add(src);
1105 // FIXME audit whether this is safe in the presence of shared memory
1106 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1107 ptr::copy(src, dst, len);
1108 }
1109
1110 Ok(())
1111 }
1112
1113 fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
1114 let oob = || Trap::MemoryOutOfBounds;
1115 let end = ptr
1116 .checked_add(len)
1117 .and_then(|i| usize::try_from(i).ok())
1118 .ok_or_else(oob)?;
1119 if end > max {
1120 Err(oob())
1121 } else {
1122 Ok(ptr.try_into().unwrap())
1123 }
1124 }
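
    // Worked example of `validate_inbounds` (illustrative numbers): with a
    // 64 KiB memory (`max == 65536`), `validate_inbounds(65536, 65532, 8)`
    // computes `end = 65540 > 65536` and returns `Err(MemoryOutOfBounds)`,
    // while `validate_inbounds(65536, 65532, 4)` returns `Ok(65532)`. A
    // `ptr + len` that would overflow `u64` is also rejected rather than
    // wrapping, courtesy of `checked_add`.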
1125
1126 /// Perform the `memory.fill` operation on a locally defined memory.
1127 ///
1128 /// # Errors
1129 ///
1130 /// Returns a `Trap` error if the memory range is out of bounds.
1131 pub(crate) fn memory_fill(
1132 self: Pin<&mut Self>,
1133 memory_index: DefinedMemoryIndex,
1134 dst: u64,
1135 val: u8,
1136 len: u64,
1137 ) -> Result<(), Trap> {
1138 let memory_index = self.env_module().memory_index(memory_index);
1139 let memory = self.get_memory(memory_index);
1140 let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
1141 let len = usize::try_from(len).unwrap();
1142
1143 // Bounds and casts are checked above, by this point we know that
1144 // everything is safe.
1145 unsafe {
1146 let dst = memory.base.as_ptr().add(dst);
1147 // FIXME audit whether this is safe in the presence of shared memory
1148 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1149 ptr::write_bytes(dst, val, len);
1150 }
1151
1152 Ok(())
1153 }
1154
1155 /// Get the internal storage range of a particular Wasm data segment.
1156 pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
1157 match self.env_module().passive_data_map.get(&index) {
1158 Some(range) if !self.dropped_data.contains(index) => range.clone(),
1159 _ => 0..0,
1160 }
1161 }
1162
1163 /// Given an internal storage range of a Wasm data segment (or subset of a
1164 /// Wasm data segment), get the data's raw bytes.
1165 pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
1166 let start = usize::try_from(range.start).unwrap();
1167 let end = usize::try_from(range.end).unwrap();
1168 &self.runtime_info.wasm_data()[start..end]
1169 }
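
    // Worked example tying the two methods above together (illustrative
    // numbers): if `passive_data_map` maps a `DataIndex` to the range `16..32`,
    // then `wasm_data(16..32)` returns those 16 bytes of the module's data
    // blob. After `data_drop` of that segment, `wasm_data_range` returns `0..0`
    // and a subsequent `memory.init` with a non-zero length traps with
    // `MemoryOutOfBounds`, because the source range is validated against an
    // empty slice.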
1170
1171 /// Performs the `memory.init` operation.
1172 ///
1173 /// # Errors
1174 ///
1175 /// Returns a `Trap` error if the destination range is out of this module's
1176 /// memory's bounds or if the source range is outside the data segment's
1177 /// bounds.
1178 pub(crate) fn memory_init(
1179 self: Pin<&mut Self>,
1180 memory_index: MemoryIndex,
1181 data_index: DataIndex,
1182 dst: u64,
1183 src: u32,
1184 len: u32,
1185 ) -> Result<(), Trap> {
1186 let range = self.wasm_data_range(data_index);
1187 self.memory_init_segment(memory_index, range, dst, src, len)
1188 }
1189
1190 pub(crate) fn memory_init_segment(
1191 self: Pin<&mut Self>,
1192 memory_index: MemoryIndex,
1193 range: Range<u32>,
1194 dst: u64,
1195 src: u32,
1196 len: u32,
1197 ) -> Result<(), Trap> {
1198 // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init
1199
1200 let memory = self.get_memory(memory_index);
1201 let data = self.wasm_data(range);
1202 let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
1203 let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
1204 let len = len as usize;
1205
1206 unsafe {
1207 let src_start = data.as_ptr().add(src);
1208 let dst_start = memory.base.as_ptr().add(dst);
1209 // FIXME audit whether this is safe in the presence of shared memory
1210 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1211 ptr::copy_nonoverlapping(src_start, dst_start, len);
1212 }
1213
1214 Ok(())
1215 }
1216
1217 /// Drop the given data segment, truncating its length to zero.
1218 pub(crate) fn data_drop(
1219 self: Pin<&mut Self>,
1220 data_index: DataIndex,
1221 ) -> Result<(), OutOfMemory> {
1222 self.dropped_data_mut().insert(data_index)?;
1223
1224 // Note that we don't check that we actually removed a segment because
1225 // dropping a non-passive segment is a no-op (not a trap).
1226
1227 Ok(())
1228 }
1229
1230 /// Get a table by index regardless of whether it is locally-defined
1231 /// or an imported, foreign table. Ensure that the given range of
1232 /// elements in the table is lazily initialized. We define this
1233 /// operation all-in-one for safety, to ensure the lazy-init
1234 /// happens.
1235 ///
1236 /// Takes an `Iterator` for the index-range to lazy-initialize,
1237 /// for flexibility. This can be a range, single item, or empty
1238 /// sequence, for example. The iterator should return indices in
1239 /// increasing order, so that the break-at-out-of-bounds behavior
1240 /// works correctly.
1241 pub(crate) fn get_table_with_lazy_init(
1242 self: Pin<&mut Self>,
1243 registry: &ModuleRegistry,
1244 table_index: TableIndex,
1245 range: impl Iterator<Item = u64>,
1246 ) -> &mut Table {
1247 let (idx, instance) = self.defined_table_index_and_instance(table_index);
1248 instance.get_defined_table_with_lazy_init(registry, idx, range)
1249 }
1250
1251 /// Gets the raw runtime table data structure owned by this instance
1252 /// given the provided `idx`.
1253 ///
1254 /// The `range` specified is eagerly initialized for funcref tables.
1255 pub fn get_defined_table_with_lazy_init(
1256 mut self: Pin<&mut Self>,
1257 registry: &ModuleRegistry,
1258 idx: DefinedTableIndex,
1259 range: impl IntoIterator<Item = u64>,
1260 ) -> &mut Table {
1261 let elt_ty = self.tables[idx].1.element_type();
1262
1263 if elt_ty == TableElementType::Func {
1264 for i in range {
1265 match self.tables[idx].1.get_func_maybe_init(i) {
1266 // Uninitialized table element.
1267 Ok(None) => {}
1268 // Initialized table element, move on to the next.
1269 Ok(Some(_)) => continue,
1270 // Out-of-bounds; caller will handle by likely
1271 // throwing a trap. No work to do to lazy-init
1272 // beyond the end.
1273 Err(_) => break,
1274 };
1275
                // The table element `i` is uninitialized and is now being
                // initialized. This must imply that a `precomputed` list of
                // function indices is available for this table. The precomputed
                // list is extracted and then consulted with `i` to determine
                // the function that is going to be initialized. Note that `i`
                // may be outside the limits of the static initialization so
                // it's a fallible `get` instead of an index.
1283 let module = self.env_module();
1284 let precomputed = match &module.table_initialization.initial_values[idx] {
1285 TableInitialValue::Null { precomputed } => precomputed,
1286 TableInitialValue::Expr(_) => unreachable!(),
1287 };
1288 // Panicking here helps catch bugs rather than silently truncating by accident.
1289 let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
1290 let func_ref = func_index
1291 .and_then(|func_index| self.as_mut().get_func_ref(registry, func_index));
1292 self.as_mut().tables_mut()[idx]
1293 .1
1294 .set_func(i, func_ref)
1295 .expect("Table type should match and index should be in-bounds");
1296 }
1297 }
1298
1299 self.get_defined_table(idx)
1300 }
1301
1302 /// Get a table by index regardless of whether it is locally-defined or an
1303 /// imported, foreign table.
1304 pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> &mut Table {
1305 let (idx, instance) = self.defined_table_index_and_instance(table_index);
1306 instance.get_defined_table(idx)
1307 }
1308
1309 /// Get a locally-defined table.
1310 pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> &mut Table {
1311 &mut self.tables_mut()[index].1
1312 }
1313
1314 pub(crate) fn defined_table_index_and_instance<'a>(
1315 self: Pin<&'a mut Self>,
1316 index: TableIndex,
1317 ) -> (DefinedTableIndex, Pin<&'a mut Instance>) {
1318 if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
1319 (defined_table_index, self)
1320 } else {
1321 let import = self.imported_table(index);
1322 let index = import.index;
1323 let vmctx = import.vmctx.as_non_null();
1324 // SAFETY: the validity of `self` means that the reachable instances
1325 // should also all be owned by the same store and fully initialized,
1326 // so it's safe to laterally move from a mutable borrow of this
1327 // instance to a mutable borrow of a sibling instance.
1328 let foreign_instance = unsafe { self.sibling_vmctx_mut(vmctx) };
1329 (index, foreign_instance)
1330 }
1331 }
1332
1333 /// Same as `self.runtime_info.env_module()` but additionally returns the
1334 /// `Pin<&mut Self>` with the same original lifetime.
1335 pub fn module_and_self(self: Pin<&mut Self>) -> (&wasmtime_environ::Module, Pin<&mut Self>) {
        // SAFETY: this function projects both a `&Module` and the
        // `Pin<&mut Self>` it came from, tied to the same lifetime. This is
        // safe because it's a contract of `Pin<&mut Self>` that the
        // `runtime_info` field is never overwritten, and `&mut Module` is
        // never projected out of `Pin<&mut Self>`. Consequently it's safe to
        // have a read-only view of the field while still retaining mutable
        // access to all other fields.
1343 let module = self.runtime_info.env_module();
1344 let module = &raw const *module;
1345 let module = unsafe { &*module };
1346 (module, self)
1347 }
1348
1349 /// Initialize the VMContext data associated with this Instance.
1350 ///
1351 /// The `VMContext` memory is assumed to be uninitialized; any field
1352 /// that we need in a certain state will be explicitly written by this
1353 /// function.
1354 unsafe fn initialize_vmctx(self: Pin<&mut Self>, store: &StoreOpaque, imports: Imports) {
1355 let (module, mut instance) = self.module_and_self();
1356
1357 // SAFETY: the type of the magic field is indeed `u32` and this function
1358 // is initializing its value.
1359 unsafe {
1360 let offsets = instance.runtime_info.offsets();
1361 instance
1362 .vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
1363 .write(VMCONTEXT_MAGIC);
1364 }
1365
1366 // SAFETY: it's up to the caller to provide a valid store pointer here.
1367 unsafe {
1368 instance.as_mut().set_store(store);
1369 }
1370
1371 // Initialize shared types
1372 //
1373 // SAFETY: validity of the vmctx means it should be safe to write to it
1374 // here.
1375 unsafe {
1376 let types = NonNull::from(instance.runtime_info.type_ids());
1377 instance.type_ids_array().write(types.cast().into());
1378 }
1379
1380 // Initialize the built-in functions
1381 //
1382 // SAFETY: the type of the builtin functions field is indeed a pointer
1383 // and the pointer being filled in here, plus the vmctx is valid to
1384 // write to during initialization.
1385 unsafe {
1386 static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
1387 let ptr = BUILTINS.expose_provenance();
1388 let offsets = instance.runtime_info.offsets();
1389 instance
1390 .vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
1391 .write(VmPtr::from(ptr));
1392 }
1393
1394 // Initialize the imports
1395 //
1396 // SAFETY: the vmctx is safe to initialize during this function and
1397 // validity of each item itself is a contract the caller must uphold.
1398 debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
1399 unsafe {
1400 let offsets = instance.runtime_info.offsets();
1401 ptr::copy_nonoverlapping(
1402 imports.functions.as_ptr(),
1403 instance
1404 .vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
1405 .as_ptr(),
1406 imports.functions.len(),
1407 );
1408 debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
1409 ptr::copy_nonoverlapping(
1410 imports.tables.as_ptr(),
1411 instance
1412 .vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
1413 .as_ptr(),
1414 imports.tables.len(),
1415 );
1416 debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
1417 ptr::copy_nonoverlapping(
1418 imports.memories.as_ptr(),
1419 instance
1420 .vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
1421 .as_ptr(),
1422 imports.memories.len(),
1423 );
1424 debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
1425 ptr::copy_nonoverlapping(
1426 imports.globals.as_ptr(),
1427 instance
1428 .vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
1429 .as_ptr(),
1430 imports.globals.len(),
1431 );
1432 debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
1433 ptr::copy_nonoverlapping(
1434 imports.tags.as_ptr(),
1435 instance
1436 .vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
1437 .as_ptr(),
1438 imports.tags.len(),
1439 );
1440 }
1441
1442 // N.B.: there is no need to initialize the funcrefs array because we
1443 // eagerly construct each element in it whenever asked for a reference
1444 // to that element. In other words, there is no state needed to track
1445 // the lazy-init, so we don't need to initialize any state now.
1446
1447 // Initialize the defined tables
1448 //
1449 // SAFETY: it's safe to initialize these tables during initialization
1450 // here and the various types of pointers and such here should all be
1451 // valid.
1452 unsafe {
1453 let offsets = instance.runtime_info.offsets();
1454 let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
1455 let tables = instance.as_mut().tables_mut();
1456 for i in 0..module.num_defined_tables() {
1457 ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
1458 ptr = ptr.add(1);
1459 }
1460 }
1461
1462 // Initialize the defined memories. This fills in both the
1463 // `defined_memories` table and the `owned_memories` table at the same
1464 // time. Entries in `defined_memories` hold a pointer to a definition
1465 // (all memories) whereas the `owned_memories` hold the actual
1466 // definitions of memories owned (not shared) in the module.
1467 //
1468 // SAFETY: it's safe to initialize these memories during initialization
1469 // here and the various types of pointers and such here should all be
1470 // valid.
1471 unsafe {
1472 let offsets = instance.runtime_info.offsets();
1473 let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
1474 let mut owned_ptr =
1475 instance.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
1476 let memories = instance.as_mut().memories_mut();
1477 for i in 0..module.num_defined_memories() {
1478 let defined_memory_index = DefinedMemoryIndex::new(i);
1479 let memory_index = module.memory_index(defined_memory_index);
1480 if module.memories[memory_index].shared {
1481 let def_ptr = memories[defined_memory_index]
1482 .1
1483 .as_shared_memory()
1484 .unwrap()
1485 .vmmemory_ptr();
1486 ptr.write(VmPtr::from(def_ptr));
1487 } else {
1488 owned_ptr.write(memories[defined_memory_index].1.vmmemory());
1489 ptr.write(VmPtr::from(owned_ptr));
1490 owned_ptr = owned_ptr.add(1);
1491 }
1492 ptr = ptr.add(1);
1493 }
1494 }
1495
1496 // Zero-initialize the globals so that nothing is uninitialized memory
1497 // after this function returns. The globals are actually initialized
1498 // with their const expression initializers after the instance is fully
1499 // allocated.
1500 //
        // SAFETY: it's safe to initialize globals during initialization
        // here. Note that while the zero value being written is not valid for
        // all types of globals, it at least initializes the memory to zero
        // rather than leaving it in an undefined state. So it's still unsafe
        // to access these globals after this, but a read of zeroed memory will
        // hopefully crash faster than a read of undefined memory would.
1507 unsafe {
1508 for (index, _init) in module.global_initializers.iter() {
1509 instance.global_ptr(index).write(VMGlobalDefinition::new());
1510 }
1511 }
1512
1513 // Initialize the defined tags
1514 //
1515 // SAFETY: it's safe to initialize these tags during initialization
1516 // here and the various types of pointers and such here should all be
1517 // valid.
1518 unsafe {
1519 let offsets = instance.runtime_info.offsets();
1520 let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
1521 for i in 0..module.num_defined_tags() {
1522 let defined_index = DefinedTagIndex::new(i);
1523 let tag_index = module.tag_index(defined_index);
1524 let tag = module.tags[tag_index];
1525 ptr.write(VMTagDefinition::new(
1526 tag.signature.unwrap_engine_type_index(),
1527 ));
1528 ptr = ptr.add(1);
1529 }
1530 }
1531 }
1532
    /// Attempts to convert the host address `addr` to a WebAssembly-based
    /// address recorded in a `WasmFault`.
1535 ///
1536 /// This method will check all linear memories that this instance contains
1537 /// to see if any of them contain `addr`. If one does then `Some` is
1538 /// returned with metadata about the wasm fault. Otherwise `None` is
1539 /// returned and `addr` doesn't belong to this instance.
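    ///
    /// For example, if one of this instance's memories is accessible at host
    /// addresses `0x1000..0x11000` and `addr` is `0x1500`, the returned fault
    /// reports that memory's `memory_size` and a `wasm_address` of `0x500`,
    /// i.e. `addr - accessible.start`.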
1540 pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
1541 let mut fault = None;
1542 for (_, (_, memory)) in self.memories.iter() {
1543 let accessible = memory.wasm_accessible();
1544 if accessible.start <= addr && addr < accessible.end {
1545 // All linear memories should be disjoint so assert that no
1546 // prior fault has been found.
1547 assert!(fault.is_none());
1548 fault = Some(WasmFault {
1549 memory_size: memory.byte_size(),
1550 wasm_address: u64::try_from(addr - accessible.start).unwrap(),
1551 });
1552 }
1553 }
1554 fault
1555 }
1556
1557 /// Returns the id, within this instance's store, that it's assigned.
1558 pub fn id(&self) -> InstanceId {
1559 self.id
1560 }
1561
1562 /// Get all memories within this instance.
1563 ///
    /// Returns both imported and defined memories.
    ///
    /// Returns both exported and non-exported memories.
    ///
    /// Gives access to the full memory index space.
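    ///
    /// A rough usage sketch (assuming `store_id` identifies the store that
    /// owns this instance):
    ///
    /// ```ignore
    /// for (index, memory) in instance.all_memories(store_id) {
    ///     // `index` is the module-level `MemoryIndex`; `memory` is an
    ///     // `ExportMemory` describing that linear memory.
    ///     let _ = (index, memory);
    /// }
    /// ```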
1569 pub fn all_memories(
1570 &self,
1571 store: StoreId,
1572 ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + '_ {
1573 self.env_module()
1574 .memories
1575 .iter()
1576 .map(move |(i, _)| (i, self.get_exported_memory(store, i)))
1577 }
1578
1579 /// Return the memories defined in this instance (not imported).
1580 pub fn defined_memories<'a>(
1581 &'a self,
1582 store: StoreId,
1583 ) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
1584 let num_imported = self.env_module().num_imported_memories;
1585 self.all_memories(store)
1586 .skip(num_imported)
1587 .map(|(_i, memory)| memory)
1588 }
1589
1590 /// Lookup an item with the given index.
1591 ///
1592 /// # Panics
1593 ///
1594 /// Panics if `export` is not valid for this instance.
1595 ///
1596 /// # Safety
1597 ///
1598 /// This function requires that `store` is the correct store which owns this
1599 /// instance.
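    ///
    /// A minimal sketch of how the result is typically consumed (assuming
    /// `registry`, `store_id`, and `index` all come from the store that owns
    /// this instance):
    ///
    /// ```ignore
    /// match unsafe { instance.get_export_by_index_mut(registry, store_id, index) } {
    ///     Export::Memory(m) => { /* an unshared linear memory */ }
    ///     Export::SharedMemory(m, i) => { /* a shared linear memory */ }
    ///     _ => { /* functions, globals, tables, and tags */ }
    /// }
    /// ```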
1600 pub unsafe fn get_export_by_index_mut(
1601 self: Pin<&mut Self>,
1602 registry: &ModuleRegistry,
1603 store: StoreId,
1604 export: EntityIndex,
1605 ) -> Export {
1606 match export {
            // SAFETY: the contract of `store` owning this instance is a
            // safety requirement of this function itself.
1609 EntityIndex::Function(i) => {
1610 Export::Function(unsafe { self.get_exported_func(registry, store, i) })
1611 }
1612 EntityIndex::Global(i) => Export::Global(self.get_exported_global(store, i)),
1613 EntityIndex::Table(i) => Export::Table(self.get_exported_table(store, i)),
1614 EntityIndex::Memory(i) => match self.get_exported_memory(store, i) {
1615 ExportMemory::Unshared(m) => Export::Memory(m),
1616 ExportMemory::Shared(m, i) => Export::SharedMemory(m, i),
1617 },
1618 EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(store, i)),
1619 }
1620 }
1621
1622 fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
1623 // SAFETY: this is a pin-projection to get a mutable reference to an
1624 // internal field and is safe so long as the `&mut Self` temporarily
1625 // created is not overwritten, which it isn't here.
1626 unsafe { &mut self.get_unchecked_mut().store }
1627 }
1628
1629 fn dropped_elements_mut(self: Pin<&mut Self>) -> &mut EntitySet<ElemIndex> {
1630 // SAFETY: see `store_mut` above.
1631 unsafe { &mut self.get_unchecked_mut().dropped_elements }
1632 }
1633
1634 fn dropped_data_mut(self: Pin<&mut Self>) -> &mut EntitySet<DataIndex> {
1635 // SAFETY: see `store_mut` above.
1636 unsafe { &mut self.get_unchecked_mut().dropped_data }
1637 }
1638
1639 fn memories_mut(
1640 self: Pin<&mut Self>,
1641 ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
1642 // SAFETY: see `store_mut` above.
1643 unsafe { &mut self.get_unchecked_mut().memories }
1644 }
1645
1646 pub(crate) fn tables_mut(
1647 self: Pin<&mut Self>,
1648 ) -> &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
1649 // SAFETY: see `store_mut` above.
1650 unsafe { &mut self.get_unchecked_mut().tables }
1651 }
1652
1653 #[cfg(feature = "wmemcheck")]
1654 pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
1655 // SAFETY: see `store_mut` above.
1656 unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
1657 }
1658}
1659
1660// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
// last field of `Instance`.
1662unsafe impl InstanceLayout for Instance {
1663 const INIT_ZEROED: bool = false;
1664 type VMContext = VMContext;
1665
1666 fn layout(&self) -> Layout {
1667 Self::alloc_layout(self.runtime_info.offsets())
1668 }
1669
1670 fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
1671 &self.vmctx
1672 }
1673
1674 fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
1675 &mut self.vmctx
1676 }
1677}
1678
1679pub type InstanceHandle = OwnedInstance<Instance>;
1680
1681/// A handle holding an `Instance` of a WebAssembly module.
1682///
1683/// This structure is an owning handle of the `instance` contained internally.
1684/// When this value goes out of scope it will deallocate the `Instance` and all
1685/// memory associated with it.
1686///
1687/// Note that this lives within a `StoreOpaque` on a list of instances that a
1688/// store is keeping alive.
1689#[derive(Debug)]
1690#[repr(transparent)] // guarantee this is a zero-cost wrapper
1691pub struct OwnedInstance<T: InstanceLayout> {
1692 /// The raw pointer to the instance that was allocated.
1693 ///
    /// Note that this is not equivalent to `Box<T>` because the allocation
    /// here has a `VMContext` trailing after it, hence the custom destructor
    /// below which invokes `dealloc` with the appropriate layout.
1698 instance: SendSyncPtr<T>,
1699 _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
1700}
1701
1702/// Structure that must be placed at the end of a type implementing
1703/// `InstanceLayout`.
1704#[repr(align(16))] // match the alignment of VMContext
1705pub struct OwnedVMContext<T> {
    /// A pointer to the `vmctx` field at the end of the structure.
    ///
    /// If you're looking at this, a reasonable question would be "why do we
    /// need a pointer to ourselves?" because, after all, the pointer's value is
1710 /// trivially derivable from any `&Instance` pointer. The rationale for this
1711 /// field's existence is subtle, but it's required for correctness. The
1712 /// short version is "this makes miri happy".
1713 ///
1714 /// The long version of why this field exists is that the rules that MIRI
1715 /// uses to ensure pointers are used correctly have various conditions on
    /// them depending on how pointers are used. More specifically, if `*mut T` is
    /// derived from `&mut T`, then that invalidates all prior pointers derived
1718 /// from the `&mut T`. This means that while we liberally want to re-acquire
1719 /// a `*mut VMContext` throughout the implementation of `Instance` the
1720 /// trivial way, a function `fn vmctx(Pin<&mut Instance>) -> *mut VMContext`
1721 /// would effectively invalidate all prior `*mut VMContext` pointers
1722 /// acquired. The purpose of this field is to serve as a sort of
1723 /// source-of-truth for where `*mut VMContext` pointers come from.
1724 ///
1725 /// This field is initialized when the `Instance` is created with the
1726 /// original allocation's pointer. That means that the provenance of this
1727 /// pointer contains the entire allocation (both instance and `VMContext`).
1728 /// This provenance bit is then "carried through" where `fn vmctx` will base
1729 /// all returned pointers on this pointer itself. This provides the means of
1730 /// never invalidating this pointer throughout MIRI and additionally being
1731 /// able to still temporarily have `Pin<&mut Instance>` methods and such.
1732 ///
1733 /// It's important to note, though, that this is not here purely for MIRI.
1734 /// The careful construction of the `fn vmctx` method has ramifications on
1735 /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
    /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
1737 /// deriving VMContext pointers from this pointer it specifically hints to
1738 /// LLVM that trickery is afoot and it properly informs `noalias` and such
1739 /// annotations and analysis. More-or-less this pointer is actually loaded
1740 /// in LLVM IR which helps defeat otherwise present aliasing optimizations,
1741 /// which we want, since writes to this should basically never be optimized
1742 /// out.
1743 ///
1744 /// As a final note it's worth pointing out that the machine code generated
1745 /// for accessing `fn vmctx` is still as one would expect. This member isn't
1746 /// actually ever loaded at runtime (or at least shouldn't be). Perhaps in
1747 /// the future if the memory consumption of this field is a problem we could
1748 /// shrink it slightly, but for now one extra pointer per wasm instance
1749 /// seems not too bad.
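    ///
    /// As a rough illustration of the hazard described above, consider a
    /// hypothetical accessor that re-borrows the instance on every call
    /// (simplified sketch, not the actual API):
    ///
    /// ```ignore
    /// // Each call takes a fresh `&mut` borrow, so under Rust's aliasing
    /// // rules the raw pointer returned by the first call is invalidated by
    /// // the second call.
    /// let a: *mut VMContext = instance.as_mut().vmctx_via_reborrow();
    /// let b: *mut VMContext = instance.as_mut().vmctx_via_reborrow();
    /// let _ = unsafe { &*a }; // undefined behavior: `a` was invalidated
    /// ```
    ///
    /// Deriving every `*mut VMContext` from this field instead keeps a single
    /// pointer, whose provenance covers the whole allocation, valid for the
    /// instance's entire lifetime.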
1750 vmctx_self_reference: SendSyncPtr<T>,
1751
1752 /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
1753 /// safe operation.
1754 _marker: core::marker::PhantomPinned,
1755}
1756
1757impl<T> OwnedVMContext<T> {
1758 /// Creates a new blank vmctx to place at the end of an instance.
1759 pub fn new() -> OwnedVMContext<T> {
1760 OwnedVMContext {
1761 vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
1762 _marker: core::marker::PhantomPinned,
1763 }
1764 }
1765}
1766
1767/// Helper trait to plumb both core instances and component instances into
1768/// `OwnedInstance` below.
1769///
1770/// # Safety
1771///
1772/// This trait requires `layout` to correctly describe `Self` and appropriately
1773/// allocate space for `Self::VMContext` afterwards. Additionally the field
1774/// returned by `owned_vmctx()` must be the last field in the structure.
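///
/// A minimal sketch of the shape an implementor takes (hypothetical
/// `MyInstance`; the real implementors are `Instance` above and the component
/// instance type):
///
/// ```ignore
/// struct MyInstance {
///     // ... any other state ...
///     vmctx: OwnedVMContext<VMContext>, // must be the last field
/// }
///
/// unsafe impl InstanceLayout for MyInstance {
///     const INIT_ZEROED: bool = false;
///     type VMContext = VMContext;
///
///     // Must describe `MyInstance` plus its trailing `VMContext` data.
///     fn layout(&self) -> Layout {
///         todo!()
///     }
///
///     fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
///         &self.vmctx
///     }
///
///     fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
///         &mut self.vmctx
///     }
/// }
/// ```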
1775pub unsafe trait InstanceLayout {
1776 /// Whether or not to allocate this instance with `alloc_zeroed` or `alloc`.
1777 const INIT_ZEROED: bool;
1778
1779 /// The trailing `VMContext` type at the end of this instance.
1780 type VMContext;
1781
1782 /// The memory layout to use to allocate and deallocate this instance.
1783 fn layout(&self) -> Layout;
1784
1785 fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
1786 fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;
1787
    /// Returns a pointer to the trailing `VMContext`, based on the
    /// `vmctx_self_reference` set above.
1789 #[inline]
1790 fn vmctx(&self) -> NonNull<Self::VMContext> {
1791 // The definition of this method is subtle but intentional. The goal
1792 // here is that effectively this should return `&mut self.vmctx`, but
1793 // it's not quite so simple. Some more documentation is available on the
1794 // `vmctx_self_reference` field, but the general idea is that we're
1795 // creating a pointer to return with proper provenance. Provenance is
1796 // still in the works in Rust at the time of this writing but the load
1797 // of the `self.vmctx_self_reference` field is important here as it
1798 // affects how LLVM thinks about aliasing with respect to the returned
1799 // pointer.
1800 //
1801 // The intention of this method is to codegen to machine code as `&mut
1802 // self.vmctx`, however. While it doesn't show up like this in LLVM IR
1803 // (there's an actual load of the field) it does look like that by the
1804 // time the backend runs. (that's magic to me, the backend removing
1805 // loads...)
1806 let owned_vmctx = self.owned_vmctx();
1807 let owned_vmctx_raw = NonNull::from(owned_vmctx);
1808 // SAFETY: it's part of the contract of `InstanceLayout` and the usage
1809 // with `OwnedInstance` that this indeed points to the vmctx.
1810 let addr = unsafe { owned_vmctx_raw.add(1) };
1811 owned_vmctx
1812 .vmctx_self_reference
1813 .as_non_null()
1814 .with_addr(addr.addr())
1815 }
1816
1817 /// Helper function to access various locations offset from our `*mut
1818 /// VMContext` object.
1819 ///
1820 /// Note that this method takes `&self` as an argument but returns
1821 /// `NonNull<T>` which is frequently used to mutate said memory. This is an
1822 /// intentional design decision where the safety of the modification of
1823 /// memory is placed as a burden onto the caller. The implementation of this
1824 /// method explicitly does not require `&mut self` to acquire mutable
1825 /// provenance to update the `VMContext` region. Instead all pointers into
1826 /// the `VMContext` area have provenance/permissions to write.
1827 ///
    /// Also note, though, that care must be taken to ensure that reads/writes
    /// of memory only happen where appropriate; for example a non-atomic
    /// write (as most are) should never happen concurrently with another read
    /// or write. The burden is generally on the caller to adhere to this.
1832 ///
1833 /// Also of note is that most of the time the usage of this method falls
1834 /// into one of:
1835 ///
1836 /// * Something in the VMContext is being read or written. In that case use
1837 /// `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due to
1838 /// that having a safer lifetime.
1839 ///
1840 /// * A pointer is being created to pass to other VM* data structures. In
1841 /// that situation the lifetime of all VM data structures are typically
1842 /// tied to the `Store<T>` which is what provides the guarantees around
1843 /// concurrency/etc.
1844 ///
1845 /// There's quite a lot of unsafety riding on this method, especially
1846 /// related to the ascription `T` of the byte `offset`. It's hoped that in
    /// the future we're able to settle on a design that's safer, at least in
    /// theory.
1848 ///
1849 /// # Safety
1850 ///
1851 /// This method is unsafe because the `offset` must be within bounds of the
1852 /// `VMContext` object trailing this instance. Additionally `T` must be a
1853 /// valid ascription of the value that resides at that location.
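    ///
    /// A rough usage sketch, mirroring how vmctx initialization earlier in
    /// this file writes the builtin-functions pointer (`offsets` and `ptr`
    /// come from that surrounding context):
    ///
    /// ```ignore
    /// unsafe {
    ///     instance
    ///         .vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
    ///         .write(VmPtr::from(ptr));
    /// }
    /// ```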
1854 unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
1855 // SAFETY: the safety requirements of `byte_add` are forwarded to this
1856 // method's caller.
1857 unsafe {
1858 self.vmctx()
1859 .byte_add(usize::try_from(offset.into()).unwrap())
1860 .cast()
1861 }
1862 }
1863
1864 /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
1865 /// `&self` to the returned reference `&T`.
1866 ///
1867 /// # Safety
1868 ///
1869 /// See the safety documentation of `vmctx_plus_offset_raw`.
1870 unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
1871 // SAFETY: this method has the same safety requirements as
1872 // `vmctx_plus_offset_raw`.
1873 unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
1874 }
1875
1876 /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
1877 /// `&mut self` to the returned reference `&mut T`.
1878 ///
1879 /// # Safety
1880 ///
1881 /// See the safety documentation of `vmctx_plus_offset_raw`.
1882 unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
1883 self: Pin<&mut Self>,
1884 offset: impl Into<u32>,
1885 ) -> &mut T {
1886 // SAFETY: this method has the same safety requirements as
1887 // `vmctx_plus_offset_raw`.
1888 unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
1889 }
1890}
1891
1892impl<T: InstanceLayout> OwnedInstance<T> {
1893 /// Allocates a new `OwnedInstance` and places `instance` inside of it.
1894 ///
    /// This allocates the memory described by `instance.layout()`, moves
    /// `instance` into it, and fixes up its trailing-`VMContext` self
    /// reference.
1896 pub(super) fn new(mut instance: T) -> Result<OwnedInstance<T>, OutOfMemory> {
1897 let layout = instance.layout();
1898 debug_assert!(layout.size() >= size_of_val(&instance));
1899 debug_assert!(layout.align() >= align_of_val(&instance));
1900
1901 // SAFETY: it's up to us to assert that `layout` has a non-zero size,
1902 // which is asserted here.
1903 let ptr = unsafe {
1904 assert!(layout.size() > 0);
1905 if T::INIT_ZEROED {
1906 alloc::alloc::alloc_zeroed(layout)
1907 } else {
1908 alloc::alloc::alloc(layout)
1909 }
1910 };
1911 let Some(instance_ptr) = NonNull::new(ptr.cast::<T>()) else {
1912 return Err(OutOfMemory::new(layout.size()));
1913 };
1914
1915 // SAFETY: it's part of the unsafe contract of `InstanceLayout` that the
1916 // `add` here is appropriate for the layout allocated.
1917 let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
1918 instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();
1919
1920 // SAFETY: we allocated above and it's an unsafe contract of
1921 // `InstanceLayout` that the layout is suitable for writing the
1922 // instance.
1923 unsafe {
1924 instance_ptr.write(instance);
1925 }
1926
1927 let ret = OwnedInstance {
1928 instance: SendSyncPtr::new(instance_ptr),
1929 _marker: marker::PhantomData,
1930 };
1931
1932 // Double-check various vmctx calculations are correct.
1933 debug_assert_eq!(
1934 vmctx_self_reference.addr(),
1935 // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
1936 // the last field to get a pointer to 1-byte-past-the-end of an
1937 // object, which should be valid.
1938 unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
1939 );
1940 debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());
1941
1942 Ok(ret)
1943 }
1944
1945 /// Gets the raw underlying `&Instance` from this handle.
1946 pub fn get(&self) -> &T {
1947 // SAFETY: this is an owned instance handle that retains exclusive
1948 // ownership of the `Instance` inside. With `&self` given we know
        // this pointer is valid and the returned lifetime is connected
1950 // to `self` so that should also be valid.
1951 unsafe { self.instance.as_non_null().as_ref() }
1952 }
1953
1954 /// Same as [`Self::get`] except for mutability.
1955 pub fn get_mut(&mut self) -> Pin<&mut T> {
1956 // SAFETY: The lifetime concerns here are the same as `get` above.
1957 // Otherwise `new_unchecked` is used here to uphold the contract that
1958 // instances are always pinned in memory.
1959 unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
1960 }
1961}
1962
1963impl<T: InstanceLayout> Drop for OwnedInstance<T> {
1964 fn drop(&mut self) {
1965 unsafe {
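            // SAFETY: this allocation was created by `OwnedInstance::new` with
            // this same layout, and `self` exclusively owns it, so it's sound
            // to drop the instance in place and then deallocate its storage.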
1966 let layout = self.get().layout();
1967 ptr::drop_in_place(self.instance.as_ptr());
1968 alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
1969 }
1970 }
1971}