wasmtime/runtime/vm/instance.rs
1//! An `Instance` contains all the runtime state used by execution of a
2//! wasm module (except its callstack and register state). An
3//! `InstanceHandle` is a reference-counting handle for an `Instance`.
4
5use crate::OpaqueRootScope;
6use crate::prelude::*;
7use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
8use crate::runtime::vm::export::{Export, ExportMemory};
9use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
10use crate::runtime::vm::table::{Table, TableElementType};
11use crate::runtime::vm::vmcontext::{
12 VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
13 VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
14 VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
15};
16use crate::runtime::vm::{
17 GcStore, HostResult, Imports, ModuleRuntimeInfo, SendSyncPtr, VMGlobalKind, VMStore,
18 VMStoreRawPtr, VmPtr, VmSafe, WasmFault, catch_unwind_and_record_trap,
19};
20use crate::store::{InstanceId, StoreId, StoreInstanceId, StoreOpaque, StoreResourceLimiter};
21use alloc::sync::Arc;
22use core::alloc::Layout;
23use core::marker;
24use core::ops::Range;
25use core::pin::Pin;
26use core::ptr::NonNull;
27#[cfg(target_has_atomic = "64")]
28use core::sync::atomic::AtomicU64;
29use core::{mem, ptr};
30#[cfg(feature = "gc")]
31use wasmtime_environ::ModuleInternedTypeIndex;
32use wasmtime_environ::{
33 DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
34 ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex, HostPtr, MemoryIndex,
35 Module, PrimaryMap, PtrSize, TableIndex, TableInitialValue, TableSegmentElements, TagIndex,
36 Trap, VMCONTEXT_MAGIC, VMOffsets, VMSharedTypeIndex, packed_option::ReservedValue,
37};
38#[cfg(feature = "wmemcheck")]
39use wasmtime_wmemcheck::Wmemcheck;
40
41mod allocator;
42pub use allocator::*;
43
44/// A type that roughly corresponds to a WebAssembly instance, but is also used
45/// for host-defined objects.
46///
/// Instances here can correspond to actual instantiated modules, but this type
/// is also used ubiquitously for host-defined objects. For example, creating a
/// host-defined memory yields a `module` that looks like it exports a single
/// memory (and similarly for other constructs).
///
/// This `Instance` type is thus the ubiquitous representation for WebAssembly
/// values, whether they were created on the host or through a module.
54///
55/// # Ownership
56///
57/// This structure is never allocated directly but is instead managed through
58/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
59/// dynamic size corresponding to the `module` configured within. Memory
60/// management of this structure is always done through `InstanceHandle` as the
61/// sole owner of an instance.
62///
63/// # `Instance` and `Pin`
64///
/// Every instance is accompanied by trailing memory for the appropriate
/// `VMContext`. The `Instance` also holds `runtime_info` and other
/// information pointing to relevant offsets within that `VMContext`. Thus it
/// is not sound to mutate `runtime_info` after an instance is created. More
/// generally it's also not sound to "swap" instances: given two
/// `&mut Instance` values it's not sound to swap them, as the `VMContext`
/// values would then be inaccurately described.
///
/// To encapsulate this guarantee this type is only ever mutated through Rust's
/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>` which
/// statically disallows safe access to `&mut Instance`. There are assorted
/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
/// individual fields, for example `memories_mut`. More methods can be added as
/// necessary, and methods may also be added to project multiple fields at a
/// time. The precise ergonomics around getting mutable access to some fields
/// (but notably not `runtime_info`) are likely to evolve over time.
///
/// Note that it is essentially never sound to pass around a bare
/// `&mut Instance`; that should always instead be `Pin<&mut Instance>`. All
/// usage of `Pin::new_unchecked` should be confined to a few `unsafe`
/// locations in this module, and it's recommended to use existing helpers if
/// you can.
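///
/// As a rough illustration of the projection pattern in use here, consider
/// this minimal, self-contained sketch (the `Widget` type and its field are
/// hypothetical, not part of this module):
///
/// ```ignore
/// use core::pin::Pin;
///
/// struct Widget {
///     counter: u32,
/// }
///
/// impl Widget {
///     /// Project from a pinned widget to one of its fields, so callers
///     /// never observe a bare `&mut Widget`.
///     fn counter_mut(self: Pin<&mut Self>) -> &mut u32 {
///         // SAFETY: the projected borrow is not used to move the widget.
///         unsafe { &mut self.get_unchecked_mut().counter }
///     }
/// }
/// ```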
87#[repr(C)] // ensure that the vmctx field is last.
88pub struct Instance {
    /// The index, within a `Store`, at which this instance lives.
90 id: InstanceId,
91
92 /// The runtime info (corresponding to the "compiled module"
93 /// abstraction in higher layers) that is retained and needed for
94 /// lazy initialization. This provides access to the underlying
95 /// Wasm module entities, the compiled JIT code, metadata about
96 /// functions, lazy initialization state, etc.
97 runtime_info: ModuleRuntimeInfo,
98
99 /// WebAssembly linear memory data.
100 ///
101 /// This is where all runtime information about defined linear memories in
102 /// this module lives.
103 ///
104 /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
105 /// must be given back to the instance allocator when deallocating each
106 /// memory.
107 memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
108
109 /// WebAssembly table data.
110 ///
111 /// Like memories, this is only for defined tables in the module and
112 /// contains all of their runtime state.
113 ///
114 /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
115 /// must be given back to the instance allocator when deallocating each
116 /// table.
117 tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
118
119 /// Stores the dropped passive element segments in this instantiation by index.
120 /// If the index is present in the set, the segment has been dropped.
121 dropped_elements: EntitySet<ElemIndex>,
122
123 /// Stores the dropped passive data segments in this instantiation by index.
124 /// If the index is present in the set, the segment has been dropped.
125 dropped_data: EntitySet<DataIndex>,
126
127 // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
128 // memory 0.
129 #[cfg(feature = "wmemcheck")]
130 pub(crate) wmemcheck_state: Option<Wmemcheck>,
131
132 /// Self-pointer back to `Store<T>` and its functions. Not present for
133 /// the brief time that `Store<T>` is itself being created. Also not
134 /// present for some niche uses that are disconnected from stores (e.g.
135 /// cross-thread stuff used in `InstancePre`)
136 store: Option<VMStoreRawPtr>,
137
138 /// Additional context used by compiled wasm code. This field is last, and
139 /// represents a dynamically-sized array that extends beyond the nominal
140 /// end of the struct (similar to a flexible array member).
141 vmctx: OwnedVMContext<VMContext>,
142}
143
144impl Instance {
145 /// Create an instance at the given memory address.
146 ///
147 /// It is assumed the memory was properly aligned and the
148 /// allocation was `alloc_size` in bytes.
149 ///
150 /// # Safety
151 ///
152 /// The `req.imports` field must be appropriately sized/typed for the module
153 /// being allocated according to `req.runtime_info`. Additionally `memories`
154 /// and `tables` must have been allocated for `req.store`.
155 unsafe fn new(
156 req: InstanceAllocationRequest,
157 memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
158 tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
159 memory_tys: &PrimaryMap<MemoryIndex, wasmtime_environ::Memory>,
160 ) -> InstanceHandle {
161 let module = req.runtime_info.env_module();
162 let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
163 let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());
164
165 #[cfg(not(feature = "wmemcheck"))]
166 let _ = memory_tys;
167
168 let mut ret = OwnedInstance::new(Instance {
169 id: req.id,
170 runtime_info: req.runtime_info.clone(),
171 memories,
172 tables,
173 dropped_elements,
174 dropped_data,
175 #[cfg(feature = "wmemcheck")]
176 wmemcheck_state: {
177 if req.store.engine().config().wmemcheck {
178 let size = memory_tys
179 .iter()
180 .next()
181 .map(|memory| memory.1.limits.min)
182 .unwrap_or(0)
183 * 64
184 * 1024;
185 Some(Wmemcheck::new(size.try_into().unwrap()))
186 } else {
187 None
188 }
189 },
190 store: None,
191 vmctx: OwnedVMContext::new(),
192 });
193
194 // SAFETY: this vmctx was allocated with the same layout above, so it
195 // should be safe to initialize with the same values here.
196 unsafe {
197 ret.get_mut().initialize_vmctx(
198 module,
199 req.runtime_info.offsets(),
200 req.store,
201 req.imports,
202 );
203 }
204 ret
205 }
206
207 /// Converts a raw `VMContext` pointer into a raw `Instance` pointer.
208 ///
209 /// # Safety
210 ///
211 /// Calling this function safely requires that `vmctx` is a valid allocation
212 /// of a `VMContext` which is derived from `Instance::new`. To safely
213 /// convert the returned raw pointer into a safe instance pointer callers
214 /// will also want to uphold guarantees such as:
215 ///
216 /// * The instance should not be in use elsewhere. For example you can't
217 /// call this function twice, turn both raw pointers into safe pointers,
218 /// and then use both safe pointers.
219 /// * There should be no other active mutable borrow to any other instance
220 /// within the same store. Note that this is not restricted to just this
221 /// instance pointer, but to all instances in a store. Instances can
222 /// safely traverse to other instances "laterally" meaning that a mutable
223 /// borrow on one is a mutable borrow on all.
    /// * There should be no active mutable borrow of the store accessible at
    ///   the same time the returned instance is in use. Instances are owned by
    ///   a store and a store can be used to acquire a safe instance borrow at
    ///   any time.
227 /// * The lifetime of the usage of the instance should not be unnecessarily
228 /// long, for example it cannot be `'static`.
229 ///
230 /// Other entrypoints exist for converting from a raw `VMContext` to a safe
231 /// pointer such as:
232 ///
233 /// * `Instance::enter_host_from_wasm`
234 /// * `Instance::sibling_vmctx{,_mut}`
235 ///
236 /// These place further restrictions on the API signature to satisfy some of
237 /// the above points.
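    ///
    /// As a rough, self-contained illustration of the pointer math this relies
    /// on (the `Container`/`Trailer` names are hypothetical stand-ins, not the
    /// real `Instance`/`VMContext` layout):
    ///
    /// ```ignore
    /// use core::ptr::NonNull;
    ///
    /// #[repr(C)]
    /// struct Container {
    ///     header: u64,
    ///     trailer: Trailer, // plays the role of the trailing `VMContext`
    /// }
    ///
    /// #[repr(C)]
    /// struct Trailer(());
    ///
    /// /// Recover the container from a pointer to its trailing field by
    /// /// subtracting the offset of that field.
    /// unsafe fn container_of(trailer: NonNull<Trailer>) -> NonNull<Container> {
    ///     unsafe { trailer.byte_sub(core::mem::offset_of!(Container, trailer)).cast() }
    /// }
    /// ```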
238 #[inline]
239 pub(crate) unsafe fn from_vmctx(vmctx: NonNull<VMContext>) -> NonNull<Instance> {
240 // SAFETY: The validity of `byte_sub` relies on `vmctx` being a valid
241 // allocation.
242 unsafe {
243 vmctx
244 .byte_sub(mem::size_of::<Instance>())
245 .cast::<Instance>()
246 }
247 }
248
249 /// Encapsulated entrypoint to the host from WebAssembly, converting a raw
250 /// `VMContext` pointer into a `VMStore` plus an `InstanceId`.
251 ///
    /// This is an entrypoint for core wasm entering back into the host, used
    /// for both host functions and libcalls, for example. It will execute the
    /// closure `f` with safer internal types than a raw `VMContext` pointer.
256 ///
257 /// The closure `f` will have its errors caught, handled, and translated to
258 /// an ABI-safe return value to give back to wasm. This includes both normal
259 /// errors such as traps as well as panics.
260 ///
261 /// # Safety
262 ///
263 /// Callers must ensure that `vmctx` is a valid allocation and is safe to
264 /// dereference at this time. That's generally only true when it's a
265 /// wasm-provided value and this is the first function called after entering
266 /// the host. Otherwise this could unsafely alias the store with a mutable
267 /// pointer, for example.
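    ///
    /// A hypothetical libcall shim using this entrypoint might look roughly
    /// like the following sketch (the shim name, signature, and return type
    /// are made up for illustration and are not the real libcall ABI):
    ///
    /// ```ignore
    /// unsafe extern "C" fn some_libcall_shim(vmctx: NonNull<VMContext>, arg: u64) -> u32 {
    ///     unsafe {
    ///         Instance::enter_host_from_wasm(vmctx, |store, instance| {
    ///             // Safe borrows of the store and instance are available here;
    ///             // errors and panics are translated to an ABI-safe return value.
    ///             let _ = (store, instance, arg);
    ///             Ok::<u32, crate::Error>(0)
    ///         })
    ///     }
    /// }
    /// ```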
268 #[inline]
269 pub(crate) unsafe fn enter_host_from_wasm<R>(
270 vmctx: NonNull<VMContext>,
271 f: impl FnOnce(&mut dyn VMStore, InstanceId) -> R,
272 ) -> R::Abi
273 where
274 R: HostResult,
275 {
276 // SAFETY: It's a contract of this function that `vmctx` is a valid
277 // pointer with neither the store nor other instances actively in use
278 // when this is called, so it should be safe to acquire a mutable
279 // pointer to the store and read the instance pointer.
280 let (store, instance) = unsafe {
281 let instance = Instance::from_vmctx(vmctx);
282 let instance = instance.as_ref();
283 let store = &mut *instance.store.unwrap().0.as_ptr();
284 (store, instance.id)
285 };
286
287 // Thread the `store` and `instance` through panic/trap infrastructure
288 // back into `f`.
289 catch_unwind_and_record_trap(store, |store| f(store, instance))
290 }
291
    /// Converts the provided `vmctx` pointer to a borrow of its `Instance`,
    /// returned with the same lifetime as `self`.
    ///
    /// This function can be used when traversing a `VMContext`, for example to
    /// optionally reach into the instance that an import refers back to.
297 ///
298 /// # Safety
299 ///
300 /// This function requires that the `vmctx` pointer is indeed valid and
301 /// from the store that `self` belongs to.
302 #[inline]
303 unsafe fn sibling_vmctx<'a>(&'a self, vmctx: NonNull<VMContext>) -> &'a Instance {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer, so the pointer arithmetic in `from_vmctx` is valid.
306 let ptr = unsafe { Instance::from_vmctx(vmctx) };
307 // SAFETY: it's a contract of this function itself that `vmctx` is a
308 // valid pointer to dereference. Additionally the lifetime of the return
309 // value is constrained to be the same as `self` to avoid granting a
310 // too-long lifetime.
311 unsafe { ptr.as_ref() }
312 }
313
314 /// Same as [`Self::sibling_vmctx`], but the mutable version.
315 ///
316 /// # Safety
317 ///
318 /// This function requires that the `vmctx` pointer is indeed valid and
319 /// from the store that `self` belongs to.
320 ///
321 /// (Note that it is *NOT* required that `vmctx` be distinct from this
322 /// instance's `vmctx`, or that usage of the resulting instance is limited
323 /// to its defined items! The returned borrow has the same lifetime as
324 /// `self`, which means that this instance cannot be used while the
325 /// resulting instance is in use, and we therefore do not need to worry
326 /// about mutable aliasing between this instance and the resulting
327 /// instance.)
328 #[inline]
329 unsafe fn sibling_vmctx_mut<'a>(
330 self: Pin<&'a mut Self>,
331 vmctx: NonNull<VMContext>,
332 ) -> Pin<&'a mut Instance> {
333 // SAFETY: it's a contract of this function itself that `vmctx` is a
334 // valid pointer such that this pointer arithmetic is valid.
335 let mut ptr = unsafe { Instance::from_vmctx(vmctx) };
336
337 // SAFETY: it's a contract of this function itself that `vmctx` is a
338 // valid pointer to dereference. Additionally the lifetime of the return
339 // value is constrained to be the same as `self` to avoid granting a
340 // too-long lifetime. Finally mutable references to an instance are
341 // always through `Pin`, so it's safe to create a pin-pointer here.
342 unsafe { Pin::new_unchecked(ptr.as_mut()) }
343 }
344
345 pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
346 self.runtime_info.env_module()
347 }
348
349 #[cfg(any(feature = "gc", feature = "debug"))]
350 pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
351 match &self.runtime_info {
352 ModuleRuntimeInfo::Module(m) => Some(m),
353 ModuleRuntimeInfo::Bare(_) => None,
354 }
355 }
356
357 /// Translate a module-level interned type index into an engine-level
358 /// interned type index.
359 #[cfg(feature = "gc")]
360 pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
361 self.runtime_info.engine_type_index(module_index)
362 }
363
364 #[inline]
365 fn offsets(&self) -> &VMOffsets<HostPtr> {
366 self.runtime_info.offsets()
367 }
368
369 /// Return the indexed `VMFunctionImport`.
370 fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
371 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
372 }
373
    /// Return the indexed `VMTableImport`.
375 fn imported_table(&self, index: TableIndex) -> &VMTableImport {
376 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
377 }
378
379 /// Return the indexed `VMMemoryImport`.
380 fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
381 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
382 }
383
384 /// Return the indexed `VMGlobalImport`.
385 fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
386 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
387 }
388
389 /// Return the indexed `VMTagImport`.
390 fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
391 unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
392 }
393
394 /// Return the indexed `VMTagDefinition`.
395 pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
396 unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
397 }
398
399 /// Return the indexed `VMTableDefinition`.
400 pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
401 unsafe { self.table_ptr(index).read() }
402 }
403
    /// Update the `VMTableDefinition` stored in vmctx memory for the defined table at `index`.
405 fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
406 unsafe {
407 self.table_ptr(index).write(table);
408 }
409 }
410
411 /// Return a pointer to the `index`'th table within this instance, stored
412 /// in vmctx memory.
413 pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
414 unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
415 }
416
417 /// Get a locally defined or imported memory.
418 pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
419 if let Some(defined_index) = self.env_module().defined_memory_index(index) {
420 self.memory(defined_index)
421 } else {
422 let import = self.imported_memory(index);
423 unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
424 }
425 }
426
427 /// Return the indexed `VMMemoryDefinition`, loaded from vmctx memory
428 /// already.
429 #[inline]
430 pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
431 unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
432 }
433
434 /// Set the indexed memory to `VMMemoryDefinition`.
435 fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
436 unsafe {
437 self.memory_ptr(index).write(mem);
438 }
439 }
440
441 /// Return the address of the specified memory at `index` within this vmctx.
442 ///
443 /// Note that the returned pointer resides in wasm-code-readable-memory in
444 /// the vmctx.
445 #[inline]
446 pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
447 unsafe {
448 self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
449 .as_non_null()
450 }
451 }
452
453 /// Return the indexed `VMGlobalDefinition`.
454 pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
455 unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
456 }
457
458 /// Get all globals within this instance.
459 ///
    /// Returns both imported and defined globals.
461 ///
462 /// Returns both exported and non-exported globals.
463 ///
464 /// Gives access to the full globals space.
465 pub fn all_globals(
466 &self,
467 store: StoreId,
468 ) -> impl ExactSizeIterator<Item = (GlobalIndex, crate::Global)> + '_ {
469 let module = self.env_module();
470 module
471 .globals
472 .keys()
473 .map(move |idx| (idx, self.get_exported_global(store, idx)))
474 }
475
476 /// Get the globals defined in this instance (not imported).
477 pub fn defined_globals(
478 &self,
479 store: StoreId,
480 ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, crate::Global)> + '_ {
481 let module = self.env_module();
482 self.all_globals(store)
483 .skip(module.num_imported_globals)
484 .map(move |(i, global)| (module.defined_global_index(i).unwrap(), global))
485 }
486
    /// Return a pointer to the `VMStoreContext` pointer stored within this vmctx.
488 #[inline]
489 pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
490 unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
491 }
492
493 /// Return a pointer to the global epoch counter used by this instance.
494 #[cfg(target_has_atomic = "64")]
495 pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
496 let offset = self.offsets().ptr.vmctx_epoch_ptr();
497 unsafe { self.vmctx_plus_offset_mut(offset) }
498 }
499
500 /// Return a pointer to the collector-specific heap data.
501 pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
502 let offset = self.offsets().ptr.vmctx_gc_heap_data();
503 unsafe { self.vmctx_plus_offset_mut(offset) }
504 }
505
506 pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: &StoreOpaque) {
507 // FIXME: should be more targeted ideally with the `unsafe` than just
508 // throwing this entire function in a large `unsafe` block.
509 unsafe {
510 *self.as_mut().store_mut() = Some(VMStoreRawPtr(store.traitobj()));
511 self.vm_store_context()
512 .write(Some(store.vm_store_context_ptr().into()));
513 #[cfg(target_has_atomic = "64")]
514 {
515 *self.as_mut().epoch_ptr() =
516 Some(NonNull::from(store.engine().epoch_counter()).into());
517 }
518
519 if self.env_module().needs_gc_heap {
520 self.as_mut().set_gc_heap(Some(store.unwrap_gc_store()));
521 } else {
522 self.as_mut().set_gc_heap(None);
523 }
524 }
525 }
526
527 unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
528 if let Some(gc_store) = gc_store {
529 *self.gc_heap_data() = Some(unsafe { gc_store.gc_heap.vmctx_gc_heap_data().into() });
530 } else {
531 *self.gc_heap_data() = None;
532 }
533 }
534
535 /// Return a reference to the vmctx used by compiled wasm code.
536 #[inline]
537 pub fn vmctx(&self) -> NonNull<VMContext> {
538 InstanceLayout::vmctx(self)
539 }
540
541 /// Lookup a function by index.
542 ///
543 /// # Panics
544 ///
545 /// Panics if `index` is out of bounds for this instance.
546 ///
547 /// # Safety
548 ///
549 /// The `store` parameter must be the store that owns this instance and the
550 /// functions that this instance can reference.
551 pub unsafe fn get_exported_func(
552 self: Pin<&mut Self>,
553 store: StoreId,
554 index: FuncIndex,
555 ) -> crate::Func {
556 let func_ref = self.get_func_ref(index).unwrap();
557
558 // SAFETY: the validity of `func_ref` is guaranteed by the validity of
559 // `self`, and the contract that `store` must own `func_ref` is a
560 // contract of this function itself.
561 unsafe { crate::Func::from_vm_func_ref(store, func_ref) }
562 }
563
564 /// Lookup a table by index.
565 ///
566 /// # Panics
567 ///
568 /// Panics if `index` is out of bounds for this instance.
569 pub fn get_exported_table(&self, store: StoreId, index: TableIndex) -> crate::Table {
570 let (id, def_index) = if let Some(def_index) = self.env_module().defined_table_index(index)
571 {
572 (self.id, def_index)
573 } else {
574 let import = self.imported_table(index);
575 // SAFETY: validity of this `Instance` guarantees validity of the
576 // `vmctx` pointer being read here to find the transitive
577 // `InstanceId` that the import is associated with.
578 let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
579 (id, import.index)
580 };
581 crate::Table::from_raw(StoreInstanceId::new(store, id), def_index)
582 }
583
584 /// Lookup a memory by index.
585 ///
586 /// # Panics
587 ///
588 /// Panics if `index` is out-of-bounds for this instance.
589 #[cfg_attr(
590 not(feature = "threads"),
591 expect(unused_variables, reason = "definitions cfg'd to dummy",)
592 )]
593 pub fn get_exported_memory(&self, store: StoreId, index: MemoryIndex) -> ExportMemory {
594 let module = self.env_module();
595 if module.memories[index].shared {
596 let (memory, import) =
597 if let Some(def_index) = self.env_module().defined_memory_index(index) {
598 (
599 self.get_defined_memory(def_index),
600 self.get_defined_memory_vmimport(def_index),
601 )
602 } else {
603 let import = self.imported_memory(index);
604 // SAFETY: validity of this `Instance` guarantees validity of
605 // the `vmctx` pointer being read here to find the transitive
606 // `InstanceId` that the import is associated with.
607 let instance = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()) };
608 (instance.get_defined_memory(import.index), *import)
609 };
610
611 let vm = memory.as_shared_memory().unwrap().clone();
612 ExportMemory::Shared(vm, import)
613 } else {
614 let (id, def_index) =
615 if let Some(def_index) = self.env_module().defined_memory_index(index) {
616 (self.id, def_index)
617 } else {
618 let import = self.imported_memory(index);
619 // SAFETY: validity of this `Instance` guarantees validity of the
620 // `vmctx` pointer being read here to find the transitive
621 // `InstanceId` that the import is associated with.
622 let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
623 (id, import.index)
624 };
625
626 // SAFETY: `from_raw` requires that the memory is not shared, which
627 // was tested above in this if/else.
628 let store_id = StoreInstanceId::new(store, id);
629 ExportMemory::Unshared(unsafe { crate::Memory::from_raw(store_id, def_index) })
630 }
631 }
632
633 /// Lookup a global by index.
634 ///
635 /// # Panics
636 ///
637 /// Panics if `index` is out-of-bounds for this instance.
638 pub(crate) fn get_exported_global(&self, store: StoreId, index: GlobalIndex) -> crate::Global {
639 // If this global is defined within this instance, then that's easy to
640 // calculate the `Global`.
641 if let Some(def_index) = self.env_module().defined_global_index(index) {
642 let instance = StoreInstanceId::new(store, self.id);
643 return crate::Global::from_core(instance, def_index);
644 }
645
646 // For imported globals it's required to match on the `kind` to
647 // determine which `Global` constructor is going to be invoked.
648 let import = self.imported_global(index);
649 match import.kind {
650 VMGlobalKind::Host(index) => crate::Global::from_host(store, index),
651 VMGlobalKind::Instance(index) => {
652 // SAFETY: validity of this `&Instance` means validity of its
653 // imports meaning we can read the id of the vmctx within.
654 let id = unsafe {
655 let vmctx = VMContext::from_opaque(import.vmctx.unwrap().as_non_null());
656 self.sibling_vmctx(vmctx).id
657 };
658 crate::Global::from_core(StoreInstanceId::new(store, id), index)
659 }
660 #[cfg(feature = "component-model")]
661 VMGlobalKind::ComponentFlags(index) => {
662 // SAFETY: validity of this `&Instance` means validity of its
663 // imports meaning we can read the id of the vmctx within.
664 let id = unsafe {
665 let vmctx = super::component::VMComponentContext::from_opaque(
666 import.vmctx.unwrap().as_non_null(),
667 );
668 super::component::ComponentInstance::vmctx_instance_id(vmctx)
669 };
670 crate::Global::from_component_flags(
671 crate::component::store::StoreComponentInstanceId::new(store, id),
672 index,
673 )
674 }
675 }
676 }
677
678 /// Get an exported tag by index.
679 ///
680 /// # Panics
681 ///
682 /// Panics if the index is out-of-range.
683 pub fn get_exported_tag(&self, store: StoreId, index: TagIndex) -> crate::Tag {
684 let (id, def_index) = if let Some(def_index) = self.env_module().defined_tag_index(index) {
685 (self.id, def_index)
686 } else {
687 let import = self.imported_tag(index);
688 // SAFETY: validity of this `Instance` guarantees validity of the
689 // `vmctx` pointer being read here to find the transitive
690 // `InstanceId` that the import is associated with.
691 let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
692 (id, import.index)
693 };
694 crate::Tag::from_raw(StoreInstanceId::new(store, id), def_index)
695 }
696
697 /// Return an iterator over the exports of this instance.
698 ///
699 /// Specifically, it provides access to the key-value pairs, where the keys
700 /// are export names, and the values are export declarations which can be
    /// resolved with `lookup_by_declaration`.
702 pub fn exports(&self) -> wasmparser::collections::index_map::Iter<'_, String, EntityIndex> {
703 self.env_module().exports.iter()
704 }
705
    /// Grow memory by the specified number of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number of
    /// pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
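    ///
    /// For reference, the embedder-facing equivalent of this operation is the
    /// public `wasmtime::Memory::grow` API, roughly as sketched below (the
    /// bindings shown are assumed to exist in the caller's code):
    ///
    /// ```ignore
    /// let mut store: wasmtime::Store<()> = unimplemented!();
    /// let memory: wasmtime::Memory = unimplemented!(); // e.g. from an instance export
    /// let old_size_in_pages = memory.grow(&mut store, 1)?;
    /// ```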
711 pub(crate) async fn memory_grow(
712 mut self: Pin<&mut Self>,
713 limiter: Option<&mut StoreResourceLimiter<'_>>,
714 idx: DefinedMemoryIndex,
715 delta: u64,
716 ) -> Result<Option<usize>, Error> {
717 let memory = &mut self.as_mut().memories_mut()[idx].1;
718
719 // SAFETY: this is the safe wrapper around `Memory::grow` because it
720 // automatically updates the `VMMemoryDefinition` in this instance after
721 // a growth operation below.
722 let result = unsafe { memory.grow(delta, limiter).await };
723
724 // Update the state used by a non-shared Wasm memory in case the base
725 // pointer and/or the length changed.
726 if memory.as_shared_memory().is_none() {
727 let vmmemory = memory.vmmemory();
728 self.set_memory(idx, vmmemory);
729 }
730
731 result
732 }
733
734 pub(crate) fn table_element_type(
735 self: Pin<&mut Self>,
736 table_index: TableIndex,
737 ) -> TableElementType {
738 self.get_table(table_index).element_type()
739 }
740
    /// Performs a grow operation on the table specified by `table_index` using
    /// the `grow` closure.
    ///
    /// This will handle updating the `VMTableDefinition` internally as necessary.
744 pub(crate) async fn defined_table_grow(
745 mut self: Pin<&mut Self>,
746 table_index: DefinedTableIndex,
747 grow: impl AsyncFnOnce(&mut Table) -> Result<Option<usize>>,
748 ) -> Result<Option<usize>> {
749 let table = self.as_mut().get_defined_table(table_index);
750 let result = grow(table).await;
751 let element = table.vmtable();
752 self.set_table(table_index, element);
753 result
754 }
755
756 fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
757 let size = mem::size_of::<Self>()
758 .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
759 .unwrap();
760 let align = mem::align_of::<Self>();
761 Layout::from_size_align(size, align).unwrap()
762 }
763
764 fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
765 unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
766 }
767
768 /// Construct a new VMFuncRef for the given function
769 /// (imported or defined in this module) and store into the given
770 /// location. Used during lazy initialization.
771 ///
772 /// Note that our current lazy-init scheme actually calls this every
773 /// time the funcref pointer is fetched; this turns out to be better
774 /// than tracking state related to whether it's been initialized
775 /// before, because resetting that state on (re)instantiation is
776 /// very expensive if there are many funcrefs.
777 ///
778 /// # Safety
779 ///
    /// This function requires that `into` is a valid pointer to write to.
781 unsafe fn construct_func_ref(
782 self: Pin<&mut Self>,
783 index: FuncIndex,
784 type_index: VMSharedTypeIndex,
785 into: *mut VMFuncRef,
786 ) {
787 let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
788 VMFuncRef {
789 array_call: self
790 .runtime_info
791 .array_to_wasm_trampoline(def_index)
792 .expect("should have array-to-Wasm trampoline for escaping function")
793 .into(),
794 wasm_call: Some(self.runtime_info.function(def_index).into()),
795 vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
796 type_index,
797 }
798 } else {
799 let import = self.imported_function(index);
800 VMFuncRef {
801 array_call: import.array_call,
802 wasm_call: Some(import.wasm_call),
803 vmctx: import.vmctx,
804 type_index,
805 }
806 };
807
808 // SAFETY: the unsafe contract here is forwarded to callers of this
809 // function.
810 unsafe {
811 ptr::write(into, func_ref);
812 }
813 }
814
815 /// Get a `&VMFuncRef` for the given `FuncIndex`.
816 ///
817 /// Returns `None` if the index is the reserved index value.
818 ///
819 /// The returned reference is a stable reference that won't be moved and can
820 /// be passed into JIT code.
821 pub(crate) fn get_func_ref(
822 self: Pin<&mut Self>,
823 index: FuncIndex,
824 ) -> Option<NonNull<VMFuncRef>> {
825 if index == FuncIndex::reserved_value() {
826 return None;
827 }
828
        // For now, we eagerly initialize a funcref struct in-place
830 // whenever asked for a reference to it. This is mostly
831 // fine, because in practice each funcref is unlikely to be
832 // requested more than a few times: once-ish for funcref
833 // tables used for call_indirect (the usual compilation
834 // strategy places each function in the table at most once),
835 // and once or a few times when fetching exports via API.
836 // Note that for any case driven by table accesses, the lazy
837 // table init behaves like a higher-level cache layer that
838 // protects this initialization from happening multiple
839 // times, via that particular table at least.
840 //
841 // When `ref.func` becomes more commonly used or if we
842 // otherwise see a use-case where this becomes a hotpath,
843 // we can reconsider by using some state to track
844 // "uninitialized" explicitly, for example by zeroing the
845 // funcrefs (perhaps together with other
846 // zeroed-at-instantiate-time state) or using a separate
847 // is-initialized bitmap.
848 //
849 // We arrived at this design because zeroing memory is
850 // expensive, so it's better for instantiation performance
851 // if we don't have to track "is-initialized" state at
852 // all!
853 let func = &self.env_module().functions[index];
854 let sig = func.signature.unwrap_engine_type_index();
855
856 // SAFETY: the offset calculated here should be correct with
857 // `self.offsets`
858 let func_ref = unsafe {
859 self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
860 };
861
862 // SAFETY: the `func_ref` ptr should be valid as it's within our
863 // `VMContext` area.
864 unsafe {
865 self.construct_func_ref(index, sig, func_ref.as_ptr());
866 }
867
868 Some(func_ref)
869 }
870
871 /// Get the passive elements segment at the given index.
872 ///
873 /// Returns an empty segment if the index is out of bounds or if the segment
874 /// has been dropped.
875 ///
876 /// The `storage` parameter should always be `None`; it is a bit of a hack
877 /// to work around lifetime issues.
878 pub(crate) fn passive_element_segment<'a>(
879 &self,
880 storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
881 elem_index: ElemIndex,
882 ) -> &'a TableSegmentElements {
883 debug_assert!(storage.is_none());
884 *storage = Some((
885 // TODO: this `clone()` shouldn't be necessary but is used for now to
            // inform `rustc` that the lifetime of the elements here is
            // disconnected from the lifetime of `self`.
888 self.env_module().clone(),
889 // NB: fall back to an expressions-based list of elements which
890 // doesn't have static type information (as opposed to
891 // `TableSegmentElements::Functions`) since we don't know what type
892 // is needed in the caller's context. Let the type be inferred by
893 // how they use the segment.
894 TableSegmentElements::Expressions(Box::new([])),
895 ));
896 let (module, empty) = storage.as_ref().unwrap();
897
898 match module.passive_elements_map.get(&elem_index) {
899 Some(index) if !self.dropped_elements.contains(elem_index) => {
900 &module.passive_elements[*index]
901 }
902 _ => empty,
903 }
904 }
905
906 /// The `table.init` operation: initializes a portion of a table with a
907 /// passive element.
908 ///
909 /// # Errors
910 ///
911 /// Returns a `Trap` error when the range within the table is out of bounds
912 /// or the range within the passive element is out of bounds.
913 pub(crate) async fn table_init(
914 store: &mut StoreOpaque,
915 limiter: Option<&mut StoreResourceLimiter<'_>>,
916 instance: InstanceId,
917 table_index: TableIndex,
918 elem_index: ElemIndex,
919 dst: u64,
920 src: u64,
921 len: u64,
922 ) -> Result<()> {
923 let mut storage = None;
924 let elements = store
925 .instance(instance)
926 .passive_element_segment(&mut storage, elem_index);
927 let mut const_evaluator = ConstExprEvaluator::default();
928 Self::table_init_segment(
929 store,
930 limiter,
931 instance,
932 &mut const_evaluator,
933 table_index,
934 elements,
935 dst,
936 src,
937 len,
938 )
939 .await
940 }
941
942 pub(crate) async fn table_init_segment(
943 store: &mut StoreOpaque,
944 mut limiter: Option<&mut StoreResourceLimiter<'_>>,
945 elements_instance_id: InstanceId,
946 const_evaluator: &mut ConstExprEvaluator,
947 table_index: TableIndex,
948 elements: &TableSegmentElements,
949 dst: u64,
950 src: u64,
951 len: u64,
952 ) -> Result<()> {
953 // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init
954
955 let store_id = store.id();
956 let elements_instance = store.instance_mut(elements_instance_id);
957 let table = elements_instance.get_exported_table(store_id, table_index);
958 let table_size = table._size(store);
959
960 // Perform a bounds check on the table being written to. This is done by
961 // ensuring that `dst + len <= table.size()` via checked arithmetic.
962 //
963 // Note that the bounds check for the element segment happens below when
964 // the original segment is sliced via `src` and `len`.
965 table_size
966 .checked_sub(dst)
967 .and_then(|i| i.checked_sub(len))
968 .ok_or(Trap::TableOutOfBounds)?;
969
970 let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
971 let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;
972
973 let positions = dst..dst + u64::try_from(len).unwrap();
974 match elements {
975 TableSegmentElements::Functions(funcs) => {
976 let elements = funcs
977 .get(src..)
978 .and_then(|s| s.get(..len))
979 .ok_or(Trap::TableOutOfBounds)?;
980 for (i, func_idx) in positions.zip(elements) {
981 // SAFETY: the `store_id` passed to `get_exported_func` is
982 // indeed the store that owns the function.
983 let func = unsafe {
984 store
985 .instance_mut(elements_instance_id)
986 .get_exported_func(store_id, *func_idx)
987 };
988 table.set_(store, i, func.into()).unwrap();
989 }
990 }
991 TableSegmentElements::Expressions(exprs) => {
992 let mut store = OpaqueRootScope::new(store);
993 let exprs = exprs
994 .get(src..)
995 .and_then(|s| s.get(..len))
996 .ok_or(Trap::TableOutOfBounds)?;
997 let mut context = ConstEvalContext::new(elements_instance_id);
998 for (i, expr) in positions.zip(exprs) {
999 let element = const_evaluator
1000 .eval(&mut store, limiter.as_deref_mut(), &mut context, expr)
1001 .await?;
1002 table.set_(&mut store, i, element.ref_().unwrap()).unwrap();
1003 }
1004 }
1005 }
1006
1007 Ok(())
1008 }
1009
1010 /// Drop an element.
1011 pub(crate) fn elem_drop(self: Pin<&mut Self>, elem_index: ElemIndex) {
1012 // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop
1013
1014 self.dropped_elements_mut().insert(elem_index);
1015
1016 // Note that we don't check that we actually removed a segment because
1017 // dropping a non-passive segment is a no-op (not a trap).
1018 }
1019
1020 /// Get a locally-defined memory.
1021 pub fn get_defined_memory_mut(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> &mut Memory {
1022 &mut self.memories_mut()[index].1
1023 }
1024
1025 /// Get a locally-defined memory.
1026 pub fn get_defined_memory(&self, index: DefinedMemoryIndex) -> &Memory {
1027 &self.memories[index].1
1028 }
1029
1030 pub fn get_defined_memory_vmimport(&self, index: DefinedMemoryIndex) -> VMMemoryImport {
1031 crate::runtime::vm::VMMemoryImport {
1032 from: self.memory_ptr(index).into(),
1033 vmctx: self.vmctx().into(),
1034 index,
1035 }
1036 }
1037
1038 /// Do a `memory.copy`
1039 ///
1040 /// # Errors
1041 ///
1042 /// Returns a `Trap` error when the source or destination ranges are out of
1043 /// bounds.
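    ///
    /// Both ranges are validated with `validate_inbounds`; the condition it
    /// enforces is roughly the following (a sketch of the check, not the
    /// exact implementation):
    ///
    /// ```ignore
    /// fn in_bounds(max: usize, ptr: u64, len: u64) -> bool {
    ///     ptr.checked_add(len)
    ///         .and_then(|end| usize::try_from(end).ok())
    ///         .map(|end| end <= max)
    ///         .unwrap_or(false)
    /// }
    /// ```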
1044 pub(crate) fn memory_copy(
1045 self: Pin<&mut Self>,
1046 dst_index: MemoryIndex,
1047 dst: u64,
1048 src_index: MemoryIndex,
1049 src: u64,
1050 len: u64,
1051 ) -> Result<(), Trap> {
1052 // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy
1053
1054 let src_mem = self.get_memory(src_index);
1055 let dst_mem = self.get_memory(dst_index);
1056
1057 let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
1058 let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
1059 let len = usize::try_from(len).unwrap();
1060
1061 // Bounds and casts are checked above, by this point we know that
1062 // everything is safe.
1063 unsafe {
1064 let dst = dst_mem.base.as_ptr().add(dst);
1065 let src = src_mem.base.as_ptr().add(src);
1066 // FIXME audit whether this is safe in the presence of shared memory
1067 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1068 ptr::copy(src, dst, len);
1069 }
1070
1071 Ok(())
1072 }
1073
1074 fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
1075 let oob = || Trap::MemoryOutOfBounds;
1076 let end = ptr
1077 .checked_add(len)
1078 .and_then(|i| usize::try_from(i).ok())
1079 .ok_or_else(oob)?;
1080 if end > max {
1081 Err(oob())
1082 } else {
1083 Ok(ptr.try_into().unwrap())
1084 }
1085 }
1086
1087 /// Perform the `memory.fill` operation on a locally defined memory.
1088 ///
1089 /// # Errors
1090 ///
1091 /// Returns a `Trap` error if the memory range is out of bounds.
1092 pub(crate) fn memory_fill(
1093 self: Pin<&mut Self>,
1094 memory_index: DefinedMemoryIndex,
1095 dst: u64,
1096 val: u8,
1097 len: u64,
1098 ) -> Result<(), Trap> {
1099 let memory_index = self.env_module().memory_index(memory_index);
1100 let memory = self.get_memory(memory_index);
1101 let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
1102 let len = usize::try_from(len).unwrap();
1103
1104 // Bounds and casts are checked above, by this point we know that
1105 // everything is safe.
1106 unsafe {
1107 let dst = memory.base.as_ptr().add(dst);
1108 // FIXME audit whether this is safe in the presence of shared memory
1109 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1110 ptr::write_bytes(dst, val, len);
1111 }
1112
1113 Ok(())
1114 }
1115
1116 /// Get the internal storage range of a particular Wasm data segment.
1117 pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
1118 match self.env_module().passive_data_map.get(&index) {
1119 Some(range) if !self.dropped_data.contains(index) => range.clone(),
1120 _ => 0..0,
1121 }
1122 }
1123
1124 /// Given an internal storage range of a Wasm data segment (or subset of a
1125 /// Wasm data segment), get the data's raw bytes.
1126 pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
1127 let start = usize::try_from(range.start).unwrap();
1128 let end = usize::try_from(range.end).unwrap();
1129 &self.runtime_info.wasm_data()[start..end]
1130 }
1131
1132 /// Performs the `memory.init` operation.
1133 ///
1134 /// # Errors
1135 ///
1136 /// Returns a `Trap` error if the destination range is out of this module's
1137 /// memory's bounds or if the source range is outside the data segment's
1138 /// bounds.
1139 pub(crate) fn memory_init(
1140 self: Pin<&mut Self>,
1141 memory_index: MemoryIndex,
1142 data_index: DataIndex,
1143 dst: u64,
1144 src: u32,
1145 len: u32,
1146 ) -> Result<(), Trap> {
1147 let range = self.wasm_data_range(data_index);
1148 self.memory_init_segment(memory_index, range, dst, src, len)
1149 }
1150
1151 pub(crate) fn memory_init_segment(
1152 self: Pin<&mut Self>,
1153 memory_index: MemoryIndex,
1154 range: Range<u32>,
1155 dst: u64,
1156 src: u32,
1157 len: u32,
1158 ) -> Result<(), Trap> {
1159 // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init
1160
1161 let memory = self.get_memory(memory_index);
1162 let data = self.wasm_data(range);
1163 let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
1164 let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
1165 let len = len as usize;
1166
1167 unsafe {
1168 let src_start = data.as_ptr().add(src);
1169 let dst_start = memory.base.as_ptr().add(dst);
1170 // FIXME audit whether this is safe in the presence of shared memory
1171 // (https://github.com/bytecodealliance/wasmtime/issues/4203).
1172 ptr::copy_nonoverlapping(src_start, dst_start, len);
1173 }
1174
1175 Ok(())
1176 }
1177
1178 /// Drop the given data segment, truncating its length to zero.
1179 pub(crate) fn data_drop(self: Pin<&mut Self>, data_index: DataIndex) {
1180 self.dropped_data_mut().insert(data_index);
1181
1182 // Note that we don't check that we actually removed a segment because
1183 // dropping a non-passive segment is a no-op (not a trap).
1184 }
1185
1186 /// Get a table by index regardless of whether it is locally-defined
1187 /// or an imported, foreign table. Ensure that the given range of
1188 /// elements in the table is lazily initialized. We define this
1189 /// operation all-in-one for safety, to ensure the lazy-init
1190 /// happens.
1191 ///
1192 /// Takes an `Iterator` for the index-range to lazy-initialize,
1193 /// for flexibility. This can be a range, single item, or empty
1194 /// sequence, for example. The iterator should return indices in
1195 /// increasing order, so that the break-at-out-of-bounds behavior
1196 /// works correctly.
1197 pub(crate) fn get_table_with_lazy_init(
1198 self: Pin<&mut Self>,
1199 table_index: TableIndex,
1200 range: impl Iterator<Item = u64>,
1201 ) -> &mut Table {
1202 let (idx, instance) = self.defined_table_index_and_instance(table_index);
1203 instance.get_defined_table_with_lazy_init(idx, range)
1204 }
1205
1206 /// Gets the raw runtime table data structure owned by this instance
1207 /// given the provided `idx`.
1208 ///
1209 /// The `range` specified is eagerly initialized for funcref tables.
1210 pub fn get_defined_table_with_lazy_init(
1211 mut self: Pin<&mut Self>,
1212 idx: DefinedTableIndex,
1213 range: impl IntoIterator<Item = u64>,
1214 ) -> &mut Table {
1215 let elt_ty = self.tables[idx].1.element_type();
1216
1217 if elt_ty == TableElementType::Func {
1218 for i in range {
1219 match self.tables[idx].1.get_func_maybe_init(i) {
1220 // Uninitialized table element.
1221 Ok(None) => {}
1222 // Initialized table element, move on to the next.
1223 Ok(Some(_)) => continue,
1224 // Out-of-bounds; caller will handle by likely
1225 // throwing a trap. No work to do to lazy-init
1226 // beyond the end.
1227 Err(_) => break,
1228 };
1229
1230 // The table element `i` is uninitialized and is now being
                // initialized. This must imply that a `precomputed` list of
                // function indices is available for this table. The precomputed
1233 // list is extracted and then it is consulted with `i` to
1234 // determine the function that is going to be initialized. Note
1235 // that `i` may be outside the limits of the static
1236 // initialization so it's a fallible `get` instead of an index.
1237 let module = self.env_module();
1238 let precomputed = match &module.table_initialization.initial_values[idx] {
1239 TableInitialValue::Null { precomputed } => precomputed,
1240 TableInitialValue::Expr(_) => unreachable!(),
1241 };
1242 // Panicking here helps catch bugs rather than silently truncating by accident.
1243 let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
1244 let func_ref =
1245 func_index.and_then(|func_index| self.as_mut().get_func_ref(func_index));
1246 self.as_mut().tables_mut()[idx]
1247 .1
1248 .set_func(i, func_ref)
1249 .expect("Table type should match and index should be in-bounds");
1250 }
1251 }
1252
1253 self.get_defined_table(idx)
1254 }
1255
1256 /// Get a table by index regardless of whether it is locally-defined or an
1257 /// imported, foreign table.
1258 pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> &mut Table {
1259 let (idx, instance) = self.defined_table_index_and_instance(table_index);
1260 instance.get_defined_table(idx)
1261 }
1262
1263 /// Get a locally-defined table.
1264 pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> &mut Table {
1265 &mut self.tables_mut()[index].1
1266 }
1267
1268 pub(crate) fn defined_table_index_and_instance<'a>(
1269 self: Pin<&'a mut Self>,
1270 index: TableIndex,
1271 ) -> (DefinedTableIndex, Pin<&'a mut Instance>) {
1272 if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
1273 (defined_table_index, self)
1274 } else {
1275 let import = self.imported_table(index);
1276 let index = import.index;
1277 let vmctx = import.vmctx.as_non_null();
1278 // SAFETY: the validity of `self` means that the reachable instances
1279 // should also all be owned by the same store and fully initialized,
1280 // so it's safe to laterally move from a mutable borrow of this
1281 // instance to a mutable borrow of a sibling instance.
1282 let foreign_instance = unsafe { self.sibling_vmctx_mut(vmctx) };
1283 (index, foreign_instance)
1284 }
1285 }
1286
1287 /// Initialize the VMContext data associated with this Instance.
1288 ///
1289 /// The `VMContext` memory is assumed to be uninitialized; any field
1290 /// that we need in a certain state will be explicitly written by this
1291 /// function.
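    ///
    /// In rough order this initializes: the `VMCONTEXT_MAGIC` value, the store
    /// pointers, the shared type-ids array, the builtin-functions pointer, the
    /// imported functions/tables/memories/globals/tags, the defined table and
    /// memory definitions, zeroed global definitions, and the defined tag
    /// definitions.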
1292 unsafe fn initialize_vmctx(
1293 mut self: Pin<&mut Self>,
1294 module: &Module,
1295 offsets: &VMOffsets<HostPtr>,
1296 store: &StoreOpaque,
1297 imports: Imports,
1298 ) {
1299 assert!(ptr::eq(module, self.env_module().as_ref()));
1300
1301 // SAFETY: the type of the magic field is indeed `u32` and this function
1302 // is initializing its value.
1303 unsafe {
1304 self.vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
1305 .write(VMCONTEXT_MAGIC);
1306 }
1307
1308 // SAFETY: it's up to the caller to provide a valid store pointer here.
1309 unsafe {
1310 self.as_mut().set_store(store);
1311 }
1312
1313 // Initialize shared types
1314 //
1315 // SAFETY: validity of the vmctx means it should be safe to write to it
1316 // here.
1317 unsafe {
1318 let types = NonNull::from(self.runtime_info.type_ids());
1319 self.type_ids_array().write(types.cast().into());
1320 }
1321
1322 // Initialize the built-in functions
1323 //
1324 // SAFETY: the type of the builtin functions field is indeed a pointer
1325 // and the pointer being filled in here, plus the vmctx is valid to
1326 // write to during initialization.
1327 unsafe {
1328 static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
1329 let ptr = BUILTINS.expose_provenance();
1330 self.vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
1331 .write(VmPtr::from(ptr));
1332 }
1333
1334 // Initialize the imports
1335 //
1336 // SAFETY: the vmctx is safe to initialize during this function and
1337 // validity of each item itself is a contract the caller must uphold.
1338 debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
1339 unsafe {
1340 ptr::copy_nonoverlapping(
1341 imports.functions.as_ptr(),
1342 self.vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
1343 .as_ptr(),
1344 imports.functions.len(),
1345 );
1346 debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
1347 ptr::copy_nonoverlapping(
1348 imports.tables.as_ptr(),
1349 self.vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
1350 .as_ptr(),
1351 imports.tables.len(),
1352 );
1353 debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
1354 ptr::copy_nonoverlapping(
1355 imports.memories.as_ptr(),
1356 self.vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
1357 .as_ptr(),
1358 imports.memories.len(),
1359 );
1360 debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
1361 ptr::copy_nonoverlapping(
1362 imports.globals.as_ptr(),
1363 self.vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
1364 .as_ptr(),
1365 imports.globals.len(),
1366 );
1367 debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
1368 ptr::copy_nonoverlapping(
1369 imports.tags.as_ptr(),
1370 self.vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
1371 .as_ptr(),
1372 imports.tags.len(),
1373 );
1374 }
1375
1376 // N.B.: there is no need to initialize the funcrefs array because we
1377 // eagerly construct each element in it whenever asked for a reference
1378 // to that element. In other words, there is no state needed to track
1379 // the lazy-init, so we don't need to initialize any state now.
1380
1381 // Initialize the defined tables
1382 //
1383 // SAFETY: it's safe to initialize these tables during initialization
1384 // here and the various types of pointers and such here should all be
1385 // valid.
1386 unsafe {
1387 let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
1388 let tables = self.as_mut().tables_mut();
1389 for i in 0..module.num_defined_tables() {
1390 ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
1391 ptr = ptr.add(1);
1392 }
1393 }
1394
1395 // Initialize the defined memories. This fills in both the
1396 // `defined_memories` table and the `owned_memories` table at the same
1397 // time. Entries in `defined_memories` hold a pointer to a definition
1398 // (all memories) whereas the `owned_memories` hold the actual
1399 // definitions of memories owned (not shared) in the module.
1400 //
1401 // SAFETY: it's safe to initialize these memories during initialization
1402 // here and the various types of pointers and such here should all be
1403 // valid.
1404 unsafe {
1405 let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
1406 let mut owned_ptr = self.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
1407 let memories = self.as_mut().memories_mut();
1408 for i in 0..module.num_defined_memories() {
1409 let defined_memory_index = DefinedMemoryIndex::new(i);
1410 let memory_index = module.memory_index(defined_memory_index);
1411 if module.memories[memory_index].shared {
1412 let def_ptr = memories[defined_memory_index]
1413 .1
1414 .as_shared_memory()
1415 .unwrap()
1416 .vmmemory_ptr();
1417 ptr.write(VmPtr::from(def_ptr));
1418 } else {
1419 owned_ptr.write(memories[defined_memory_index].1.vmmemory());
1420 ptr.write(VmPtr::from(owned_ptr));
1421 owned_ptr = owned_ptr.add(1);
1422 }
1423 ptr = ptr.add(1);
1424 }
1425 }
1426
1427 // Zero-initialize the globals so that nothing is uninitialized memory
1428 // after this function returns. The globals are actually initialized
1429 // with their const expression initializers after the instance is fully
1430 // allocated.
1431 //
1432 // SAFETY: it's safe to initialize globals during initialization
1433 // here. Note that while the value being written is not valid for all
1434 // types of globals it's initializing the memory to zero instead of
1435 // being in an undefined state. So it's still unsafe to access globals
1436 // after this, but if it's read then it'd hopefully crash faster than
1437 // leaving this undefined.
1438 unsafe {
1439 for (index, _init) in module.global_initializers.iter() {
1440 self.global_ptr(index).write(VMGlobalDefinition::new());
1441 }
1442 }
1443
1444 // Initialize the defined tags
1445 //
1446 // SAFETY: it's safe to initialize these tags during initialization
1447 // here and the various types of pointers and such here should all be
1448 // valid.
1449 unsafe {
1450 let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
1451 for i in 0..module.num_defined_tags() {
1452 let defined_index = DefinedTagIndex::new(i);
1453 let tag_index = module.tag_index(defined_index);
1454 let tag = module.tags[tag_index];
1455 ptr.write(VMTagDefinition::new(
1456 tag.signature.unwrap_engine_type_index(),
1457 ));
1458 ptr = ptr.add(1);
1459 }
1460 }
1461 }
1462
    /// Attempts to convert the specified host address `addr` into a
    /// WebAssembly-relative address recorded in a `WasmFault`.
1465 ///
1466 /// This method will check all linear memories that this instance contains
1467 /// to see if any of them contain `addr`. If one does then `Some` is
1468 /// returned with metadata about the wasm fault. Otherwise `None` is
1469 /// returned and `addr` doesn't belong to this instance.
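    ///
    /// The translation performed per memory is roughly the following (a
    /// sketch; `accessible` stands for one memory's `wasm_accessible()` host
    /// range):
    ///
    /// ```ignore
    /// fn host_to_wasm_addr(accessible: core::ops::Range<usize>, addr: usize) -> Option<u64> {
    ///     if accessible.contains(&addr) {
    ///         Some(u64::try_from(addr - accessible.start).unwrap())
    ///     } else {
    ///         None
    ///     }
    /// }
    /// ```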
1470 pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
1471 let mut fault = None;
1472 for (_, (_, memory)) in self.memories.iter() {
1473 let accessible = memory.wasm_accessible();
1474 if accessible.start <= addr && addr < accessible.end {
1475 // All linear memories should be disjoint so assert that no
1476 // prior fault has been found.
1477 assert!(fault.is_none());
1478 fault = Some(WasmFault {
1479 memory_size: memory.byte_size(),
1480 wasm_address: u64::try_from(addr - accessible.start).unwrap(),
1481 });
1482 }
1483 }
1484 fault
1485 }
1486
1487 /// Returns the id, within this instance's store, that it's assigned.
1488 pub fn id(&self) -> InstanceId {
1489 self.id
1490 }
1491
1492 /// Get all memories within this instance.
1493 ///
1494 /// Returns both imported and defined memories.
1495 ///
1496 /// Returns both exported and non-exported memories.
1497 ///
1498 /// Gives access to the full memory index space.
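    ///
    /// An illustrative usage sketch (`instance` and `store_id` are
    /// hypothetical bindings for an `&Instance` and the `StoreId` of the
    /// store that owns it):
    ///
    /// ```ignore
    /// for (index, memory) in instance.all_memories(store_id) {
    ///     // `index` covers imported memories first, then defined ones.
    ///     match memory {
    ///         ExportMemory::Unshared(def) => { /* non-shared linear memory */ }
    ///         ExportMemory::Shared(def, id) => { /* shared linear memory */ }
    ///     }
    /// }
    /// ```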
1499 pub fn all_memories(
1500 &self,
1501 store: StoreId,
1502 ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + '_ {
1503 self.env_module()
1504 .memories
1505 .iter()
1506 .map(move |(i, _)| (i, self.get_exported_memory(store, i)))
1507 }
1508
1509 /// Return the memories defined in this instance (not imported).
1510 pub fn defined_memories<'a>(
1511 &'a self,
1512 store: StoreId,
1513 ) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
1514 let num_imported = self.env_module().num_imported_memories;
1515 self.all_memories(store)
1516 .skip(num_imported)
1517 .map(|(_i, memory)| memory)
1518 }
1519
1520 /// Lookup an item with the given index.
1521 ///
1522 /// # Panics
1523 ///
1524 /// Panics if `export` is not valid for this instance.
1525 ///
1526 /// # Safety
1527 ///
1528 /// This function requires that `store` is the correct store which owns this
1529 /// instance.
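    ///
    /// An illustrative, hedged sketch of a call site (`instance`, `store_id`,
    /// and `idx` are hypothetical bindings for a `Pin<&mut Instance>`, the
    /// owning store's `StoreId`, and an `EntityIndex` valid for this
    /// instance):
    ///
    /// ```ignore
    /// // SAFETY: `store_id` identifies the store that owns `instance`.
    /// match unsafe { instance.get_export_by_index_mut(store_id, idx) } {
    ///     Export::Function(f) => { /* ... */ }
    ///     Export::Memory(m) => { /* ... */ }
    ///     _ => { /* globals, tables, tags, shared memories */ }
    /// }
    /// ```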
1530 pub unsafe fn get_export_by_index_mut(
1531 self: Pin<&mut Self>,
1532 store: StoreId,
1533 export: EntityIndex,
1534 ) -> Export {
1535 match export {
1536 // SAFETY: the contract of `store` owning this instance is a
1537 // safety requirement of this function itself.
1538 EntityIndex::Function(i) => {
1539 Export::Function(unsafe { self.get_exported_func(store, i) })
1540 }
1541 EntityIndex::Global(i) => Export::Global(self.get_exported_global(store, i)),
1542 EntityIndex::Table(i) => Export::Table(self.get_exported_table(store, i)),
1543 EntityIndex::Memory(i) => match self.get_exported_memory(store, i) {
1544 ExportMemory::Unshared(m) => Export::Memory(m),
1545 ExportMemory::Shared(m, i) => Export::SharedMemory(m, i),
1546 },
1547 EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(store, i)),
1548 }
1549 }
1550
1551 fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
1552 // SAFETY: this is a pin-projection to get a mutable reference to an
1553 // internal field and is safe so long as the `&mut Self` temporarily
1554 // created is not overwritten, which it isn't here.
1555 unsafe { &mut self.get_unchecked_mut().store }
1556 }
1557
1558 fn dropped_elements_mut(self: Pin<&mut Self>) -> &mut EntitySet<ElemIndex> {
1559 // SAFETY: see `store_mut` above.
1560 unsafe { &mut self.get_unchecked_mut().dropped_elements }
1561 }
1562
1563 fn dropped_data_mut(self: Pin<&mut Self>) -> &mut EntitySet<DataIndex> {
1564 // SAFETY: see `store_mut` above.
1565 unsafe { &mut self.get_unchecked_mut().dropped_data }
1566 }
1567
1568 fn memories_mut(
1569 self: Pin<&mut Self>,
1570 ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
1571 // SAFETY: see `store_mut` above.
1572 unsafe { &mut self.get_unchecked_mut().memories }
1573 }
1574
1575 pub(crate) fn tables_mut(
1576 self: Pin<&mut Self>,
1577 ) -> &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
1578 // SAFETY: see `store_mut` above.
1579 unsafe { &mut self.get_unchecked_mut().tables }
1580 }
1581
1582 #[cfg(feature = "wmemcheck")]
1583 pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
1584 // SAFETY: see `store_mut` above.
1585 unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
1586 }
1587}
1588
1589// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
1590 // last field of `Instance`.
1591unsafe impl InstanceLayout for Instance {
1592 const INIT_ZEROED: bool = false;
1593 type VMContext = VMContext;
1594
1595 fn layout(&self) -> Layout {
1596 Self::alloc_layout(self.runtime_info.offsets())
1597 }
1598
1599 fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
1600 &self.vmctx
1601 }
1602
1603 fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
1604 &mut self.vmctx
1605 }
1606}
1607
1608pub type InstanceHandle = OwnedInstance<Instance>;
1609
1610/// A handle holding an `Instance` of a WebAssembly module.
1611///
1612/// This structure is an owning handle of the `instance` contained internally.
1613/// When this value goes out of scope it will deallocate the `Instance` and all
1614/// memory associated with it.
1615///
1616/// Note that this lives within a `StoreOpaque` on a list of instances that a
1617/// store is keeping alive.
1618#[derive(Debug)]
1619#[repr(transparent)] // guarantee this is a zero-cost wrapper
1620pub struct OwnedInstance<T: InstanceLayout> {
1621 /// The raw pointer to the instance that was allocated.
1622 ///
1623 /// Note that this is not equivalent to `Box<Instance>` because the
1624 /// allocation here has a `VMContext` trailing after it. Thus the custom
1625 /// destructor to invoke the `dealloc` function with the appropriate
1626 /// layout.
1627 instance: SendSyncPtr<T>,
1628 _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
1629}
1630
1631/// Structure that must be placed at the end of a type implementing
1632/// `InstanceLayout`.
1633#[repr(align(16))] // match the alignment of VMContext
1634pub struct OwnedVMContext<T> {
1635 /// A pointer to the trailing `VMContext` at the end of the enclosing structure.
1636 ///
1637 /// If you're looking at this, a reasonable question would be "why do we need
1638 /// a pointer to ourselves?" because, after all, the pointer's value is
1639 /// trivially derivable from any `&Instance` pointer. The rationale for this
1640 /// field's existence is subtle, but it's required for correctness. The
1641 /// short version is "this makes miri happy".
1642 ///
1643 /// The long version of why this field exists is that the rules that MIRI
1644 /// uses to ensure pointers are used correctly have various conditions on
1645 /// them that depend on how pointers are used. More specifically, if `*mut T` is
1646 /// derived from `&mut T`, then that invalidates all prior pointers derived
1647 /// from the `&mut T`. This means that while we liberally want to re-acquire
1648 /// a `*mut VMContext` throughout the implementation of `Instance` the
1649 /// trivial way, a function `fn vmctx(Pin<&mut Instance>) -> *mut VMContext`
1650 /// would effectively invalidate all prior `*mut VMContext` pointers
1651 /// acquired. The purpose of this field is to serve as a sort of
1652 /// source-of-truth for where `*mut VMContext` pointers come from.
1653 ///
1654 /// This field is initialized when the `Instance` is created with the
1655 /// original allocation's pointer. That means that the provenance of this
1656 /// pointer contains the entire allocation (both instance and `VMContext`).
1657 /// This provenance bit is then "carried through" where `fn vmctx` will base
1658 /// all returned pointers on this pointer itself. This provides the means of
1659 /// never invalidating this pointer throughout MIRI and additionally being
1660 /// able to still temporarily have `Pin<&mut Instance>` methods and such.
1661 ///
1662 /// It's important to note, though, that this is not here purely for MIRI.
1663 /// The careful construction of the `fn vmctx` method has ramifications on
1664 /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
1665 /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
1666 /// deriving VMContext pointers from this pointer it specifically hints to
1667 /// LLVM that trickery is afoot and it properly informs `noalias` and such
1668 /// annotations and analysis. More-or-less this pointer is actually loaded
1669 /// in LLVM IR which helps defeat otherwise present aliasing optimizations,
1670 /// which we want, since writes to this should basically never be optimized
1671 /// out.
1672 ///
1673 /// As a final note it's worth pointing out that the machine code generated
1674 /// for accessing `fn vmctx` is still as one would expect. This member isn't
1675 /// actually ever loaded at runtime (or at least shouldn't be). Perhaps in
1676 /// the future if the memory consumption of this field is a problem we could
1677 /// shrink it slightly, but for now one extra pointer per wasm instance
1678 /// seems not too bad.
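    ///
    /// In sketch form the pattern this field enables looks roughly like the
    /// following (see `InstanceLayout::vmctx` later in this file for the real
    /// implementation):
    ///
    /// ```ignore
    /// // Rebase the address to just past the last field, where the trailing
    /// // `VMContext` lives, while keeping the provenance of the pointer that
    /// // was stored here when the instance was allocated.
    /// let addr = NonNull::from(self.owned_vmctx()).add(1).addr();
    /// self.owned_vmctx().vmctx_self_reference.as_non_null().with_addr(addr)
    /// ```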
1679 vmctx_self_reference: SendSyncPtr<T>,
1680
1681 /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
1682 /// safe operation.
1683 _marker: core::marker::PhantomPinned,
1684}
1685
1686impl<T> OwnedVMContext<T> {
1687 /// Creates a new blank vmctx to place at the end of an instance.
1688 pub fn new() -> OwnedVMContext<T> {
1689 OwnedVMContext {
1690 vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
1691 _marker: core::marker::PhantomPinned,
1692 }
1693 }
1694}
1695
1696/// Helper trait to plumb both core instances and component instances into
1697 /// `OwnedInstance` above.
1698///
1699/// # Safety
1700///
1701/// This trait requires `layout` to correctly describe `Self` and appropriately
1702/// allocate space for `Self::VMContext` afterwards. Additionally the field
1703/// returned by `owned_vmctx()` must be the last field in the structure.
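///
/// As an illustrative sketch (the type and field names are hypothetical, not
/// real Wasmtime types), an implementor is expected to look roughly like:
///
/// ```ignore
/// #[repr(C)]
/// struct MyInstance {
///     some_state: u32,
///     // must be the last field so the `VMContext` allocation trails it
///     vmctx: OwnedVMContext<VMContext>,
/// }
///
/// // SAFETY: `layout` reserves space for the trailing `VMContext` and
/// // `owned_vmctx` returns the last field of the structure.
/// unsafe impl InstanceLayout for MyInstance {
///     const INIT_ZEROED: bool = false;
///     type VMContext = VMContext;
///
///     fn layout(&self) -> Layout {
///         // size/align of `MyInstance` plus space for the trailing `VMContext`
///         todo!()
///     }
///
///     fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
///         &self.vmctx
///     }
///
///     fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
///         &mut self.vmctx
///     }
/// }
/// ```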
1704pub unsafe trait InstanceLayout {
1705 /// Whether or not to allocate this instance with `alloc_zeroed` or `alloc`.
1706 /// Whether to allocate this instance with `alloc_zeroed` or with `alloc`.
1707
1708 /// The trailing `VMContext` type at the end of this instance.
1709 type VMContext;
1710
1711 /// The memory layout to use to allocate and deallocate this instance.
1712 fn layout(&self) -> Layout;
1713
1714 fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
1715 fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;
1716
1717 /// Returns a pointer to the trailing `VMContext`, derived from `vmctx_self_reference`.
1718 #[inline]
1719 fn vmctx(&self) -> NonNull<Self::VMContext> {
1720 // The definition of this method is subtle but intentional. The goal
1721 // here is that effectively this should return `&mut self.vmctx`, but
1722 // it's not quite so simple. Some more documentation is available on the
1723 // `vmctx_self_reference` field, but the general idea is that we're
1724 // creating a pointer to return with proper provenance. Provenance is
1725 // still in the works in Rust at the time of this writing but the load
1726 // of the `self.vmctx_self_reference` field is important here as it
1727 // affects how LLVM thinks about aliasing with respect to the returned
1728 // pointer.
1729 //
1730 // The intention of this method is to codegen to machine code as `&mut
1731 // self.vmctx`, however. While it doesn't show up like this in LLVM IR
1732 // (there's an actual load of the field) it does look like that by the
1733 // time the backend runs. (that's magic to me, the backend removing
1734 // loads...)
1735 let owned_vmctx = self.owned_vmctx();
1736 let owned_vmctx_raw = NonNull::from(owned_vmctx);
1737 // SAFETY: it's part of the contract of `InstanceLayout` and the usage
1738 // with `OwnedInstance` that this indeed points to the vmctx.
1739 let addr = unsafe { owned_vmctx_raw.add(1) };
1740 owned_vmctx
1741 .vmctx_self_reference
1742 .as_non_null()
1743 .with_addr(addr.addr())
1744 }
1745
1746 /// Helper function to access various locations offset from our `*mut
1747 /// VMContext` object.
1748 ///
1749 /// Note that this method takes `&self` as an argument but returns
1750 /// `NonNull<T>` which is frequently used to mutate said memory. This is an
1751 /// intentional design decision where the safety of the modification of
1752 /// memory is placed as a burden onto the caller. The implementation of this
1753 /// method explicitly does not require `&mut self` to acquire mutable
1754 /// provenance to update the `VMContext` region. Instead all pointers into
1755 /// the `VMContext` area have provenance/permissions to write.
1756 ///
1757 /// Also note, though, that care must be taken to ensure that reads/writes of
1758 /// memory only happen where appropriate; for example a non-atomic
1759 /// write (as most are) should never happen concurrently with another read
1760 /// or write. It's generally the caller's burden to adhere to this.
1761 ///
1762 /// Also of note is that most of the time the usage of this method falls
1763 /// into one of:
1764 ///
1765 /// * Something in the VMContext is being read or written. In that case use
1766 /// `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due to
1767 /// that having a safer lifetime.
1768 ///
1769 /// * A pointer is being created to pass to other VM* data structures. In
1770 /// that situation the lifetimes of all VM data structures are typically
1771 /// tied to the `Store<T>` which is what provides the guarantees around
1772 /// concurrency/etc.
1773 ///
1774 /// There's quite a lot of unsafety riding on this method, especially
1775 /// related to the ascription `T` of the byte `offset`. It's hoped that in
1776 /// the future we're able to settle on a design that is, in theory, safer.
1777 ///
1778 /// # Safety
1779 ///
1780 /// This method is unsafe because the `offset` must be within bounds of the
1781 /// `VMContext` object trailing this instance. Additionally `T` must be a
1782 /// valid ascription of the value that resides at that location.
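    ///
    /// An illustrative sketch of typical usage (mirroring how the memories
    /// array is initialized earlier in this file; `offsets` stands in for the
    /// instance's `VMOffsets`):
    ///
    /// ```ignore
    /// // SAFETY: the offset lies within the `VMContext` and the ascribed
    /// // pointee type matches what is stored at that offset.
    /// let ptr: NonNull<VmPtr<VMMemoryDefinition>> =
    ///     unsafe { self.vmctx_plus_offset_raw(offsets.vmctx_memories_begin()) };
    /// ```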
1783 unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
1784 // SAFETY: the safety requirements of `byte_add` are forwarded to this
1785 // method's caller.
1786 unsafe {
1787 self.vmctx()
1788 .byte_add(usize::try_from(offset.into()).unwrap())
1789 .cast()
1790 }
1791 }
1792
1793 /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
1794 /// `&self` to the returned reference `&T`.
1795 ///
1796 /// # Safety
1797 ///
1798 /// See the safety documentation of `vmctx_plus_offset_raw`.
1799 unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
1800 // SAFETY: this method has the same safety requirements as
1801 // `vmctx_plus_offset_raw`.
1802 unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
1803 }
1804
1805 /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
1806 /// `&mut self` to the returned reference `&mut T`.
1807 ///
1808 /// # Safety
1809 ///
1810 /// See the safety documentation of `vmctx_plus_offset_raw`.
1811 unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
1812 self: Pin<&mut Self>,
1813 offset: impl Into<u32>,
1814 ) -> &mut T {
1815 // SAFETY: this method has the same safety requirements as
1816 // `vmctx_plus_offset_raw`.
1817 unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
1818 }
1819}
1820
1821impl<T: InstanceLayout> OwnedInstance<T> {
1822 /// Allocates a new `OwnedInstance` and places `instance` inside of it.
1823 ///
1824 /// This will allocate memory for `instance` along with its trailing `VMContext` and move `instance` into it.
1825 pub(super) fn new(mut instance: T) -> OwnedInstance<T> {
1826 let layout = instance.layout();
1827 debug_assert!(layout.size() >= size_of_val(&instance));
1828 debug_assert!(layout.align() >= align_of_val(&instance));
1829
1830 // SAFETY: it's up to us to assert that `layout` has a non-zero size,
1831 // which is asserted here.
1832 let ptr = unsafe {
1833 assert!(layout.size() > 0);
1834 if T::INIT_ZEROED {
1835 alloc::alloc::alloc_zeroed(layout)
1836 } else {
1837 alloc::alloc::alloc(layout)
1838 }
1839 };
1840 if ptr.is_null() {
1841 alloc::alloc::handle_alloc_error(layout);
1842 }
1843 let instance_ptr = NonNull::new(ptr.cast::<T>()).unwrap();
1844
1845 // SAFETY: it's part of the unsafe contract of `InstanceLayout` that the
1846 // `add` here is appropriate for the layout allocated.
1847 let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
1848 instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();
1849
1850 // SAFETY: we allocated above and it's an unsafe contract of
1851 // `InstanceLayout` that the layout is suitable for writing the
1852 // instance.
1853 unsafe {
1854 instance_ptr.write(instance);
1855 }
1856
1857 let ret = OwnedInstance {
1858 instance: SendSyncPtr::new(instance_ptr),
1859 _marker: marker::PhantomData,
1860 };
1861
1862 // Double-check various vmctx calculations are correct.
1863 debug_assert_eq!(
1864 vmctx_self_reference.addr(),
1865 // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
1866 // the last field to get a pointer to 1-byte-past-the-end of an
1867 // object, which should be valid.
1868 unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
1869 );
1870 debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());
1871
1872 ret
1873 }
1874
1875 /// Gets the raw underlying `&Instance` from this handle.
1876 pub fn get(&self) -> &T {
1877 // SAFETY: this is an owned instance handle that retains exclusive
1878 // ownership of the `Instance` inside. With `&self` given we know
1879 // this pointer is valid and the returned lifetime is connected
1880 // to `self` so that should also be valid.
1881 unsafe { self.instance.as_non_null().as_ref() }
1882 }
1883
1884 /// Same as [`Self::get`] except for mutability.
1885 pub fn get_mut(&mut self) -> Pin<&mut T> {
1886 // SAFETY: The lifetime concerns here are the same as `get` above.
1887 // Otherwise `new_unchecked` is used here to uphold the contract that
1888 // instances are always pinned in memory.
1889 unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
1890 }
1891}
1892
1893impl<T: InstanceLayout> Drop for OwnedInstance<T> {
1894 fn drop(&mut self) {
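        // SAFETY: this handle exclusively owns the instance allocation, so it
        // is sound to drop the instance in place and then free the memory
        // using the same layout it was allocated with in `new`.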
1895 unsafe {
1896 let layout = self.get().layout();
1897 ptr::drop_in_place(self.instance.as_ptr());
1898 alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
1899 }
1900 }
1901}