wasmtime/runtime/vm/instance.rs
//! An `Instance` contains all the runtime state used by execution of a
//! wasm module (except its callstack and register state). An
//! `InstanceHandle` is a reference-counting handle for an `Instance`.

use crate::OpaqueRootScope;
use crate::code::ModuleWithCode;
use crate::module::ModuleRegistry;
use crate::prelude::*;
use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
use crate::runtime::vm::export::{Export, ExportMemory};
use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
use crate::runtime::vm::table::{Table, TableElementType};
use crate::runtime::vm::vmcontext::{
    VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
    VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
    VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
};
use crate::runtime::vm::{
    GcStore, HostResult, Imports, ModuleRuntimeInfo, SendSyncPtr, VMGlobalKind, VMStore,
    VMStoreRawPtr, VmPtr, VmSafe, WasmFault, catch_unwind_and_record_trap,
};
use crate::store::{InstanceId, StoreId, StoreInstanceId, StoreOpaque, StoreResourceLimiter};
use crate::vm::VMWasmCallFunction;
use alloc::sync::Arc;
use core::alloc::Layout;
use core::marker;
use core::ops::Range;
use core::pin::Pin;
use core::ptr::NonNull;
#[cfg(target_has_atomic = "64")]
use core::sync::atomic::AtomicU64;
use core::{mem, ptr};
#[cfg(feature = "gc")]
use wasmtime_environ::ModuleInternedTypeIndex;
use wasmtime_environ::{
    DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
    ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex, HostPtr, MemoryIndex,
    Module, PrimaryMap, PtrSize, TableIndex, TableInitialValue, TableSegmentElements, TagIndex,
    Trap, VMCONTEXT_MAGIC, VMOffsets, VMSharedTypeIndex, packed_option::ReservedValue,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::Wmemcheck;

mod allocator;
pub use allocator::*;

/// A type that roughly corresponds to a WebAssembly instance, but is also used
/// for host-defined objects.
///
/// Instances here can correspond to actual instantiated modules, but this type
/// is also used ubiquitously for host-defined objects. For example, a
/// host-defined memory will have a `module` that looks like it exports a
/// single memory (and similar for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether they were created on the host or through a module.
///
/// # Ownership
///
/// This structure is never allocated directly but is instead managed through
/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
/// dynamic size corresponding to the `module` configured within. Memory
/// management of this structure is always done through `InstanceHandle` as the
/// sole owner of an instance.
///
/// # `Instance` and `Pin`
///
/// An instance is accompanied by trailing memory for the appropriate
/// `VMContext`. The `Instance` also holds `runtime_info` and other
/// information pointing to relevant offsets for the `VMContext`. Thus it is
/// not sound to mutate `runtime_info` after an instance is created. More
/// generally it's also not safe to "swap" instances, for example given two
/// `&mut Instance` values it's not sound to swap them as then the `VMContext`
/// values are inaccurately described.
///
/// To encapsulate this guarantee this type is only ever mutated through Rust's
/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>` which
/// statically disallows safe access to `&mut Instance`. There are assorted
/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
/// individual fields, for example `memories_mut`. More methods can be added as
/// necessary, and methods may also be added to project multiple fields at a
/// time. The precise ergonomics around getting mutable access to some fields
/// (but notably not `runtime_info`) are probably going to evolve over time.
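///
/// A minimal sketch of how pinned mutation is intended to look, using the
/// `memories_mut` projection mentioned above (the `bump` function itself is
/// hypothetical):
///
/// ```ignore
/// fn bump(mut instance: Pin<&mut Instance>) {
///     // Project to a single field instead of exposing `&mut Instance`,
///     // which would allow unsound swaps of the whole structure.
///     let memories = instance.as_mut().memories_mut();
///     let _ = memories;
/// }
/// ```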
///
/// Note that it is not sound to basically ever pass around `&mut Instance`.
/// That should always instead be `Pin<&mut Instance>`. All usage of
/// `Pin::new_unchecked` should be here in this module in just a few `unsafe`
/// locations and it's recommended to use existing helpers if you can.
#[repr(C)] // ensure that the vmctx field is last.
pub struct Instance {
    /// The index within a `Store` at which this instance lives.
    id: InstanceId,

    /// The runtime info (corresponding to the "compiled module"
    /// abstraction in higher layers) that is retained and needed for
    /// lazy initialization. This provides access to the underlying
    /// Wasm module entities, the compiled JIT code, metadata about
    /// functions, lazy initialization state, etc.
    runtime_info: ModuleRuntimeInfo,

    /// WebAssembly linear memory data.
    ///
    /// This is where all runtime information about defined linear memories in
    /// this module lives.
    ///
    /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// memory.
    memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,

    /// WebAssembly table data.
    ///
    /// Like memories, this is only for defined tables in the module and
    /// contains all of their runtime state.
    ///
    /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// table.
    tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,

    /// Stores the dropped passive element segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_elements: EntitySet<ElemIndex>,

    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: EntitySet<DataIndex>,

    // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
    // memory 0.
    #[cfg(feature = "wmemcheck")]
    pub(crate) wmemcheck_state: Option<Wmemcheck>,

    /// Self-pointer back to `Store<T>` and its functions. Not present for
    /// the brief time that `Store<T>` is itself being created. Also not
    /// present for some niche uses that are disconnected from stores (e.g.
    /// cross-thread stuff used in `InstancePre`)
    store: Option<VMStoreRawPtr>,

    /// Additional context used by compiled wasm code. This field is last, and
    /// represents a dynamically-sized array that extends beyond the nominal
    /// end of the struct (similar to a flexible array member).
    vmctx: OwnedVMContext<VMContext>,
}

impl Instance {
    /// Create an instance at the given memory address.
    ///
    /// It is assumed the memory was properly aligned and the
    /// allocation was `alloc_size` in bytes.
    ///
    /// # Safety
    ///
    /// The `req.imports` field must be appropriately sized/typed for the module
    /// being allocated according to `req.runtime_info`. Additionally `memories`
    /// and `tables` must have been allocated for `req.store`.
    unsafe fn new(
        req: InstanceAllocationRequest,
        memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
        tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
        memory_tys: &PrimaryMap<MemoryIndex, wasmtime_environ::Memory>,
    ) -> InstanceHandle {
        let module = req.runtime_info.env_module();
        let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
        let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());

        #[cfg(not(feature = "wmemcheck"))]
        let _ = memory_tys;

        let mut ret = OwnedInstance::new(Instance {
            id: req.id,
            runtime_info: req.runtime_info.clone(),
            memories,
            tables,
            dropped_elements,
            dropped_data,
            #[cfg(feature = "wmemcheck")]
            wmemcheck_state: {
                if req.store.engine().config().wmemcheck {
                    let size = memory_tys
                        .iter()
                        .next()
                        .map(|memory| memory.1.limits.min)
                        .unwrap_or(0)
                        * 64
                        * 1024;
                    Some(Wmemcheck::new(size.try_into().unwrap()))
                } else {
                    None
                }
            },
            store: None,
            vmctx: OwnedVMContext::new(),
        });

        // SAFETY: this vmctx was allocated with the same layout above, so it
        // should be safe to initialize with the same values here.
        unsafe {
            ret.get_mut().initialize_vmctx(
                module,
                req.runtime_info.offsets(),
                req.store,
                req.imports,
            );
        }
        ret
    }

    /// Converts a raw `VMContext` pointer into a raw `Instance` pointer.
    ///
    /// # Safety
    ///
    /// Calling this function safely requires that `vmctx` is a valid allocation
    /// of a `VMContext` which is derived from `Instance::new`. To safely
    /// convert the returned raw pointer into a safe instance pointer callers
    /// will also want to uphold guarantees such as:
    ///
    /// * The instance should not be in use elsewhere. For example you can't
    ///   call this function twice, turn both raw pointers into safe pointers,
    ///   and then use both safe pointers.
    /// * There should be no other active mutable borrow to any other instance
    ///   within the same store. Note that this is not restricted to just this
    ///   instance pointer, but to all instances in a store. Instances can
    ///   safely traverse to other instances "laterally" meaning that a mutable
    ///   borrow on one is a mutable borrow on all.
    /// * There should be no active mutable borrow on the store at the same
    ///   time the instance pointer is turned into a safe borrow. Instances are
    ///   owned by a store and a store can be used to acquire a safe instance
    ///   borrow at any time.
    /// * The lifetime of the usage of the instance should not be unnecessarily
    ///   long, for example it cannot be `'static`.
    ///
    /// Other entrypoints exist for converting from a raw `VMContext` to a safe
    /// pointer such as:
    ///
    /// * `Instance::enter_host_from_wasm`
    /// * `Instance::sibling_vmctx{,_mut}`
    ///
    /// These place further restrictions on the API signature to satisfy some of
    /// the above points.
    #[inline]
    pub(crate) unsafe fn from_vmctx(vmctx: NonNull<VMContext>) -> NonNull<Instance> {
        // SAFETY: The validity of `byte_sub` relies on `vmctx` being a valid
        // allocation.
        unsafe {
            vmctx
                .byte_sub(mem::size_of::<Instance>())
                .cast::<Instance>()
        }
    }

    /// Encapsulated entrypoint to the host from WebAssembly, converting a raw
    /// `VMContext` pointer into a `VMStore` plus an `InstanceId`.
    ///
    /// This is an entrypoint for core wasm entering back into the host. This is
    /// used for both host functions and libcalls for example. This will execute
    /// the closure `f` with safer internal types than a raw `VMContext`
    /// pointer.
    ///
    /// The closure `f` will have its errors caught, handled, and translated to
    /// an ABI-safe return value to give back to wasm. This includes both normal
    /// errors such as traps as well as panics.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vmctx` is a valid allocation and is safe to
    /// dereference at this time. That's generally only true when it's a
    /// wasm-provided value and this is the first function called after entering
    /// the host. Otherwise this could unsafely alias the store with a mutable
    /// pointer, for example.
    #[inline]
    pub(crate) unsafe fn enter_host_from_wasm<R>(
        vmctx: NonNull<VMContext>,
        f: impl FnOnce(&mut dyn VMStore, InstanceId) -> R,
    ) -> R::Abi
    where
        R: HostResult,
    {
        // SAFETY: It's a contract of this function that `vmctx` is a valid
        // pointer with neither the store nor other instances actively in use
        // when this is called, so it should be safe to acquire a mutable
        // pointer to the store and read the instance pointer.
        let (store, instance) = unsafe {
            let instance = Instance::from_vmctx(vmctx);
            let instance = instance.as_ref();
            let store = &mut *instance.store.unwrap().0.as_ptr();
            (store, instance.id)
        };

        // Thread the `store` and `instance` through panic/trap infrastructure
        // back into `f`.
        catch_unwind_and_record_trap(store, |store| f(store, instance))
    }
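
    // A minimal sketch (the libcall below is hypothetical, not part of this
    // module, and the `bool` return assumes `Result<()>`'s `HostResult` ABI)
    // of how `enter_host_from_wasm` is intended to be the first call made
    // after wasm re-enters the host:
    //
    //     unsafe extern "C" fn example_libcall(vmctx: NonNull<VMContext>) -> bool {
    //         // SAFETY: `vmctx` came directly from wasm and nothing else in
    //         // the store or its instances is in use at this point.
    //         unsafe {
    //             Instance::enter_host_from_wasm(vmctx, |store, id| -> Result<()> {
    //                 let _ = (store, id);
    //                 Ok(())
    //             })
    //         }
    //     }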

    /// Converts the provided `*mut VMContext` to an `Instance` pointer and
    /// returns it with the same lifetime as `self`.
    ///
    /// This function can be used when traversing a `VMContext` to reach into
    /// the context needed for imports, optionally.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    #[inline]
    unsafe fn sibling_vmctx<'a>(&'a self, vmctx: NonNull<VMContext>) -> &'a Instance {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer, meaning the pointer arithmetic performed by
        // `from_vmctx` is valid for it.
        let ptr = unsafe { Instance::from_vmctx(vmctx) };
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime.
        unsafe { ptr.as_ref() }
    }

    /// Same as [`Self::sibling_vmctx`], but the mutable version.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    ///
    /// (Note that it is *NOT* required that `vmctx` be distinct from this
    /// instance's `vmctx`, or that usage of the resulting instance is limited
    /// to its defined items! The returned borrow has the same lifetime as
    /// `self`, which means that this instance cannot be used while the
    /// resulting instance is in use, and we therefore do not need to worry
    /// about mutable aliasing between this instance and the resulting
    /// instance.)
    #[inline]
    unsafe fn sibling_vmctx_mut<'a>(
        self: Pin<&'a mut Self>,
        vmctx: NonNull<VMContext>,
    ) -> Pin<&'a mut Instance> {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer such that this pointer arithmetic is valid.
        let mut ptr = unsafe { Instance::from_vmctx(vmctx) };

        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime. Finally mutable references to an instance are
        // always through `Pin`, so it's safe to create a pin-pointer here.
        unsafe { Pin::new_unchecked(ptr.as_mut()) }
    }

    pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
        self.runtime_info.env_module()
    }

    pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
        match &self.runtime_info {
            ModuleRuntimeInfo::Module(m) => Some(m),
            ModuleRuntimeInfo::Bare(_) => None,
        }
    }

    /// Translate a module-level interned type index into an engine-level
    /// interned type index.
    #[cfg(feature = "gc")]
    pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
        self.runtime_info.engine_type_index(module_index)
    }

    #[inline]
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }

    /// Return the indexed `VMFunctionImport`.
    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
    }

    /// Return the indexed `VMTableImport`.
    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
    }

    /// Return the indexed `VMMemoryImport`.
    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
    }

    /// Return the indexed `VMGlobalImport`.
    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
    }

    /// Return the indexed `VMTagImport`.
    fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
    }

    /// Return the indexed `VMTagDefinition`.
    pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
    }

    /// Return the indexed `VMTableDefinition`.
    pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
        unsafe { self.table_ptr(index).read() }
    }

    /// Updates the value for a defined table to `VMTableDefinition`.
    fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
        unsafe {
            self.table_ptr(index).write(table);
        }
    }

    /// Return a pointer to the `index`'th table within this instance, stored
    /// in vmctx memory.
    pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
    }

    /// Get a locally defined or imported memory.
    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
        if let Some(defined_index) = self.env_module().defined_memory_index(index) {
            self.memory(defined_index)
        } else {
            let import = self.imported_memory(index);
            unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
        }
    }

    /// Return the indexed `VMMemoryDefinition`, loaded from vmctx memory
    /// already.
    #[inline]
    pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
        unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
    }

    /// Set the indexed memory to `VMMemoryDefinition`.
    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
        unsafe {
            self.memory_ptr(index).write(mem);
        }
    }

    /// Return the address of the specified memory at `index` within this vmctx.
    ///
    /// Note that the returned pointer resides in wasm-code-readable-memory in
    /// the vmctx.
    #[inline]
    pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
        unsafe {
            self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
                .as_non_null()
        }
    }

    /// Return the indexed `VMGlobalDefinition`.
    pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
    }

    /// Get all globals within this instance.
    ///
    /// Returns both import and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full globals space.
    pub fn all_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (GlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        module
            .globals
            .keys()
            .map(move |idx| (idx, self.get_exported_global(store, idx)))
    }

    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        self.all_globals(store)
            .skip(module.num_imported_globals)
            .map(move |(i, global)| (module.defined_global_index(i).unwrap(), global))
    }
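
    // Worked example: for a module with 2 imported globals and 3 defined
    // globals, `all_globals` yields `GlobalIndex` 0 through 4, while
    // `defined_globals` skips the first 2 entries and yields
    // `DefinedGlobalIndex` 0 through 2 (each global index minus the number
    // of imports).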

    /// Return a pointer to the interrupts structure
    #[inline]
    pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
    }

    /// Return a pointer to the global epoch counter used by this instance.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
        let offset = self.offsets().ptr.vmctx_epoch_ptr();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    /// Return a pointer to the collector-specific heap data.
    pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
        let offset = self.offsets().ptr.vmctx_gc_heap_data();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: &StoreOpaque) {
        // FIXME: should be more targeted ideally with the `unsafe` than just
        // throwing this entire function in a large `unsafe` block.
        unsafe {
            *self.as_mut().store_mut() = Some(VMStoreRawPtr(store.traitobj()));
            self.vm_store_context()
                .write(Some(store.vm_store_context_ptr().into()));
            #[cfg(target_has_atomic = "64")]
            {
                *self.as_mut().epoch_ptr() =
                    Some(NonNull::from(store.engine().epoch_counter()).into());
            }

            if self.env_module().needs_gc_heap {
                self.as_mut().set_gc_heap(Some(store.unwrap_gc_store()));
            } else {
                self.as_mut().set_gc_heap(None);
            }
        }
    }

    unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
        if let Some(gc_store) = gc_store {
            *self.gc_heap_data() = Some(unsafe { gc_store.gc_heap.vmctx_gc_heap_data().into() });
        } else {
            *self.gc_heap_data() = None;
        }
    }

    /// Return a reference to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> NonNull<VMContext> {
        InstanceLayout::vmctx(self)
    }

    /// Lookup a function by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    ///
    /// # Safety
    ///
    /// The `store` parameter must be the store that owns this instance and the
    /// functions that this instance can reference.
    pub unsafe fn get_exported_func(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        index: FuncIndex,
    ) -> crate::Func {
        let func_ref = self.get_func_ref(registry, index).unwrap();

        // SAFETY: the validity of `func_ref` is guaranteed by the validity of
        // `self`, and the contract that `store` must own `func_ref` is a
        // contract of this function itself.
        unsafe { crate::Func::from_vm_func_ref(store, func_ref) }
    }

    /// Lookup a table by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    pub fn get_exported_table(&self, store: StoreId, index: TableIndex) -> crate::Table {
        let (id, def_index) = if let Some(def_index) = self.env_module().defined_table_index(index)
        {
            (self.id, def_index)
        } else {
            let import = self.imported_table(index);
            // SAFETY: validity of this `Instance` guarantees validity of the
            // `vmctx` pointer being read here to find the transitive
            // `InstanceId` that the import is associated with.
            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
            (id, import.index)
        };
        crate::Table::from_raw(StoreInstanceId::new(store, id), def_index)
    }

    /// Lookup a memory by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    #[cfg_attr(
        not(feature = "threads"),
        expect(unused_variables, reason = "definitions cfg'd to dummy",)
    )]
    pub fn get_exported_memory(&self, store: StoreId, index: MemoryIndex) -> ExportMemory {
        let module = self.env_module();
        if module.memories[index].shared {
            let (memory, import) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (
                        self.get_defined_memory(def_index),
                        self.get_defined_memory_vmimport(def_index),
                    )
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of
                    // the `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let instance = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()) };
                    (instance.get_defined_memory(import.index), *import)
                };

            let vm = memory.as_shared_memory().unwrap().clone();
            ExportMemory::Shared(vm, import)
        } else {
            let (id, def_index) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (self.id, def_index)
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of the
                    // `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
                    (id, import.index)
                };

            // SAFETY: `from_raw` requires that the memory is not shared, which
            // was tested above in this if/else.
            let store_id = StoreInstanceId::new(store, id);
            ExportMemory::Unshared(unsafe { crate::Memory::from_raw(store_id, def_index) })
        }
    }

    /// Lookup a global by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    pub(crate) fn get_exported_global(&self, store: StoreId, index: GlobalIndex) -> crate::Global {
        // If this global is defined within this instance, then the `Global` is
        // easy to calculate.
        if let Some(def_index) = self.env_module().defined_global_index(index) {
            let instance = StoreInstanceId::new(store, self.id);
            return crate::Global::from_core(instance, def_index);
        }

        // For imported globals it's required to match on the `kind` to
        // determine which `Global` constructor is going to be invoked.
        let import = self.imported_global(index);
        match import.kind {
            VMGlobalKind::Host(index) => crate::Global::from_host(store, index),
            VMGlobalKind::Instance(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = VMContext::from_opaque(import.vmctx.unwrap().as_non_null());
                    self.sibling_vmctx(vmctx).id
                };
                crate::Global::from_core(StoreInstanceId::new(store, id), index)
            }
            #[cfg(feature = "component-model")]
            VMGlobalKind::ComponentFlags(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = super::component::VMComponentContext::from_opaque(
                        import.vmctx.unwrap().as_non_null(),
                    );
                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
                };
                crate::Global::from_component_flags(
                    crate::component::store::StoreComponentInstanceId::new(store, id),
                    index,
                )
            }
            #[cfg(feature = "component-model")]
            VMGlobalKind::TaskMayBlock => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = super::component::VMComponentContext::from_opaque(
                        import.vmctx.unwrap().as_non_null(),
                    );
                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
                };
                crate::Global::from_task_may_block(
                    crate::component::store::StoreComponentInstanceId::new(store, id),
                )
            }
        }
    }

    /// Get an exported tag by index.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range.
    pub fn get_exported_tag(&self, store: StoreId, index: TagIndex) -> crate::Tag {
        let (id, def_index) = if let Some(def_index) = self.env_module().defined_tag_index(index) {
            (self.id, def_index)
        } else {
            let import = self.imported_tag(index);
            // SAFETY: validity of this `Instance` guarantees validity of the
            // `vmctx` pointer being read here to find the transitive
            // `InstanceId` that the import is associated with.
            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
            (id, import.index)
        };
        crate::Tag::from_raw(StoreInstanceId::new(store, id), def_index)
    }

    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `lookup_by_declaration`.
    pub fn exports(&self) -> wasmparser::collections::index_map::Iter<'_, String, EntityIndex> {
        self.env_module().exports.iter()
    }

    /// Grow memory by the specified amount of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified amount
    /// of pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
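    ///
    /// For example, growing a one-page memory (65_536 bytes, one 64 KiB wasm
    /// page) by one page returns `Ok(Some(65_536))`: the old size in bytes,
    /// with the new size being 131_072 bytes.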
    pub(crate) async fn memory_grow(
        mut self: Pin<&mut Self>,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        idx: DefinedMemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        let memory = &mut self.as_mut().memories_mut()[idx].1;

        // SAFETY: this is the safe wrapper around `Memory::grow` because it
        // automatically updates the `VMMemoryDefinition` in this instance after
        // a growth operation below.
        let result = unsafe { memory.grow(delta, limiter).await };

        // Update the state used by a non-shared Wasm memory in case the base
        // pointer and/or the length changed.
        if memory.as_shared_memory().is_none() {
            let vmmemory = memory.vmmemory();
            self.set_memory(idx, vmmemory);
        }

        result
    }

    pub(crate) fn table_element_type(
        self: Pin<&mut Self>,
        table_index: TableIndex,
    ) -> TableElementType {
        self.get_table(table_index).element_type()
    }

    /// Performs a grow operation on the `table_index` specified using `grow`.
    ///
    /// This will handle updating the `VMTableDefinition` internally as necessary.
    pub(crate) async fn defined_table_grow(
        mut self: Pin<&mut Self>,
        table_index: DefinedTableIndex,
        grow: impl AsyncFnOnce(&mut Table) -> Result<Option<usize>>,
    ) -> Result<Option<usize>> {
        let table = self.as_mut().get_defined_table(table_index);
        let result = grow(table).await;
        let element = table.vmtable();
        self.set_table(table_index, element);
        result
    }
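
    // A minimal sketch of the intended callback shape (the `delta`, `init`,
    // and `limiter` values and the exact `Table` growth API are hypothetical
    // here): the closure performs the actual growth, and
    // `defined_table_grow` then refreshes the `VMTableDefinition` stored in
    // vmctx memory.
    //
    //     let old_size = instance
    //         .defined_table_grow(idx, async |table| {
    //             table.grow(delta, init, limiter).await
    //         })
    //         .await?;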

    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
        let size = mem::size_of::<Self>()
            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
            .unwrap();
        let align = mem::align_of::<Self>();
        Layout::from_size_align(size, align).unwrap()
    }

    fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
    }

    /// Construct a new VMFuncRef for the given function
    /// (imported or defined in this module) and store into the given
    /// location. Used during lazy initialization.
    ///
    /// Note that our current lazy-init scheme actually calls this every
    /// time the funcref pointer is fetched; this turns out to be better
    /// than tracking state related to whether it's been initialized
    /// before, because resetting that state on (re)instantiation is
    /// very expensive if there are many funcrefs.
    ///
    /// # Safety
    ///
    /// This function requires that `into` is a valid pointer.
    unsafe fn construct_func_ref(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        index: FuncIndex,
        type_index: VMSharedTypeIndex,
        into: *mut VMFuncRef,
    ) {
        let module_with_code = ModuleWithCode::in_store(
            registry,
            self.runtime_module()
                .expect("funcref impossible in fake module"),
        )
        .expect("module not in store");

        let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
            VMFuncRef {
                array_call: NonNull::from(
                    module_with_code
                        .array_to_wasm_trampoline(def_index)
                        .expect("should have array-to-Wasm trampoline for escaping function"),
                )
                .cast()
                .into(),
                wasm_call: Some(
                    NonNull::new(
                        module_with_code
                            .finished_function(def_index)
                            .as_ptr()
                            .cast::<VMWasmCallFunction>()
                            .cast_mut(),
                    )
                    .unwrap()
                    .into(),
                ),
                vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
                type_index,
            }
        } else {
            let import = self.imported_function(index);
            VMFuncRef {
                array_call: import.array_call,
                wasm_call: Some(import.wasm_call),
                vmctx: import.vmctx,
                type_index,
            }
        };

        // SAFETY: the unsafe contract here is forwarded to callers of this
        // function.
        unsafe {
            ptr::write(into, func_ref);
        }
    }

    /// Get a `&VMFuncRef` for the given `FuncIndex`.
    ///
    /// Returns `None` if the index is the reserved index value.
    ///
    /// The returned reference is a stable reference that won't be moved and can
    /// be passed into JIT code.
    pub(crate) fn get_func_ref(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        index: FuncIndex,
    ) -> Option<NonNull<VMFuncRef>> {
        if index == FuncIndex::reserved_value() {
            return None;
        }

        // For now, we eagerly initialize a funcref struct in-place
        // whenever asked for a reference to it. This is mostly
        // fine, because in practice each funcref is unlikely to be
        // requested more than a few times: once-ish for funcref
        // tables used for call_indirect (the usual compilation
        // strategy places each function in the table at most once),
        // and once or a few times when fetching exports via API.
        // Note that for any case driven by table accesses, the lazy
        // table init behaves like a higher-level cache layer that
        // protects this initialization from happening multiple
        // times, via that particular table at least.
        //
        // When `ref.func` becomes more commonly used or if we
        // otherwise see a use-case where this becomes a hotpath,
        // we can reconsider by using some state to track
        // "uninitialized" explicitly, for example by zeroing the
        // funcrefs (perhaps together with other
        // zeroed-at-instantiate-time state) or using a separate
        // is-initialized bitmap.
        //
        // We arrived at this design because zeroing memory is
        // expensive, so it's better for instantiation performance
        // if we don't have to track "is-initialized" state at
        // all!
        let func = &self.env_module().functions[index];
        let sig = func.signature.unwrap_engine_type_index();

        // SAFETY: the offset calculated here should be correct with
        // `self.offsets`
        let func_ref = unsafe {
            self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
        };

        // SAFETY: the `func_ref` ptr should be valid as it's within our
        // `VMContext` area.
        unsafe {
            self.construct_func_ref(registry, index, sig, func_ref.as_ptr());
        }

        Some(func_ref)
    }

    /// Get the passive elements segment at the given index.
    ///
    /// Returns an empty segment if the index is out of bounds or if the segment
    /// has been dropped.
    ///
    /// The `storage` parameter should always be `None`; it is a bit of a hack
    /// to work around lifetime issues.
    pub(crate) fn passive_element_segment<'a>(
        &self,
        storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
        elem_index: ElemIndex,
    ) -> &'a TableSegmentElements {
        debug_assert!(storage.is_none());
        *storage = Some((
            // TODO: this `clone()` shouldn't be necessary but is used for now to
            // inform `rustc` that the lifetime of the elements here are
            // disconnected from the lifetime of `self`.
            self.env_module().clone(),
            // NB: fall back to an expressions-based list of elements which
            // doesn't have static type information (as opposed to
            // `TableSegmentElements::Functions`) since we don't know what type
            // is needed in the caller's context. Let the type be inferred by
            // how they use the segment.
            TableSegmentElements::Expressions(Box::new([])),
        ));
        let (module, empty) = storage.as_ref().unwrap();

        match module.passive_elements_map.get(&elem_index) {
            Some(index) if !self.dropped_elements.contains(elem_index) => {
                &module.passive_elements[*index]
            }
            _ => empty,
        }
    }

    /// The `table.init` operation: initializes a portion of a table with a
    /// passive element.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the range within the table is out of bounds
    /// or the range within the passive element is out of bounds.
    pub(crate) async fn table_init(
        store: &mut StoreOpaque,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        instance: InstanceId,
        table_index: TableIndex,
        elem_index: ElemIndex,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<()> {
        let mut storage = None;
        let elements = store
            .instance(instance)
            .passive_element_segment(&mut storage, elem_index);
        let mut const_evaluator = ConstExprEvaluator::default();
        Self::table_init_segment(
            store,
            limiter,
            instance,
            &mut const_evaluator,
            table_index,
            elements,
            dst,
            src,
            len,
        )
        .await
    }

    pub(crate) async fn table_init_segment(
        store: &mut StoreOpaque,
        mut limiter: Option<&mut StoreResourceLimiter<'_>>,
        elements_instance_id: InstanceId,
        const_evaluator: &mut ConstExprEvaluator,
        table_index: TableIndex,
        elements: &TableSegmentElements,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<()> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init

        let store_id = store.id();
        let elements_instance = store.instance_mut(elements_instance_id);
        let table = elements_instance.get_exported_table(store_id, table_index);
        let table_size = table._size(store);

        // Perform a bounds check on the table being written to. This is done by
        // ensuring that `dst + len <= table.size()` via checked arithmetic.
        //
        // Note that the bounds check for the element segment happens below when
        // the original segment is sliced via `src` and `len`.
        table_size
            .checked_sub(dst)
            .and_then(|i| i.checked_sub(len))
            .ok_or(Trap::TableOutOfBounds)?;
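
        // Worked example: with `table_size = 10`, `dst = 7`, and `len = 4`,
        // `10 - 7 = 3` and then `3 - 4` underflows, so `checked_sub` yields
        // `None` and the operation traps with `TableOutOfBounds`, matching
        // the fact that `7 + 4 > 10`.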

        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        let positions = dst..dst + u64::try_from(len).unwrap();
        match elements {
            TableSegmentElements::Functions(funcs) => {
                let elements = funcs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                for (i, func_idx) in positions.zip(elements) {
                    let (instance, registry) =
                        store.instance_and_module_registry_mut(elements_instance_id);
                    // SAFETY: the `store_id` passed to `get_exported_func` is
                    // indeed the store that owns the function.
                    let func = unsafe { instance.get_exported_func(registry, store_id, *func_idx) };
                    table.set_(store, i, func.into()).unwrap();
                }
            }
            TableSegmentElements::Expressions(exprs) => {
                let mut store = OpaqueRootScope::new(store);
                let exprs = exprs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                let mut context = ConstEvalContext::new(elements_instance_id);
                for (i, expr) in positions.zip(exprs) {
                    let element = const_evaluator
                        .eval(&mut store, limiter.as_deref_mut(), &mut context, expr)
                        .await?;
                    table.set_(&mut store, i, element.ref_().unwrap()).unwrap();
                }
            }
        }

        Ok(())
    }

    /// Drop an element.
    pub(crate) fn elem_drop(self: Pin<&mut Self>, elem_index: ElemIndex) {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop

        self.dropped_elements_mut().insert(elem_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory_mut(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> &mut Memory {
        &mut self.memories_mut()[index].1
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory(&self, index: DefinedMemoryIndex) -> &Memory {
        &self.memories[index].1
    }

    pub fn get_defined_memory_vmimport(&self, index: DefinedMemoryIndex) -> VMMemoryImport {
        crate::runtime::vm::VMMemoryImport {
            from: self.memory_ptr(index).into(),
            vmctx: self.vmctx().into(),
            index,
        }
    }

    /// Do a `memory.copy`
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the source or destination ranges are out of
    /// bounds.
    pub(crate) fn memory_copy(
        self: Pin<&mut Self>,
        dst_index: MemoryIndex,
        dst: u64,
        src_index: MemoryIndex,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy

        let src_mem = self.get_memory(src_index);
        let dst_mem = self.get_memory(dst_index);

        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = dst_mem.base.as_ptr().add(dst);
            let src = src_mem.base.as_ptr().add(src);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy(src, dst, len);
        }

        Ok(())
    }

    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
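        // Worked example: with `max = 65_536`, `ptr = 65_530`, and `len = 8`,
        // `end = 65_538 > 65_536`, so this returns `Trap::MemoryOutOfBounds`;
        // with `len = 6` the access is exactly in bounds and `Ok(65_530)` is
        // returned.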
        let oob = || Trap::MemoryOutOfBounds;
        let end = ptr
            .checked_add(len)
            .and_then(|i| usize::try_from(i).ok())
            .ok_or_else(oob)?;
        if end > max {
            Err(oob())
        } else {
            Ok(ptr.try_into().unwrap())
        }
    }

    /// Perform the `memory.fill` operation on a locally defined memory.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the memory range is out of bounds.
    pub(crate) fn memory_fill(
        self: Pin<&mut Self>,
        memory_index: DefinedMemoryIndex,
        dst: u64,
        val: u8,
        len: u64,
    ) -> Result<(), Trap> {
        let memory_index = self.env_module().memory_index(memory_index);
        let memory = self.get_memory(memory_index);
        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::write_bytes(dst, val, len);
        }

        Ok(())
    }

    /// Get the internal storage range of a particular Wasm data segment.
    pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
        match self.env_module().passive_data_map.get(&index) {
            Some(range) if !self.dropped_data.contains(index) => range.clone(),
            _ => 0..0,
        }
    }

    /// Given an internal storage range of a Wasm data segment (or subset of a
    /// Wasm data segment), get the data's raw bytes.
    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
        let start = usize::try_from(range.start).unwrap();
        let end = usize::try_from(range.end).unwrap();
        &self.runtime_info.wasm_data()[start..end]
    }

    /// Performs the `memory.init` operation.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the destination range is out of this module's
    /// memory's bounds or if the source range is outside the data segment's
    /// bounds.
    pub(crate) fn memory_init(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        data_index: DataIndex,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        let range = self.wasm_data_range(data_index);
        self.memory_init_segment(memory_index, range, dst, src, len)
    }

    pub(crate) fn memory_init_segment(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        range: Range<u32>,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let data = self.wasm_data(range);
        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
        let len = len as usize;

        unsafe {
            let src_start = data.as_ptr().add(src);
            let dst_start = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy_nonoverlapping(src_start, dst_start, len);
        }

        Ok(())
    }

    /// Drop the given data segment, truncating its length to zero.
    pub(crate) fn data_drop(self: Pin<&mut Self>, data_index: DataIndex) {
        self.dropped_data_mut().insert(data_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a table by index regardless of whether it is locally-defined
    /// or an imported, foreign table. Ensure that the given range of
    /// elements in the table is lazily initialized. We define this
    /// operation all-in-one for safety, to ensure the lazy-init
    /// happens.
    ///
    /// Takes an `Iterator` for the index-range to lazy-initialize,
    /// for flexibility. This can be a range, single item, or empty
    /// sequence, for example. The iterator should return indices in
    /// increasing order, so that the break-at-out-of-bounds behavior
    /// works correctly.
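    ///
    /// A minimal sketch of the intended call shape (`table_index` and `n`
    /// are hypothetical values):
    ///
    /// ```ignore
    /// // Lazily initialize elements 0..n, then use the table.
    /// let table = instance.get_table_with_lazy_init(registry, table_index, 0..n);
    /// ```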
    pub(crate) fn get_table_with_lazy_init(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        table_index: TableIndex,
        range: impl Iterator<Item = u64>,
    ) -> &mut Table {
        let (idx, instance) = self.defined_table_index_and_instance(table_index);
        instance.get_defined_table_with_lazy_init(registry, idx, range)
    }

    /// Gets the raw runtime table data structure owned by this instance
    /// given the provided `idx`.
    ///
    /// The `range` specified is eagerly initialized for funcref tables.
    pub fn get_defined_table_with_lazy_init(
        mut self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        idx: DefinedTableIndex,
        range: impl IntoIterator<Item = u64>,
    ) -> &mut Table {
        let elt_ty = self.tables[idx].1.element_type();

        if elt_ty == TableElementType::Func {
            for i in range {
                match self.tables[idx].1.get_func_maybe_init(i) {
                    // Uninitialized table element.
                    Ok(None) => {}
                    // Initialized table element, move on to the next.
                    Ok(Some(_)) => continue,
                    // Out-of-bounds; caller will handle by likely
                    // throwing a trap. No work to do to lazy-init
                    // beyond the end.
                    Err(_) => break,
                };

                // The table element `i` is uninitialized and is now being
                // initialized. This must imply that a `precompiled` list of
                // function indices is available for this table. The precompiled
                // list is extracted and then it is consulted with `i` to
                // determine the function that is going to be initialized. Note
                // that `i` may be outside the limits of the static
                // initialization so it's a fallible `get` instead of an index.
                let module = self.env_module();
                let precomputed = match &module.table_initialization.initial_values[idx] {
                    TableInitialValue::Null { precomputed } => precomputed,
                    TableInitialValue::Expr(_) => unreachable!(),
                };
                // Panicking here helps catch bugs rather than silently truncating by accident.
                let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
                let func_ref = func_index
                    .and_then(|func_index| self.as_mut().get_func_ref(registry, func_index));
                self.as_mut().tables_mut()[idx]
                    .1
                    .set_func(i, func_ref)
                    .expect("Table type should match and index should be in-bounds");
            }
        }

        self.get_defined_table(idx)
    }

    /// Get a table by index regardless of whether it is locally-defined or an
    /// imported, foreign table.
    pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> &mut Table {
        let (idx, instance) = self.defined_table_index_and_instance(table_index);
        instance.get_defined_table(idx)
    }

    /// Get a locally-defined table.
    pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> &mut Table {
        &mut self.tables_mut()[index].1
    }

    pub(crate) fn defined_table_index_and_instance<'a>(
        self: Pin<&'a mut Self>,
        index: TableIndex,
    ) -> (DefinedTableIndex, Pin<&'a mut Instance>) {
        if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
            (defined_table_index, self)
        } else {
            let import = self.imported_table(index);
            let index = import.index;
            let vmctx = import.vmctx.as_non_null();
            // SAFETY: the validity of `self` means that the reachable instances
            // should also all be owned by the same store and fully initialized,
            // so it's safe to laterally move from a mutable borrow of this
            // instance to a mutable borrow of a sibling instance.
            let foreign_instance = unsafe { self.sibling_vmctx_mut(vmctx) };
            (index, foreign_instance)
        }
    }

    /// Initialize the VMContext data associated with this Instance.
    ///
    /// The `VMContext` memory is assumed to be uninitialized; any field
    /// that we need in a certain state will be explicitly written by this
    /// function.
    unsafe fn initialize_vmctx(
        mut self: Pin<&mut Self>,
        module: &Module,
        offsets: &VMOffsets<HostPtr>,
        store: &StoreOpaque,
        imports: Imports,
    ) {
        assert!(ptr::eq(module, self.env_module().as_ref()));
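
        // A rough sketch of the vmctx regions written below, in the order
        // they are initialized (all offsets are dynamic, computed by
        // `VMOffsets` from the module's shape):
        //
        //   magic | store context, epoch pointer, GC heap data | type ids |
        //   builtin functions | imported functions/tables/memories/globals/tags |
        //   defined tables | defined memories (+ owned memories) |
        //   defined globals (zeroed here) | defined tags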

        // SAFETY: the type of the magic field is indeed `u32` and this function
        // is initializing its value.
        unsafe {
            self.vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
                .write(VMCONTEXT_MAGIC);
        }

        // SAFETY: it's up to the caller to provide a valid store pointer here.
        unsafe {
            self.as_mut().set_store(store);
        }

        // Initialize shared types
        //
        // SAFETY: validity of the vmctx means it should be safe to write to it
        // here.
        unsafe {
            let types = NonNull::from(self.runtime_info.type_ids());
            self.type_ids_array().write(types.cast().into());
        }

        // Initialize the built-in functions
        //
        // SAFETY: the type of the builtin functions field is indeed a pointer
        // and the pointer being filled in here, plus the vmctx is valid to
        // write to during initialization.
        unsafe {
            static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
            let ptr = BUILTINS.expose_provenance();
            self.vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
                .write(VmPtr::from(ptr));
        }

        // Initialize the imports
        //
        // SAFETY: the vmctx is safe to initialize during this function and
        // validity of each item itself is a contract the caller must uphold.
        debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
        unsafe {
            ptr::copy_nonoverlapping(
                imports.functions.as_ptr(),
                self.vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
                    .as_ptr(),
                imports.functions.len(),
            );
            debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
            ptr::copy_nonoverlapping(
                imports.tables.as_ptr(),
                self.vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
                    .as_ptr(),
                imports.tables.len(),
            );
            debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
            ptr::copy_nonoverlapping(
                imports.memories.as_ptr(),
                self.vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
                    .as_ptr(),
                imports.memories.len(),
            );
            debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
            ptr::copy_nonoverlapping(
                imports.globals.as_ptr(),
                self.vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
                    .as_ptr(),
                imports.globals.len(),
            );
            debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
            ptr::copy_nonoverlapping(
                imports.tags.as_ptr(),
                self.vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
                    .as_ptr(),
                imports.tags.len(),
            );
        }

        // N.B.: there is no need to initialize the funcrefs array because we
        // eagerly construct each element in it whenever asked for a reference
        // to that element. In other words, there is no state needed to track
        // the lazy-init, so we don't need to initialize any state now.

        // Initialize the defined tables
        //
        // SAFETY: it's safe to initialize these tables during initialization
        // here and the various types of pointers and such here should all be
        // valid.
        unsafe {
            let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
            let tables = self.as_mut().tables_mut();
            for i in 0..module.num_defined_tables() {
                ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
                ptr = ptr.add(1);
            }
        }

        // Initialize the defined memories. This fills in both the
        // `defined_memories` table and the `owned_memories` table at the same
        // time. Entries in `defined_memories` hold a pointer to a definition
        // (all memories) whereas the `owned_memories` hold the actual
        // definitions of memories owned (not shared) in the module.
        //
        // SAFETY: it's safe to initialize these memories during initialization
        // here and the various types of pointers and such here should all be
        // valid.
        unsafe {
            let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
            let mut owned_ptr = self.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
            let memories = self.as_mut().memories_mut();
            for i in 0..module.num_defined_memories() {
                let defined_memory_index = DefinedMemoryIndex::new(i);
                let memory_index = module.memory_index(defined_memory_index);
                if module.memories[memory_index].shared {
                    let def_ptr = memories[defined_memory_index]
                        .1
                        .as_shared_memory()
                        .unwrap()
                        .vmmemory_ptr();
                    ptr.write(VmPtr::from(def_ptr));
                } else {
                    owned_ptr.write(memories[defined_memory_index].1.vmmemory());
                    ptr.write(VmPtr::from(owned_ptr));
                    owned_ptr = owned_ptr.add(1);
                }
                ptr = ptr.add(1);
            }
        }

        // Zero-initialize the globals so that nothing is uninitialized memory
        // after this function returns. The globals are actually initialized
        // with their const expression initializers after the instance is fully
        // allocated.
        //
        // SAFETY: it's safe to initialize globals during initialization
        // here. Note that while the value being written is not valid for all
        // types of globals it's initializing the memory to zero instead of
        // being in an undefined state. So it's still unsafe to access globals
        // after this, but if it's read then it'd hopefully crash faster than
        // leaving this undefined.
        unsafe {
            for (index, _init) in module.global_initializers.iter() {
                self.global_ptr(index).write(VMGlobalDefinition::new());
            }
        }

        // Initialize the defined tags
        //
        // SAFETY: it's safe to initialize these tags during initialization
        // here and the various types of pointers and such here should all be
        // valid.
        unsafe {
            let mut ptr = self.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
            for i in 0..module.num_defined_tags() {
                let defined_index = DefinedTagIndex::new(i);
                let tag_index = module.tag_index(defined_index);
                let tag = module.tags[tag_index];
                ptr.write(VMTagDefinition::new(
                    tag.signature.unwrap_engine_type_index(),
                ));
                ptr = ptr.add(1);
            }
        }
    }
1500
1501 /// Attempts to convert from the host `addr` specified to a WebAssembly
1502 /// based address recorded in `WasmFault`.
1503 ///
1504 /// This method will check all linear memories that this instance contains
1505 /// to see if any of them contain `addr`. If one does then `Some` is
1506 /// returned with metadata about the wasm fault. Otherwise `None` is
1507 /// returned and `addr` doesn't belong to this instance.
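    ///
    /// A minimal usage sketch (hypothetical `instance` and `host_addr`, e.g.
    /// as captured by a signal handler; not a doctest):
    ///
    /// ```ignore
    /// // Translate a faulting host address into wasm-relative terms, if any
    /// // linear memory of this instance contains it.
    /// if let Some(fault) = instance.wasm_fault(host_addr) {
    ///     println!(
    ///         "wasm fault at {:#x} within a memory of size {:#x}",
    ///         fault.wasm_address, fault.memory_size,
    ///     );
    /// }
    /// ```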
    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        let mut fault = None;
        for (_, (_, memory)) in self.memories.iter() {
            let accessible = memory.wasm_accessible();
            if accessible.start <= addr && addr < accessible.end {
                // All linear memories should be disjoint so assert that no
                // prior fault has been found.
                assert!(fault.is_none());
                fault = Some(WasmFault {
                    memory_size: memory.byte_size(),
                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
                });
            }
        }
        fault
    }

    /// Returns the id, within this instance's store, that it's assigned.
    pub fn id(&self) -> InstanceId {
        self.id
    }

    /// Get all memories within this instance.
    ///
    /// Returns both imported and defined memories, whether or not they are
    /// exported, giving access to the full memory index space.
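    ///
    /// A minimal usage sketch (hypothetical `instance` and `store_id`; not a
    /// doctest):
    ///
    /// ```ignore
    /// // Walk the full memory index space, imported memories first.
    /// for (index, export) in instance.all_memories(store_id) {
    ///     let _: (MemoryIndex, ExportMemory) = (index, export);
    /// }
    /// ```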
    pub fn all_memories(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + '_ {
        self.env_module()
            .memories
            .iter()
            .map(move |(i, _)| (i, self.get_exported_memory(store, i)))
    }

    /// Return the memories defined in this instance (not imported).
    pub fn defined_memories<'a>(
        &'a self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
        let num_imported = self.env_module().num_imported_memories;
        self.all_memories(store)
            .skip(num_imported)
            .map(|(_i, memory)| memory)
    }

    /// Lookup an item with the given index.
    ///
    /// # Panics
    ///
    /// Panics if `export` is not valid for this instance.
    ///
    /// # Safety
    ///
    /// This function requires that `store` is the correct store which owns
    /// this instance.
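    ///
    /// A sketch of dispatching on the result (hypothetical `instance`,
    /// `registry`, `store_id`, and `idx`; not a doctest):
    ///
    /// ```ignore
    /// // SAFETY: `store_id` must identify the store which owns `instance`.
    /// let export = unsafe { instance.get_export_by_index_mut(registry, store_id, idx) };
    /// match export {
    ///     Export::Function(f) => { /* ... */ }
    ///     Export::Memory(m) => { /* ... */ }
    ///     _ => { /* globals, tables, tags, shared memories */ }
    /// }
    /// ```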
    pub unsafe fn get_export_by_index_mut(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        export: EntityIndex,
    ) -> Export {
        match export {
            // SAFETY: the contract of `store` owning this instance is a
            // safety requirement of this function itself.
            EntityIndex::Function(i) => {
                Export::Function(unsafe { self.get_exported_func(registry, store, i) })
            }
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(store, i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(store, i)),
            EntityIndex::Memory(i) => match self.get_exported_memory(store, i) {
                ExportMemory::Unshared(m) => Export::Memory(m),
                ExportMemory::Shared(m, i) => Export::SharedMemory(m, i),
            },
            EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(store, i)),
        }
    }

    fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
        // SAFETY: this is a pin-projection to get a mutable reference to an
        // internal field and is safe so long as the `&mut Self` temporarily
        // created is not overwritten, which it isn't here.
        unsafe { &mut self.get_unchecked_mut().store }
    }

    fn dropped_elements_mut(self: Pin<&mut Self>) -> &mut EntitySet<ElemIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_elements }
    }

    fn dropped_data_mut(self: Pin<&mut Self>) -> &mut EntitySet<DataIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_data }
    }

    fn memories_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().memories }
    }

    pub(crate) fn tables_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().tables }
    }

    #[cfg(feature = "wmemcheck")]
    pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
    }
}
// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
// last field of `Instance`.
unsafe impl InstanceLayout for Instance {
    const INIT_ZEROED: bool = false;
    type VMContext = VMContext;

    fn layout(&self) -> Layout {
        Self::alloc_layout(self.runtime_info.offsets())
    }

    fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
        &self.vmctx
    }

    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
        &mut self.vmctx
    }
}

pub type InstanceHandle = OwnedInstance<Instance>;

/// A handle holding an `Instance` of a WebAssembly module.
///
/// This structure is an owning handle of the `instance` contained internally.
/// When this value goes out of scope it will deallocate the `Instance` and all
/// memory associated with it.
///
/// Note that this lives within a `StoreOpaque` on a list of instances that a
/// store is keeping alive.
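///
/// A minimal sketch of the ownership story (hypothetical `make_instance`
/// constructor; not a doctest):
///
/// ```ignore
/// let mut handle: OwnedInstance<Instance> = make_instance();
/// let _shared: &Instance = handle.get();
/// let _exclusive: Pin<&mut Instance> = handle.get_mut();
/// // Dropping `handle` runs `Instance`'s destructor and frees the
/// // allocation, including the trailing `VMContext`.
/// drop(handle);
/// ```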
#[derive(Debug)]
#[repr(transparent)] // guarantee this is a zero-cost wrapper
pub struct OwnedInstance<T: InstanceLayout> {
    /// The raw pointer to the instance that was allocated.
    ///
    /// Note that this is not equivalent to `Box<Instance>` because the
    /// allocation here has a `VMContext` trailing after it. Thus the custom
    /// destructor to invoke the `dealloc` function with the appropriate
    /// layout.
    instance: SendSyncPtr<T>,
    _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
}

/// Structure that must be placed at the end of a type implementing
/// `InstanceLayout`.
#[repr(align(16))] // match the alignment of VMContext
pub struct OwnedVMContext<T> {
    /// A pointer to the `vmctx` field at the end of the structure.
    ///
    /// If you're looking at this a reasonable question would be "why do we
    /// need a pointer to ourselves?" because after all the pointer's value is
    /// trivially derivable from any `&Instance` pointer. The rationale for
    /// this field's existence is subtle, but it's required for correctness.
    /// The short version is "this makes Miri happy".
    ///
    /// The long version of why this field exists is that the rules that Miri
    /// uses to ensure pointers are used correctly have various conditions
    /// that depend on how pointers are derived. More specifically if `*mut T`
    /// is derived from `&mut T`, then that invalidates all prior pointers
    /// derived from the `&mut T`. This means that while we liberally want to
    /// re-acquire a `*mut VMContext` throughout the implementation of
    /// `Instance` the trivial way, a function
    /// `fn vmctx(Pin<&mut Instance>) -> *mut VMContext`, would effectively
    /// invalidate all prior `*mut VMContext` pointers acquired. The purpose
    /// of this field is to serve as a sort of source-of-truth for where
    /// `*mut VMContext` pointers come from.
    ///
    /// This field is initialized when the `Instance` is created with the
    /// original allocation's pointer. That means that the provenance of this
    /// pointer contains the entire allocation (both instance and `VMContext`).
    /// This provenance bit is then "carried through" where `fn vmctx` will
    /// base all returned pointers on this pointer itself. This provides the
    /// means of never invalidating this pointer throughout Miri and
    /// additionally being able to still temporarily have `Pin<&mut Instance>`
    /// methods and such.
    ///
    /// It's important to note, though, that this is not here purely for Miri.
    /// The careful construction of the `fn vmctx` method has ramifications on
    /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
    /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
    /// deriving VMContext pointers from this pointer it specifically hints to
    /// LLVM that trickery is afoot and it properly informs `noalias` and such
    /// annotations and analysis. More-or-less this pointer is actually loaded
    /// in LLVM IR which helps defeat otherwise present aliasing
    /// optimizations, which we want, since writes to this should basically
    /// never be optimized out.
    ///
    /// As a final note it's worth pointing out that the machine code
    /// generated for accessing `fn vmctx` is still as one would expect. This
    /// member isn't actually ever loaded at runtime (or at least shouldn't
    /// be). Perhaps in the future if the memory consumption of this field is
    /// a problem we could shrink it slightly, but for now one extra pointer
    /// per wasm instance seems not too bad.
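    ///
    /// To illustrate the hazard described above, a sketch of the *wrong*
    /// approach (hypothetical helper; not a doctest):
    ///
    /// ```ignore
    /// fn vmctx(instance: Pin<&mut Instance>) -> *mut VMContext {
    ///     // Deriving the pointer from `&mut Instance` re-borrows the
    ///     // allocation, so under Miri's rules this invalidates every
    ///     // `*mut VMContext` previously handed out.
    ///     let base: *mut Instance = unsafe { instance.get_unchecked_mut() };
    ///     unsafe { base.add(1).cast::<VMContext>() }
    /// }
    /// ```
    ///
    /// Basing returned pointers on a load of `vmctx_self_reference` instead
    /// keeps them tied to the original allocation's provenance.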
    vmctx_self_reference: SendSyncPtr<T>,

    /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
    /// safe operation.
    _marker: core::marker::PhantomPinned,
}

impl<T> OwnedVMContext<T> {
    /// Creates a new blank vmctx to place at the end of an instance.
    pub fn new() -> OwnedVMContext<T> {
        OwnedVMContext {
            vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
            _marker: core::marker::PhantomPinned,
        }
    }
}

/// Helper trait to plumb both core instances and component instances into
/// `OwnedInstance` below.
///
/// # Safety
///
/// This trait requires `layout` to correctly describe `Self` and
/// appropriately allocate space for `Self::VMContext` afterwards.
/// Additionally the field returned by `owned_vmctx()` must be the last field
/// in the structure.
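///
/// A minimal sketch of a hypothetical implementor (`ExampleInstance` is
/// illustrative only; not a doctest):
///
/// ```ignore
/// struct ExampleInstance {
///     // ... instance state ...
///
///     // Per the contract above, the `OwnedVMContext` must be the last
///     // field so the `VMContext` allocation trails the structure.
///     vmctx: OwnedVMContext<VMContext>,
/// }
///
/// unsafe impl InstanceLayout for ExampleInstance {
///     const INIT_ZEROED: bool = false;
///     type VMContext = VMContext;
///
///     fn layout(&self) -> Layout {
///         // Must cover `Self` plus the trailing `VMContext` space.
///         unimplemented!()
///     }
///
///     fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
///         &self.vmctx
///     }
///
///     fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
///         &mut self.vmctx
///     }
/// }
/// ```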
pub unsafe trait InstanceLayout {
    /// Whether or not to allocate this instance with `alloc_zeroed` or
    /// `alloc`.
    const INIT_ZEROED: bool;

    /// The trailing `VMContext` type at the end of this instance.
    type VMContext;

    /// The memory layout to use to allocate and deallocate this instance.
    fn layout(&self) -> Layout;

    fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;

    /// Returns the `vmctx_self_reference` set above.
    #[inline]
    fn vmctx(&self) -> NonNull<Self::VMContext> {
        // The definition of this method is subtle but intentional. The goal
        // here is that effectively this should return `&mut self.vmctx`, but
        // it's not quite so simple. Some more documentation is available on
        // the `vmctx_self_reference` field, but the general idea is that
        // we're creating a pointer to return with proper provenance.
        // Provenance is still in the works in Rust at the time of this
        // writing but the load of the `self.vmctx_self_reference` field is
        // important here as it affects how LLVM thinks about aliasing with
        // respect to the returned pointer.
        //
        // The intention of this method is to codegen to machine code as
        // `&mut self.vmctx`, however. While it doesn't show up like this in
        // LLVM IR (there's an actual load of the field) it does look like
        // that by the time the backend runs. (that's magic to me, the
        // backend removing loads...)
        let owned_vmctx = self.owned_vmctx();
        let owned_vmctx_raw = NonNull::from(owned_vmctx);
        // SAFETY: it's part of the contract of `InstanceLayout` and the usage
        // with `OwnedInstance` that this indeed points to the vmctx.
        let addr = unsafe { owned_vmctx_raw.add(1) };
        owned_vmctx
            .vmctx_self_reference
            .as_non_null()
            .with_addr(addr.addr())
    }

    /// Helper function to access various locations offset from our `*mut
    /// VMContext` object.
    ///
    /// Note that this method takes `&self` as an argument but returns
    /// `NonNull<T>` which is frequently used to mutate said memory. This is
    /// an intentional design decision where the safety of the modification of
    /// memory is placed as a burden onto the caller. The implementation of
    /// this method explicitly does not require `&mut self` to acquire mutable
    /// provenance to update the `VMContext` region. Instead all pointers into
    /// the `VMContext` area have provenance/permissions to write.
    ///
    /// Also note though that care must be taken to ensure that reads/writes
    /// of memory must only happen where appropriate, for example a non-atomic
    /// write (as most are) should never happen concurrently with another read
    /// or write. It's generally the caller's burden to adhere to this.
    ///
    /// Also of note is that most of the time the usage of this method falls
    /// into one of:
    ///
    /// * Something in the VMContext is being read or written. In that case
    ///   use `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due
    ///   to that having a safer lifetime.
    ///
    /// * A pointer is being created to pass to other VM* data structures. In
    ///   that situation the lifetime of all VM data structures are typically
    ///   tied to the `Store<T>` which is what provides the guarantees around
    ///   concurrency/etc.
    ///
    /// There's quite a lot of unsafety riding on this method, especially
    /// related to the ascription `T` of the byte `offset`. It's hoped that in
    /// the future we're able to settle on an, in theory, safer design.
    ///
    /// # Safety
    ///
    /// This method is unsafe because the `offset` must be within bounds of
    /// the `VMContext` object trailing this instance. Additionally `T` must
    /// be a valid ascription of the value that resides at that location.
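    ///
    /// As a usage sketch, the instance-initialization code above computes the
    /// start of the defined-memories array roughly as follows (not a
    /// doctest):
    ///
    /// ```ignore
    /// // `T` is ascribed as `VmPtr<VMMemoryDefinition>` since that is what
    /// // the `VMContext` stores at this offset.
    /// let ptr: NonNull<VmPtr<VMMemoryDefinition>> =
    ///     unsafe { self.vmctx_plus_offset_raw(offsets.vmctx_memories_begin()) };
    /// ```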
    unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
        // SAFETY: the safety requirements of `byte_add` are forwarded to this
        // method's caller.
        unsafe {
            self.vmctx()
                .byte_add(usize::try_from(offset.into()).unwrap())
                .cast()
        }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&self` to the returned reference `&T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&mut self` to the returned reference `&mut T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
        self: Pin<&mut Self>,
        offset: impl Into<u32>,
    ) -> &mut T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
    }
}

impl<T: InstanceLayout> OwnedInstance<T> {
    /// Allocates a new `OwnedInstance` and places `instance` inside of it.
    ///
    /// This will allocate memory for `instance` according to its `layout`,
    /// including space for the trailing `VMContext`, and then move `instance`
    /// into that allocation.
    pub(super) fn new(mut instance: T) -> OwnedInstance<T> {
        let layout = instance.layout();
        debug_assert!(layout.size() >= size_of_val(&instance));
        debug_assert!(layout.align() >= align_of_val(&instance));

        // SAFETY: it's up to us to assert that `layout` has a non-zero size,
        // which is asserted here.
        let ptr = unsafe {
            assert!(layout.size() > 0);
            if T::INIT_ZEROED {
                alloc::alloc::alloc_zeroed(layout)
            } else {
                alloc::alloc::alloc(layout)
            }
        };
        if ptr.is_null() {
            alloc::alloc::handle_alloc_error(layout);
        }
        let instance_ptr = NonNull::new(ptr.cast::<T>()).unwrap();

        // SAFETY: it's part of the unsafe contract of `InstanceLayout` that
        // the `add` here is appropriate for the layout allocated.
        let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
        instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();

        // SAFETY: we allocated above and it's an unsafe contract of
        // `InstanceLayout` that the layout is suitable for writing the
        // instance.
        unsafe {
            instance_ptr.write(instance);
        }

        let ret = OwnedInstance {
            instance: SendSyncPtr::new(instance_ptr),
            _marker: marker::PhantomData,
        };

        // Double-check various vmctx calculations are correct.
        debug_assert_eq!(
            vmctx_self_reference.addr(),
            // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
            // the last field to get a pointer to 1-byte-past-the-end of an
            // object, which should be valid.
            unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
        );
        debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());

        ret
    }

    /// Gets the raw underlying `&Instance` from this handle.
    pub fn get(&self) -> &T {
        // SAFETY: this is an owned instance handle that retains exclusive
        // ownership of the `Instance` inside. With `&self` given we know this
        // pointer is valid and the returned lifetime is connected to `self`
        // so that should also be valid.
        unsafe { self.instance.as_non_null().as_ref() }
    }

    /// Same as [`Self::get`] except for mutability.
    pub fn get_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: The lifetime concerns here are the same as `get` above.
        // Otherwise `new_unchecked` is used here to uphold the contract that
        // instances are always pinned in memory.
        unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
    }
}

impl<T: InstanceLayout> Drop for OwnedInstance<T> {
    fn drop(&mut self) {
        // SAFETY: this handle owns the allocation, so it's safe to run the
        // destructor in place and then deallocate with the same layout that
        // was used at allocation time.
        unsafe {
            let layout = self.get().layout();
            ptr::drop_in_place(self.instance.as_ptr());
            alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
        }
    }
}