wasmtime/runtime/vm/instance.rs
//! An `Instance` contains all the runtime state used by execution of a
//! wasm module (except its callstack and register state). An
//! `InstanceHandle` is a reference-counting handle for an `Instance`.

use crate::OpaqueRootScope;
use crate::code::ModuleWithCode;
use crate::module::ModuleRegistry;
use crate::prelude::*;
use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
use crate::runtime::vm::export::{Export, ExportMemory};
use crate::runtime::vm::memory::{Memory, RuntimeMemoryCreator};
use crate::runtime::vm::table::{Table, TableElementType};
use crate::runtime::vm::vmcontext::{
    VMBuiltinFunctionsArray, VMContext, VMFuncRef, VMFunctionImport, VMGlobalDefinition,
    VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext, VMStoreContext,
    VMTableDefinition, VMTableImport, VMTagDefinition, VMTagImport,
};
use crate::runtime::vm::{
    GcStore, HostResult, Imports, ModuleRuntimeInfo, SendSyncPtr, VMGlobalKind, VMStore,
    VMStoreRawPtr, VmPtr, VmSafe, WasmFault, catch_unwind_and_record_trap,
};
use crate::store::{
    Asyncness, InstanceId, StoreId, StoreInstanceId, StoreOpaque, StoreResourceLimiter,
};
use crate::vm::VMWasmCallFunction;
use alloc::sync::Arc;
use core::alloc::Layout;
use core::marker;
use core::ops::Range;
use core::pin::Pin;
use core::ptr::NonNull;
#[cfg(target_has_atomic = "64")]
use core::sync::atomic::AtomicU64;
use core::{mem, ptr};
#[cfg(feature = "gc")]
use wasmtime_environ::ModuleInternedTypeIndex;
use wasmtime_environ::{
    DataIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex, DefinedTagIndex,
    ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex, HostPtr, MemoryIndex,
    PrimaryMap, PtrSize, TableIndex, TableInitialValue, TableSegmentElements, TagIndex, Trap,
    VMCONTEXT_MAGIC, VMOffsets, VMSharedTypeIndex, packed_option::ReservedValue,
};
#[cfg(feature = "wmemcheck")]
use wasmtime_wmemcheck::Wmemcheck;

mod allocator;
pub use allocator::*;

/// A type that roughly corresponds to a WebAssembly instance, but is also used
/// for host-defined objects.
///
/// Instances here can correspond to actual instantiated modules, but the type
/// is also used ubiquitously for host-defined objects. For example creating a
/// host-defined memory will use a `module` that looks like it exports a single
/// memory (and similarly for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether they were created on the host or through a module.
///
/// # Ownership
///
/// This structure is never allocated directly but is instead managed through
/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
/// dynamic size corresponding to the `module` configured within. Memory
/// management of this structure is always done through `InstanceHandle` as the
/// sole owner of an instance.
///
/// # `Instance` and `Pin`
///
/// An instance is accompanied by trailing memory for the appropriate
/// `VMContext`. The `Instance` also holds `runtime_info` and other
/// information pointing to relevant offsets within the `VMContext`. Thus it is
/// not sound to mutate `runtime_info` after an instance is created. More
/// generally it's also not sound to "swap" instances: given two
/// `&mut Instance` values, swapping them would leave each `VMContext`
/// inaccurately described by its instance.
///
/// To encapsulate this guarantee this type is only ever mutated through Rust's
/// `Pin` type. All mutable methods here take `self: Pin<&mut Self>` which
/// statically disallows safe access to `&mut Instance`. There are assorted
/// "projection methods" to go from `Pin<&mut Instance>` to `&mut T` for
/// individual fields, for example `memories_mut`. More methods can be added as
/// necessary, and methods may also be added to project multiple fields at a
/// time if needed. The precise ergonomics around getting mutable access to
/// some fields (but notably not `runtime_info`) are likely to evolve over
/// time.
///
/// Note that it is essentially never sound to pass around `&mut Instance`.
/// That should always instead be `Pin<&mut Instance>`. All usage of
/// `Pin::new_unchecked` should be confined to just a few `unsafe` locations
/// in this module, and it's recommended to use existing helpers if you can.
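///
/// As an illustration of the projection pattern, a field-projection helper
/// might look like the following minimal sketch (hedged: the real
/// `memories_mut` defined in this module may differ in detail):
///
/// ```ignore
/// fn memories_mut(
///     self: Pin<&mut Instance>,
/// ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
///     // SAFETY: `memories` is never structurally pinned, so handing out a
///     // plain `&mut` to it cannot be used to move the pinned `Instance`
///     // itself or to overwrite `runtime_info`.
///     unsafe { &mut self.get_unchecked_mut().memories }
/// }
/// ```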
#[repr(C)] // ensure that the vmctx field is last.
pub struct Instance {
    /// The index within a `Store` at which this instance lives.
    id: InstanceId,

    /// The runtime info (corresponding to the "compiled module"
    /// abstraction in higher layers) that is retained and needed for
    /// lazy initialization. This provides access to the underlying
    /// Wasm module entities, the compiled JIT code, metadata about
    /// functions, lazy initialization state, etc.
    //
    // SAFETY: this field cannot be overwritten after an instance is created. It
    // must contain this exact same value for the entire lifetime of this
    // instance. This enables borrowing the info's `Module` and this instance at
    // the same time (instance mutably, module not). Additionally it enables
    // borrowing a store mutably at the same time as a contained instance.
    runtime_info: ModuleRuntimeInfo,

    /// WebAssembly linear memory data.
    ///
    /// This is where all runtime information about defined linear memories in
    /// this module lives.
    ///
    /// The `MemoryAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// memory.
    memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,

    /// WebAssembly table data.
    ///
    /// Like memories, this is only for defined tables in the module and
    /// contains all of their runtime state.
    ///
    /// The `TableAllocationIndex` was given from our `InstanceAllocator` and
    /// must be given back to the instance allocator when deallocating each
    /// table.
    tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,

    /// Stores the dropped passive element segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_elements: EntitySet<ElemIndex>,

    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: EntitySet<DataIndex>,

    // TODO: add support for multiple memories; `wmemcheck_state` corresponds to
    // memory 0.
    #[cfg(feature = "wmemcheck")]
    pub(crate) wmemcheck_state: Option<Wmemcheck>,

    /// Self-pointer back to `Store<T>` and its functions. Not present for
    /// the brief time that `Store<T>` is itself being created. Also not
    /// present for some niche uses that are disconnected from stores (e.g.
    /// cross-thread stuff used in `InstancePre`).
    store: Option<VMStoreRawPtr>,

    /// Additional context used by compiled wasm code. This field is last, and
    /// represents a dynamically-sized array that extends beyond the nominal
    /// end of the struct (similar to a flexible array member).
    vmctx: OwnedVMContext<VMContext>,
}

impl Instance {
    /// Create an instance at the given memory address.
    ///
    /// It is assumed the memory was properly aligned and the
    /// allocation was `alloc_size` in bytes.
    ///
    /// # Safety
    ///
    /// The `req.imports` field must be appropriately sized/typed for the module
    /// being allocated according to `req.runtime_info`. Additionally `memories`
    /// and `tables` must have been allocated for `req.store`.
    unsafe fn new(
        req: InstanceAllocationRequest,
        memories: PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
        tables: PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) -> InstanceHandle {
        let module = req.runtime_info.env_module();
        let memory_tys = &module.memories;
        let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
        let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());

        #[cfg(feature = "wmemcheck")]
        let wmemcheck_state = if req.store.engine().config().wmemcheck {
            let size = memory_tys
                .iter()
                .next()
                .map(|memory| memory.1.limits.min)
                .unwrap_or(0)
                * 64
                * 1024;
            Some(Wmemcheck::new(size.try_into().unwrap()))
        } else {
            None
        };
        #[cfg(not(feature = "wmemcheck"))]
        let _ = memory_tys;

        let mut ret = OwnedInstance::new(Instance {
            id: req.id,
            runtime_info: req.runtime_info.clone(),
            memories,
            tables,
            dropped_elements,
            dropped_data,
            #[cfg(feature = "wmemcheck")]
            wmemcheck_state,
            store: None,
            vmctx: OwnedVMContext::new(),
        });

        // SAFETY: this vmctx was allocated with the same layout above, so it
        // should be safe to initialize with the same values here.
        unsafe {
            ret.get_mut().initialize_vmctx(req.store, req.imports);
        }
        ret
    }

    /// Converts a raw `VMContext` pointer into a raw `Instance` pointer.
    ///
    /// # Safety
    ///
    /// Calling this function safely requires that `vmctx` is a valid allocation
    /// of a `VMContext` which is derived from `Instance::new`. To safely
    /// convert the returned raw pointer into a safe instance pointer callers
    /// will also want to uphold guarantees such as:
    ///
    /// * The instance should not be in use elsewhere. For example you can't
    ///   call this function twice, turn both raw pointers into safe pointers,
    ///   and then use both safe pointers.
    /// * There should be no other active mutable borrow of any other instance
    ///   within the same store. Note that this is not restricted to just this
    ///   instance pointer, but to all instances in a store. Instances can
    ///   safely traverse to other instances "laterally", meaning that a mutable
    ///   borrow on one is a mutable borrow on all.
    /// * There should be no active mutable borrow of the store accessible at
    ///   the same time the instance pointer is turned into a safe borrow.
    ///   Instances are owned by a store and a store can be used to acquire a
    ///   safe instance borrow at any time.
    /// * The lifetime of the usage of the instance should not be unnecessarily
    ///   long, for example it cannot be `'static`.
    ///
    /// Other entrypoints exist for converting from a raw `VMContext` to a safe
    /// pointer such as:
    ///
    /// * `Instance::enter_host_from_wasm`
    /// * `Instance::sibling_vmctx{,_mut}`
    ///
    /// These place further restrictions on the API signature to satisfy some of
    /// the above points.
    #[inline]
    pub(crate) unsafe fn from_vmctx(vmctx: NonNull<VMContext>) -> NonNull<Instance> {
        // SAFETY: The validity of `byte_sub` relies on `vmctx` being a valid
        // allocation.
        unsafe {
            vmctx
                .byte_sub(mem::size_of::<Instance>())
                .cast::<Instance>()
        }
    }

    /// Encapsulated entrypoint to the host from WebAssembly, converting a raw
    /// `VMContext` pointer into a `VMStore` plus an `InstanceId`.
    ///
    /// This is an entrypoint for core wasm entering back into the host. This is
    /// used for both host functions and libcalls, for example. This will execute
    /// the closure `f` with safer internal types than a raw `VMContext`
    /// pointer.
    ///
    /// The closure `f` will have its errors caught, handled, and translated to
    /// an ABI-safe return value to give back to wasm. This includes both normal
    /// errors such as traps as well as panics.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vmctx` is a valid allocation and is safe to
    /// dereference at this time. That's generally only true when it's a
    /// wasm-provided value and this is the first function called after entering
    /// the host. Otherwise this could unsafely alias the store with a mutable
    /// pointer, for example.
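    ///
    /// As an illustrative sketch (hedged: `example_libcall` and its exact
    /// signature are hypothetical, not an actual Wasmtime libcall), a host
    /// shim might look like:
    ///
    /// ```ignore
    /// unsafe extern "C" fn example_libcall(vmctx: NonNull<VMContext>) -> bool {
    ///     // SAFETY: wasm just transferred control to the host, so `vmctx` is
    ///     // valid and neither the store nor any instance is otherwise in use.
    ///     unsafe {
    ///         Instance::enter_host_from_wasm(vmctx, |store, id| -> Result<()> {
    ///             // Host logic goes here with full access to `store` and the
    ///             // calling instance's `id`; traps and panics are translated
    ///             // to an ABI-safe return value automatically.
    ///             let _ = (store, id);
    ///             Ok(())
    ///         })
    ///     }
    /// }
    /// ```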
    #[inline]
    pub(crate) unsafe fn enter_host_from_wasm<R>(
        vmctx: NonNull<VMContext>,
        f: impl FnOnce(&mut dyn VMStore, InstanceId) -> R,
    ) -> R::Abi
    where
        R: HostResult,
    {
        // SAFETY: It's a contract of this function that `vmctx` is a valid
        // pointer with neither the store nor other instances actively in use
        // when this is called, so it should be safe to acquire a mutable
        // pointer to the store and read the instance pointer.
        let (store, instance) = unsafe {
            let instance = Instance::from_vmctx(vmctx);
            let instance = instance.as_ref();
            let store = &mut *instance.store.unwrap().0.as_ptr();
            (store, instance.id)
        };

        // Thread the `store` and `instance` through panic/trap infrastructure
        // back into `f`.
        catch_unwind_and_record_trap(store, |store| f(store, instance))
    }

    /// Converts the provided `*mut VMContext` to an `Instance` pointer and
    /// returns it with the same lifetime as `self`.
    ///
    /// This function can be used when traversing a `VMContext` to reach the
    /// instance that owns an import, if necessary.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    #[inline]
    unsafe fn sibling_vmctx<'a>(&'a self, vmctx: NonNull<VMContext>) -> &'a Instance {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer. Additionally, with `self` being a valid instance from
        // the same store, the sibling allocation reached here is live for at
        // least the lifetime `'a`.
        let ptr = unsafe { Instance::from_vmctx(vmctx) };
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime.
        unsafe { ptr.as_ref() }
    }

    /// Same as [`Self::sibling_vmctx`], but the mutable version.
    ///
    /// # Safety
    ///
    /// This function requires that the `vmctx` pointer is indeed valid and
    /// from the store that `self` belongs to.
    ///
    /// (Note that it is *NOT* required that `vmctx` be distinct from this
    /// instance's `vmctx`, or that usage of the resulting instance is limited
    /// to its defined items! The returned borrow has the same lifetime as
    /// `self`, which means that this instance cannot be used while the
    /// resulting instance is in use, and we therefore do not need to worry
    /// about mutable aliasing between this instance and the resulting
    /// instance.)
    #[inline]
    unsafe fn sibling_vmctx_mut<'a>(
        self: Pin<&'a mut Self>,
        vmctx: NonNull<VMContext>,
    ) -> Pin<&'a mut Instance> {
        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer such that this pointer arithmetic is valid.
        let mut ptr = unsafe { Instance::from_vmctx(vmctx) };

        // SAFETY: it's a contract of this function itself that `vmctx` is a
        // valid pointer to dereference. Additionally the lifetime of the return
        // value is constrained to be the same as `self` to avoid granting a
        // too-long lifetime. Finally mutable references to an instance are
        // always through `Pin`, so it's safe to create a pin-pointer here.
        unsafe { Pin::new_unchecked(ptr.as_mut()) }
    }

    pub(crate) fn env_module(&self) -> &Arc<wasmtime_environ::Module> {
        self.runtime_info.env_module()
    }

    pub(crate) fn runtime_module(&self) -> Option<&crate::Module> {
        match &self.runtime_info {
            ModuleRuntimeInfo::Module(m) => Some(m),
            ModuleRuntimeInfo::Bare(_) => None,
        }
    }

    /// Translate a module-level interned type index into an engine-level
    /// interned type index.
    #[cfg(feature = "gc")]
    pub fn engine_type_index(&self, module_index: ModuleInternedTypeIndex) -> VMSharedTypeIndex {
        self.runtime_info.engine_type_index(module_index)
    }

    #[inline]
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }

    /// Return the indexed `VMFunctionImport`.
    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
    }

    /// Return the indexed `VMTableImport`.
    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
    }

    /// Return the indexed `VMMemoryImport`.
    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
    }

    /// Return the indexed `VMGlobalImport`.
    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
    }

    /// Return the indexed `VMTagImport`.
    fn imported_tag(&self, index: TagIndex) -> &VMTagImport {
        unsafe { self.vmctx_plus_offset(self.offsets().vmctx_vmtag_import(index)) }
    }

    /// Return the indexed `VMTagDefinition`.
    pub fn tag_ptr(&self, index: DefinedTagIndex) -> NonNull<VMTagDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtag_definition(index)) }
    }

    /// Return the indexed `VMTableDefinition`.
    pub fn table(&self, index: DefinedTableIndex) -> VMTableDefinition {
        unsafe { self.table_ptr(index).read() }
    }

    /// Updates the value for a defined table to `VMTableDefinition`.
    fn set_table(self: Pin<&mut Self>, index: DefinedTableIndex, table: VMTableDefinition) {
        unsafe {
            self.table_ptr(index).write(table);
        }
    }

    /// Return a pointer to the `index`'th table within this instance, stored
    /// in vmctx memory.
    pub fn table_ptr(&self, index: DefinedTableIndex) -> NonNull<VMTableDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmtable_definition(index)) }
    }

    /// Get a locally defined or imported memory.
    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
        if let Some(defined_index) = self.env_module().defined_memory_index(index) {
            self.memory(defined_index)
        } else {
            let import = self.imported_memory(index);
            unsafe { VMMemoryDefinition::load(import.from.as_ptr()) }
        }
    }

    /// Return the indexed `VMMemoryDefinition`, already loaded from vmctx
    /// memory.
    #[inline]
    pub fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
        unsafe { VMMemoryDefinition::load(self.memory_ptr(index).as_ptr()) }
    }

    /// Set the indexed memory to `VMMemoryDefinition`.
    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
        unsafe {
            self.memory_ptr(index).write(mem);
        }
    }

    /// Return the address of the specified memory at `index` within this vmctx.
    ///
    /// Note that the returned pointer resides in wasm-code-readable-memory in
    /// the vmctx.
    #[inline]
    pub fn memory_ptr(&self, index: DefinedMemoryIndex) -> NonNull<VMMemoryDefinition> {
        unsafe {
            self.vmctx_plus_offset::<VmPtr<_>>(self.offsets().vmctx_vmmemory_pointer(index))
                .as_non_null()
        }
    }

    /// Return the indexed `VMGlobalDefinition`.
    pub fn global_ptr(&self, index: DefinedGlobalIndex) -> NonNull<VMGlobalDefinition> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().vmctx_vmglobal_definition(index)) }
    }

    /// Get all globals within this instance.
    ///
    /// Returns both imported and defined globals.
    ///
    /// Returns both exported and non-exported globals.
    ///
    /// Gives access to the full globals space.
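    ///
    /// A hedged sketch of iterating the full globals space (illustrative;
    /// assumes a `store_id` and an `instance` reference are in scope):
    ///
    /// ```ignore
    /// for (idx, global) in instance.all_globals(store_id) {
    ///     // `idx` is the module-level `GlobalIndex`, imported or defined.
    ///     let _ = (idx, global);
    /// }
    /// ```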
    pub fn all_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (GlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        module
            .globals
            .keys()
            .map(move |idx| (idx, self.get_exported_global(store, idx)))
    }

    /// Get the globals defined in this instance (not imported).
    pub fn defined_globals(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (DefinedGlobalIndex, crate::Global)> + '_ {
        let module = self.env_module();
        self.all_globals(store)
            .skip(module.num_imported_globals)
            .map(move |(i, global)| (module.defined_global_index(i).unwrap(), global))
    }

    /// Return a pointer to the `VMStoreContext` pointer stored within this
    /// vmctx (historically referred to as the "interrupts structure").
    #[inline]
    pub fn vm_store_context(&self) -> NonNull<Option<VmPtr<VMStoreContext>>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_store_context()) }
    }

    /// Return a pointer to the global epoch counter used by this instance.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_ptr(self: Pin<&mut Self>) -> &mut Option<VmPtr<AtomicU64>> {
        let offset = self.offsets().ptr.vmctx_epoch_ptr();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    /// Return a pointer to the collector-specific heap data.
    pub fn gc_heap_data(self: Pin<&mut Self>) -> &mut Option<VmPtr<u8>> {
        let offset = self.offsets().ptr.vmctx_gc_heap_data();
        unsafe { self.vmctx_plus_offset_mut(offset) }
    }

    pub(crate) unsafe fn set_store(mut self: Pin<&mut Self>, store: &StoreOpaque) {
        // FIXME: should be more targeted ideally with the `unsafe` than just
        // throwing this entire function in a large `unsafe` block.
        unsafe {
            *self.as_mut().store_mut() = Some(VMStoreRawPtr(store.traitobj()));
            self.vm_store_context()
                .write(Some(store.vm_store_context_ptr().into()));
            #[cfg(target_has_atomic = "64")]
            {
                *self.as_mut().epoch_ptr() =
                    Some(NonNull::from(store.engine().epoch_counter()).into());
            }

            if self.env_module().needs_gc_heap {
                self.as_mut().set_gc_heap(Some(store.unwrap_gc_store()));
            } else {
                self.as_mut().set_gc_heap(None);
            }
        }
    }

    unsafe fn set_gc_heap(self: Pin<&mut Self>, gc_store: Option<&GcStore>) {
        if let Some(gc_store) = gc_store {
            *self.gc_heap_data() = Some(unsafe { gc_store.gc_heap.vmctx_gc_heap_data().into() });
        } else {
            *self.gc_heap_data() = None;
        }
    }

    /// Return a reference to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> NonNull<VMContext> {
        InstanceLayout::vmctx(self)
    }

    /// Lookup a function by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    ///
    /// # Safety
    ///
    /// The `store` parameter must be the store that owns this instance and the
    /// functions that this instance can reference.
    pub unsafe fn get_exported_func(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        index: FuncIndex,
    ) -> crate::Func {
        let func_ref = self.get_func_ref(registry, index).unwrap();

        // SAFETY: the validity of `func_ref` is guaranteed by the validity of
        // `self`, and the contract that `store` must own `func_ref` is a
        // contract of this function itself.
        unsafe { crate::Func::from_vm_func_ref(store, func_ref) }
    }

    /// Lookup a table by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds for this instance.
    pub fn get_exported_table(&self, store: StoreId, index: TableIndex) -> crate::Table {
        let (id, def_index) = if let Some(def_index) = self.env_module().defined_table_index(index)
        {
            (self.id, def_index)
        } else {
            let import = self.imported_table(index);
            // SAFETY: validity of this `Instance` guarantees validity of the
            // `vmctx` pointer being read here to find the transitive
            // `InstanceId` that the import is associated with.
            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
            (id, import.index)
        };
        crate::Table::from_raw(StoreInstanceId::new(store, id), def_index)
    }

    /// Lookup a memory by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    #[cfg_attr(
        not(feature = "threads"),
        expect(unused_variables, reason = "definitions cfg'd to dummy",)
    )]
    pub fn get_exported_memory(&self, store: StoreId, index: MemoryIndex) -> ExportMemory {
        let module = self.env_module();
        if module.memories[index].shared {
            let (memory, import) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (
                        self.get_defined_memory(def_index),
                        self.get_defined_memory_vmimport(def_index),
                    )
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of
                    // the `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let instance = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()) };
                    (instance.get_defined_memory(import.index), *import)
                };

            let vm = memory.as_shared_memory().unwrap().clone();
            ExportMemory::Shared(vm, import)
        } else {
            let (id, def_index) =
                if let Some(def_index) = self.env_module().defined_memory_index(index) {
                    (self.id, def_index)
                } else {
                    let import = self.imported_memory(index);
                    // SAFETY: validity of this `Instance` guarantees validity of the
                    // `vmctx` pointer being read here to find the transitive
                    // `InstanceId` that the import is associated with.
                    let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
                    (id, import.index)
                };

            // SAFETY: `from_raw` requires that the memory is not shared, which
            // was tested above in this if/else.
            let store_id = StoreInstanceId::new(store, id);
            ExportMemory::Unshared(unsafe { crate::Memory::from_raw(store_id, def_index) })
        }
    }

    /// Lookup a global by index.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out-of-bounds for this instance.
    pub(crate) fn get_exported_global(&self, store: StoreId, index: GlobalIndex) -> crate::Global {
        // If this global is defined within this instance, then it's easy to
        // calculate the `Global`.
        if let Some(def_index) = self.env_module().defined_global_index(index) {
            let instance = StoreInstanceId::new(store, self.id);
            return crate::Global::from_core(instance, def_index);
        }

        // For imported globals it's required to match on the `kind` to
        // determine which `Global` constructor is going to be invoked.
        let import = self.imported_global(index);
        match import.kind {
            VMGlobalKind::Host(index) => crate::Global::from_host(store, index),
            VMGlobalKind::Instance(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = VMContext::from_opaque(import.vmctx.unwrap().as_non_null());
                    self.sibling_vmctx(vmctx).id
                };
                crate::Global::from_core(StoreInstanceId::new(store, id), index)
            }
            #[cfg(feature = "component-model")]
            VMGlobalKind::ComponentFlags(index) => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = super::component::VMComponentContext::from_opaque(
                        import.vmctx.unwrap().as_non_null(),
                    );
                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
                };
                crate::Global::from_component_flags(
                    crate::component::store::StoreComponentInstanceId::new(store, id),
                    index,
                )
            }
            #[cfg(feature = "component-model")]
            VMGlobalKind::TaskMayBlock => {
                // SAFETY: validity of this `&Instance` means validity of its
                // imports meaning we can read the id of the vmctx within.
                let id = unsafe {
                    let vmctx = super::component::VMComponentContext::from_opaque(
                        import.vmctx.unwrap().as_non_null(),
                    );
                    super::component::ComponentInstance::vmctx_instance_id(vmctx)
                };
                crate::Global::from_task_may_block(
                    crate::component::store::StoreComponentInstanceId::new(store, id),
                )
            }
        }
    }

    /// Get an exported tag by index.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range.
    pub fn get_exported_tag(&self, store: StoreId, index: TagIndex) -> crate::Tag {
        let (id, def_index) = if let Some(def_index) = self.env_module().defined_tag_index(index) {
            (self.id, def_index)
        } else {
            let import = self.imported_tag(index);
            // SAFETY: validity of this `Instance` guarantees validity of the
            // `vmctx` pointer being read here to find the transitive
            // `InstanceId` that the import is associated with.
            let id = unsafe { self.sibling_vmctx(import.vmctx.as_non_null()).id };
            (id, import.index)
        };
        crate::Tag::from_raw(StoreInstanceId::new(store, id), def_index)
    }

    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `lookup_by_declaration`.
    pub fn exports(&self) -> wasmparser::collections::index_map::Iter<'_, String, EntityIndex> {
        self.env_module().exports.iter()
    }

    /// Grow memory by the specified number of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number of
    /// pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
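    ///
    /// A hedged usage sketch (illustrative; assumes a pinned `instance`, a
    /// defined memory index `idx`, and no resource limiter):
    ///
    /// ```ignore
    /// let old_size = instance.as_mut().memory_grow(None, idx, 1).await?;
    /// ```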
    pub(crate) async fn memory_grow(
        mut self: Pin<&mut Self>,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        idx: DefinedMemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        let memory = &mut self.as_mut().memories_mut()[idx].1;

        // SAFETY: this is the safe wrapper around `Memory::grow` because it
        // automatically updates the `VMMemoryDefinition` in this instance after
        // a growth operation below.
        let result = unsafe { memory.grow(delta, limiter).await };

        // Update the state used by a non-shared Wasm memory in case the base
        // pointer and/or the length changed.
        if memory.as_shared_memory().is_none() {
            let vmmemory = memory.vmmemory();
            self.set_memory(idx, vmmemory);
        }

        result
    }

    pub(crate) fn table_element_type(
        self: Pin<&mut Self>,
        table_index: TableIndex,
    ) -> TableElementType {
        self.get_table(table_index).element_type()
    }

    /// Performs a grow operation on the `table_index` specified using `grow`.
    ///
    /// This will handle updating the VMTableDefinition internally as necessary.
    pub(crate) async fn defined_table_grow(
        mut self: Pin<&mut Self>,
        table_index: DefinedTableIndex,
        grow: impl AsyncFnOnce(&mut Table) -> Result<Option<usize>>,
    ) -> Result<Option<usize>> {
        let table = self.as_mut().get_defined_table(table_index);
        let result = grow(table).await;
        let element = table.vmtable();
        self.set_table(table_index, element);
        result
    }

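    /// Computes the layout for an `Instance` allocation: the fixed-size
    /// `Instance` header immediately followed by this module's variable-size
    /// `VMContext`. As a hedged, illustrative example (the numbers are made
    /// up, not real sizes): if `size_of::<Instance>()` were 192 bytes and
    /// `offsets.size_of_vmctx()` returned 320, the resulting layout would be
    /// 512 bytes with `align_of::<Instance>()` alignment.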
    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
        let size = mem::size_of::<Self>()
            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
            .unwrap();
        let align = mem::align_of::<Self>();
        Layout::from_size_align(size, align).unwrap()
    }

    fn type_ids_array(&self) -> NonNull<VmPtr<VMSharedTypeIndex>> {
        unsafe { self.vmctx_plus_offset_raw(self.offsets().ptr.vmctx_type_ids_array()) }
    }

    /// Construct a new VMFuncRef for the given function
    /// (imported or defined in this module) and store into the given
    /// location. Used during lazy initialization.
    ///
    /// Note that our current lazy-init scheme actually calls this every
    /// time the funcref pointer is fetched; this turns out to be better
    /// than tracking state related to whether it's been initialized
    /// before, because resetting that state on (re)instantiation is
    /// very expensive if there are many funcrefs.
    ///
    /// # Safety
    ///
    /// This function requires that `into` is a valid pointer.
    unsafe fn construct_func_ref(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        index: FuncIndex,
        type_index: VMSharedTypeIndex,
        into: *mut VMFuncRef,
    ) {
        let module_with_code = ModuleWithCode::in_store(
            registry,
            self.runtime_module()
                .expect("funcref impossible in fake module"),
        )
        .expect("module not in store");

        let func_ref = if let Some(def_index) = self.env_module().defined_func_index(index) {
            VMFuncRef {
                array_call: NonNull::from(
                    module_with_code
                        .array_to_wasm_trampoline(def_index)
                        .expect("should have array-to-Wasm trampoline for escaping function"),
                )
                .cast()
                .into(),
                wasm_call: Some(
                    NonNull::new(
                        module_with_code
                            .finished_function(def_index)
                            .as_ptr()
                            .cast::<VMWasmCallFunction>()
                            .cast_mut(),
                    )
                    .unwrap()
                    .into(),
                ),
                vmctx: VMOpaqueContext::from_vmcontext(self.vmctx()).into(),
                type_index,
            }
        } else {
            let import = self.imported_function(index);
            VMFuncRef {
                array_call: import.array_call,
                wasm_call: Some(import.wasm_call),
                vmctx: import.vmctx,
                type_index,
            }
        };

        // SAFETY: the unsafe contract here is forwarded to callers of this
        // function.
        unsafe {
            ptr::write(into, func_ref);
        }
    }

    /// Get a `&VMFuncRef` for the given `FuncIndex`.
    ///
    /// Returns `None` if the index is the reserved index value.
    ///
    /// The returned reference is a stable reference that won't be moved and can
    /// be passed into JIT code.
    pub(crate) fn get_func_ref(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        index: FuncIndex,
    ) -> Option<NonNull<VMFuncRef>> {
        if index == FuncIndex::reserved_value() {
            return None;
        }

        // For now, we eagerly initialize a funcref struct in-place
        // whenever asked for a reference to it. This is mostly
        // fine, because in practice each funcref is unlikely to be
        // requested more than a few times: once-ish for funcref
        // tables used for call_indirect (the usual compilation
        // strategy places each function in the table at most once),
        // and once or a few times when fetching exports via API.
        // Note that for any case driven by table accesses, the lazy
        // table init behaves like a higher-level cache layer that
        // protects this initialization from happening multiple
        // times, via that particular table at least.
        //
        // When `ref.func` becomes more commonly used or if we
        // otherwise see a use-case where this becomes a hotpath,
        // we can reconsider by using some state to track
        // "uninitialized" explicitly, for example by zeroing the
        // funcrefs (perhaps together with other
        // zeroed-at-instantiate-time state) or using a separate
        // is-initialized bitmap.
        //
        // We arrived at this design because zeroing memory is
        // expensive, so it's better for instantiation performance
        // if we don't have to track "is-initialized" state at
        // all!
        let func = &self.env_module().functions[index];
        let sig = func.signature.unwrap_engine_type_index();

        // SAFETY: the offset calculated here should be correct with
        // `self.offsets`
        let func_ref = unsafe {
            self.vmctx_plus_offset_raw::<VMFuncRef>(self.offsets().vmctx_func_ref(func.func_ref))
        };

        // SAFETY: the `func_ref` ptr should be valid as it's within our
        // `VMContext` area.
        unsafe {
            self.construct_func_ref(registry, index, sig, func_ref.as_ptr());
        }

        Some(func_ref)
    }

    /// Get the passive elements segment at the given index.
    ///
    /// Returns an empty segment if the index is out of bounds or if the segment
    /// has been dropped.
    ///
    /// The `storage` parameter should always be `None`; it is a bit of a hack
    /// to work around lifetime issues.
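    ///
    /// A minimal usage sketch (illustrative only):
    ///
    /// ```ignore
    /// let mut storage = None;
    /// let elements = instance.passive_element_segment(&mut storage, elem_index);
    /// // `elements` borrows from `storage`, so `storage` must stay alive for
    /// // as long as `elements` is used.
    /// ```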
    pub(crate) fn passive_element_segment<'a>(
        &self,
        storage: &'a mut Option<(Arc<wasmtime_environ::Module>, TableSegmentElements)>,
        elem_index: ElemIndex,
    ) -> &'a TableSegmentElements {
        debug_assert!(storage.is_none());
        *storage = Some((
            // TODO: this `clone()` shouldn't be necessary but is used for now to
            // inform `rustc` that the lifetime of the elements here are
            // disconnected from the lifetime of `self`.
            self.env_module().clone(),
            // NB: fall back to an expressions-based list of elements which
            // doesn't have static type information (as opposed to
            // `TableSegmentElements::Functions`) since we don't know what type
            // is needed in the caller's context. Let the type be inferred by
            // how they use the segment.
            TableSegmentElements::Expressions(Box::new([])),
        ));
        let (module, empty) = storage.as_ref().unwrap();

        match module.passive_elements_map.get(&elem_index) {
            Some(index) if !self.dropped_elements.contains(elem_index) => {
                &module.passive_elements[*index]
            }
            _ => empty,
        }
    }

    /// The `table.init` operation: initializes a portion of a table with a
    /// passive element.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the range within the table is out of bounds
    /// or the range within the passive element is out of bounds.
    pub(crate) async fn table_init(
        store: &mut StoreOpaque,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        asyncness: Asyncness,
        instance: InstanceId,
        table_index: TableIndex,
        elem_index: ElemIndex,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<()> {
        let mut storage = None;
        let elements = store
            .instance(instance)
            .passive_element_segment(&mut storage, elem_index);
        let mut const_evaluator = ConstExprEvaluator::default();
        Self::table_init_segment(
            store,
            limiter,
            asyncness,
            instance,
            &mut const_evaluator,
            table_index,
            elements,
            dst,
            src,
            len,
        )
        .await
    }

    pub(crate) async fn table_init_segment(
        store: &mut StoreOpaque,
        mut limiter: Option<&mut StoreResourceLimiter<'_>>,
        asyncness: Asyncness,
        elements_instance_id: InstanceId,
        const_evaluator: &mut ConstExprEvaluator,
        table_index: TableIndex,
        elements: &TableSegmentElements,
        dst: u64,
        src: u64,
        len: u64,
    ) -> Result<()> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init

        let store_id = store.id();
        let elements_instance = store.instance_mut(elements_instance_id);
        let table = elements_instance.get_exported_table(store_id, table_index);
        let table_size = table._size(store);

        // Perform a bounds check on the table being written to. This is done by
        // ensuring that `dst + len <= table.size()` via checked arithmetic.
        //
        // Note that the bounds check for the element segment happens below when
        // the original segment is sliced via `src` and `len`.
        table_size
            .checked_sub(dst)
            .and_then(|i| i.checked_sub(len))
            .ok_or(Trap::TableOutOfBounds)?;

        let src = usize::try_from(src).map_err(|_| Trap::TableOutOfBounds)?;
        let len = usize::try_from(len).map_err(|_| Trap::TableOutOfBounds)?;

        let positions = dst..dst + u64::try_from(len).unwrap();
        match elements {
            TableSegmentElements::Functions(funcs) => {
                let elements = funcs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                for (i, func_idx) in positions.zip(elements) {
                    let (instance, registry) =
                        store.instance_and_module_registry_mut(elements_instance_id);
                    // SAFETY: the `store_id` passed to `get_exported_func` is
                    // indeed the store that owns the function.
                    let func = unsafe { instance.get_exported_func(registry, store_id, *func_idx) };
                    table.set_(store, i, func.into()).unwrap();
                }
            }
            TableSegmentElements::Expressions(exprs) => {
                let mut store = OpaqueRootScope::new(store);
                let exprs = exprs
                    .get(src..)
                    .and_then(|s| s.get(..len))
                    .ok_or(Trap::TableOutOfBounds)?;
                let mut context = ConstEvalContext::new(elements_instance_id, asyncness);
                for (i, expr) in positions.zip(exprs) {
                    let element = const_evaluator
                        .eval(&mut store, limiter.as_deref_mut(), &mut context, expr)
                        .await?;
                    table.set_(&mut store, i, element.ref_().unwrap()).unwrap();
                }
            }
        }

        Ok(())
    }

    /// Drop an element.
    pub(crate) fn elem_drop(self: Pin<&mut Self>, elem_index: ElemIndex) {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop

        self.dropped_elements_mut().insert(elem_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory_mut(self: Pin<&mut Self>, index: DefinedMemoryIndex) -> &mut Memory {
        &mut self.memories_mut()[index].1
    }

    /// Get a locally-defined memory.
    pub fn get_defined_memory(&self, index: DefinedMemoryIndex) -> &Memory {
        &self.memories[index].1
    }

    /// Construct a `VMMemoryImport` describing the locally-defined memory at
    /// `index`, suitable for use as an import of this instance's memory.
    pub fn get_defined_memory_vmimport(&self, index: DefinedMemoryIndex) -> VMMemoryImport {
        crate::runtime::vm::VMMemoryImport {
            from: self.memory_ptr(index).into(),
            vmctx: self.vmctx().into(),
            index,
        }
    }

    /// Do a `memory.copy`
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the source or destination ranges are out of
    /// bounds.
    pub(crate) fn memory_copy(
        self: Pin<&mut Self>,
        dst_index: MemoryIndex,
        dst: u64,
        src_index: MemoryIndex,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy

        let src_mem = self.get_memory(src_index);
        let dst_mem = self.get_memory(dst_index);

        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above; by this point we know that
        // everything is safe.
        unsafe {
            let dst = dst_mem.base.as_ptr().add(dst);
            let src = src_mem.base.as_ptr().add(src);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy(src, dst, len);
        }

        Ok(())
    }

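    /// Checks that `ptr .. ptr + len` lies within a linear range of `max`
    /// bytes, returning `ptr` converted to `usize` on success. The end is
    /// computed with overflow-checked arithmetic, so as a worked example with
    /// `max = 65536` (one Wasm page), `ptr = 65532`, and `len = 8`: the end is
    /// 65540, which exceeds `max`, so the operation results in a
    /// `MemoryOutOfBounds` trap.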
    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
        let oob = || Trap::MemoryOutOfBounds;
        let end = ptr
            .checked_add(len)
            .and_then(|i| usize::try_from(i).ok())
            .ok_or_else(oob)?;
        if end > max {
            Err(oob())
        } else {
            Ok(ptr.try_into().unwrap())
        }
    }

    /// Perform the `memory.fill` operation on a locally defined memory.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the memory range is out of bounds.
    pub(crate) fn memory_fill(
        self: Pin<&mut Self>,
        memory_index: DefinedMemoryIndex,
        dst: u64,
        val: u8,
        len: u64,
    ) -> Result<(), Trap> {
        let memory_index = self.env_module().memory_index(memory_index);
        let memory = self.get_memory(memory_index);
        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;
        let len = usize::try_from(len).unwrap();

        // Bounds and casts are checked above; by this point we know that
        // everything is safe.
        unsafe {
            let dst = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::write_bytes(dst, val, len);
        }

        Ok(())
    }

    /// Get the internal storage range of a particular Wasm data segment.
    pub(crate) fn wasm_data_range(&self, index: DataIndex) -> Range<u32> {
        match self.env_module().passive_data_map.get(&index) {
            Some(range) if !self.dropped_data.contains(index) => range.clone(),
            _ => 0..0,
        }
    }

    /// Given an internal storage range of a Wasm data segment (or subset of a
    /// Wasm data segment), get the data's raw bytes.
    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
        let start = usize::try_from(range.start).unwrap();
        let end = usize::try_from(range.end).unwrap();
        &self.runtime_info.wasm_data()[start..end]
    }

    /// Performs the `memory.init` operation.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the destination range is out of this module's
    /// memory's bounds or if the source range is outside the data segment's
    /// bounds.
    pub(crate) fn memory_init(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        data_index: DataIndex,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        let range = self.wasm_data_range(data_index);
        self.memory_init_segment(memory_index, range, dst, src, len)
    }

    pub(crate) fn memory_init_segment(
        self: Pin<&mut Self>,
        memory_index: MemoryIndex,
        range: Range<u32>,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let data = self.wasm_data(range);
        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
        let len = len as usize;

        unsafe {
            let src_start = data.as_ptr().add(src);
            let dst_start = memory.base.as_ptr().add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy_nonoverlapping(src_start, dst_start, len);
        }

        Ok(())
    }

    /// Drop the given data segment, truncating its length to zero.
    pub(crate) fn data_drop(self: Pin<&mut Self>, data_index: DataIndex) {
        self.dropped_data_mut().insert(data_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }

    /// Get a table by index regardless of whether it is locally-defined
    /// or an imported, foreign table. Ensure that the given range of
    /// elements in the table is lazily initialized. We define this
    /// operation all-in-one for safety, to ensure the lazy-init
    /// happens.
    ///
    /// Takes an `Iterator` for the index-range to lazy-initialize,
    /// for flexibility. This can be a range, single item, or empty
    /// sequence, for example. The iterator should return indices in
    /// increasing order, so that the break-at-out-of-bounds behavior
    /// works correctly.
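    ///
    /// A hedged usage sketch (illustrative; assumes a `registry` and a table
    /// index are in scope):
    ///
    /// ```ignore
    /// // Ensure elements `start..end` are initialized before inspecting them.
    /// let table = instance.get_table_with_lazy_init(registry, table_index, start..end);
    /// ```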
    pub(crate) fn get_table_with_lazy_init(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        table_index: TableIndex,
        range: impl Iterator<Item = u64>,
    ) -> &mut Table {
        let (idx, instance) = self.defined_table_index_and_instance(table_index);
        instance.get_defined_table_with_lazy_init(registry, idx, range)
    }

    /// Gets the raw runtime table data structure owned by this instance
    /// given the provided `idx`.
    ///
    /// The `range` specified is eagerly initialized for funcref tables.
    pub fn get_defined_table_with_lazy_init(
        mut self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        idx: DefinedTableIndex,
        range: impl IntoIterator<Item = u64>,
    ) -> &mut Table {
        let elt_ty = self.tables[idx].1.element_type();

        if elt_ty == TableElementType::Func {
            for i in range {
                match self.tables[idx].1.get_func_maybe_init(i) {
                    // Uninitialized table element.
                    Ok(None) => {}
                    // Initialized table element, move on to the next.
                    Ok(Some(_)) => continue,
                    // Out-of-bounds; caller will handle by likely
                    // throwing a trap. No work to do to lazy-init
                    // beyond the end.
                    Err(_) => break,
                };

                // The table element `i` is uninitialized and is now being
                // initialized. This must imply that a `precompiled` list of
                // function indices is available for this table. The precompiled
                // list is extracted and then it is consulted with `i` to
                // determine the function that is going to be initialized. Note
                // that `i` may be outside the limits of the static
                // initialization so it's a fallible `get` instead of an index.
                let module = self.env_module();
                let precomputed = match &module.table_initialization.initial_values[idx] {
                    TableInitialValue::Null { precomputed } => precomputed,
                    TableInitialValue::Expr(_) => unreachable!(),
                };
                // Panicking here helps catch bugs rather than silently truncating by accident.
                let func_index = precomputed.get(usize::try_from(i).unwrap()).cloned();
                let func_ref = func_index
                    .and_then(|func_index| self.as_mut().get_func_ref(registry, func_index));
                self.as_mut().tables_mut()[idx]
                    .1
                    .set_func(i, func_ref)
                    .expect("Table type should match and index should be in-bounds");
            }
        }

        self.get_defined_table(idx)
    }

    /// Get a table by index regardless of whether it is locally-defined or an
    /// imported, foreign table.
    pub(crate) fn get_table(self: Pin<&mut Self>, table_index: TableIndex) -> &mut Table {
        let (idx, instance) = self.defined_table_index_and_instance(table_index);
        instance.get_defined_table(idx)
    }

    /// Get a locally-defined table.
    pub(crate) fn get_defined_table(self: Pin<&mut Self>, index: DefinedTableIndex) -> &mut Table {
        &mut self.tables_mut()[index].1
    }

    pub(crate) fn defined_table_index_and_instance<'a>(
        self: Pin<&'a mut Self>,
        index: TableIndex,
    ) -> (DefinedTableIndex, Pin<&'a mut Instance>) {
        if let Some(defined_table_index) = self.env_module().defined_table_index(index) {
            (defined_table_index, self)
        } else {
            let import = self.imported_table(index);
            let index = import.index;
            let vmctx = import.vmctx.as_non_null();
            // SAFETY: the validity of `self` means that the reachable instances
            // should also all be owned by the same store and fully initialized,
            // so it's safe to laterally move from a mutable borrow of this
            // instance to a mutable borrow of a sibling instance.
            let foreign_instance = unsafe { self.sibling_vmctx_mut(vmctx) };
            (index, foreign_instance)
        }
    }

    /// Same as `self.runtime_info.env_module()` but additionally returns the
    /// `Pin<&mut Self>` with the same original lifetime.
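    ///
    /// A hedged usage sketch (illustrative only):
    ///
    /// ```ignore
    /// let (module, mut instance) = self.module_and_self();
    /// // `module` stays borrowed while `instance` retains mutable access to
    /// // every other field of the same `Instance`.
    /// for i in 0..module.num_defined_tables() {
    ///     let _table = instance.as_mut().get_defined_table(DefinedTableIndex::new(i));
    /// }
    /// ```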
1333 pub fn module_and_self(self: Pin<&mut Self>) -> (&wasmtime_environ::Module, Pin<&mut Self>) {
1334 // SAFETY: this function is projecting both `&Module` and the same
1335 // pointer both connected to the same lifetime. This is safe because
1336 // it's a contract of `Pin<&mut Self>` that the `runtime_info` field is
1337 // never written, meaning it's effectively unsafe to have `&mut Module`
1338 // projected from `Pin<&mut Self>`. Consequently it's safe to have a
1339 // read-only view of the field while still retaining mutable access to
1340 // all other fields.
1341 let module = self.runtime_info.env_module();
1342 let module = &raw const *module;
1343 let module = unsafe { &*module };
1344 (module, self)
1345 }
1346
1347 /// Initialize the VMContext data associated with this Instance.
1348 ///
1349 /// The `VMContext` memory is assumed to be uninitialized; any field
1350 /// that we need in a certain state will be explicitly written by this
1351 /// function.
1352 unsafe fn initialize_vmctx(self: Pin<&mut Self>, store: &StoreOpaque, imports: Imports) {
1353 let (module, mut instance) = self.module_and_self();
1354
1355 // SAFETY: the type of the magic field is indeed `u32` and this function
1356 // is initializing its value.
1357 unsafe {
1358 let offsets = instance.runtime_info.offsets();
1359 instance
1360 .vmctx_plus_offset_raw::<u32>(offsets.ptr.vmctx_magic())
1361 .write(VMCONTEXT_MAGIC);
1362 }
1363
1364 // SAFETY: it's up to the caller to provide a valid store pointer here.
1365 unsafe {
1366 instance.as_mut().set_store(store);
1367 }
1368
1369 // Initialize shared types
1370 //
1371 // SAFETY: validity of the vmctx means it should be safe to write to it
1372 // here.
1373 unsafe {
1374 let types = NonNull::from(instance.runtime_info.type_ids());
1375 instance.type_ids_array().write(types.cast().into());
1376 }
1377
1378 // Initialize the built-in functions
1379 //
1380 // SAFETY: the type of the builtin functions field is indeed a pointer
1381 // and the pointer being filled in here, plus the vmctx is valid to
1382 // write to during initialization.
1383 unsafe {
1384 static BUILTINS: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray::INIT;
1385 let ptr = BUILTINS.expose_provenance();
1386 let offsets = instance.runtime_info.offsets();
1387 instance
1388 .vmctx_plus_offset_raw(offsets.ptr.vmctx_builtin_functions())
1389 .write(VmPtr::from(ptr));
1390 }
1391
1392 // Initialize the imports
1393 //
1394 // SAFETY: the vmctx is safe to initialize during this function and
1395 // validity of each item itself is a contract the caller must uphold.
1396 debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
1397 unsafe {
1398 let offsets = instance.runtime_info.offsets();
1399 ptr::copy_nonoverlapping(
1400 imports.functions.as_ptr(),
1401 instance
1402 .vmctx_plus_offset_raw(offsets.vmctx_imported_functions_begin())
1403 .as_ptr(),
1404 imports.functions.len(),
1405 );
1406 debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
1407 ptr::copy_nonoverlapping(
1408 imports.tables.as_ptr(),
1409 instance
1410 .vmctx_plus_offset_raw(offsets.vmctx_imported_tables_begin())
1411 .as_ptr(),
1412 imports.tables.len(),
1413 );
1414 debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
1415 ptr::copy_nonoverlapping(
1416 imports.memories.as_ptr(),
1417 instance
1418 .vmctx_plus_offset_raw(offsets.vmctx_imported_memories_begin())
1419 .as_ptr(),
1420 imports.memories.len(),
1421 );
1422 debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
1423 ptr::copy_nonoverlapping(
1424 imports.globals.as_ptr(),
1425 instance
1426 .vmctx_plus_offset_raw(offsets.vmctx_imported_globals_begin())
1427 .as_ptr(),
1428 imports.globals.len(),
1429 );
1430 debug_assert_eq!(imports.tags.len(), module.num_imported_tags);
1431 ptr::copy_nonoverlapping(
1432 imports.tags.as_ptr(),
1433 instance
1434 .vmctx_plus_offset_raw(offsets.vmctx_imported_tags_begin())
1435 .as_ptr(),
1436 imports.tags.len(),
1437 );
1438 }
1439
1440 // N.B.: there is no need to initialize the funcrefs array because we
1441 // eagerly construct each element in it whenever asked for a reference
1442 // to that element. In other words, there is no state needed to track
1443 // the lazy-init, so we don't need to initialize any state now.
1444
1445 // Initialize the defined tables
1446 //
1447 // SAFETY: it's safe to initialize these tables during initialization
1448 // here and the various types of pointers and such here should all be
1449 // valid.
1450 unsafe {
1451 let offsets = instance.runtime_info.offsets();
1452 let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tables_begin());
1453 let tables = instance.as_mut().tables_mut();
1454 for i in 0..module.num_defined_tables() {
1455 ptr.write(tables[DefinedTableIndex::new(i)].1.vmtable());
1456 ptr = ptr.add(1);
1457 }
1458 }
1459
1460 // Initialize the defined memories. This fills in both the
1461 // `defined_memories` table and the `owned_memories` table at the same
1462 // time. Entries in `defined_memories` hold a pointer to a definition
1463 // (all memories) whereas the `owned_memories` hold the actual
1464 // definitions of memories owned (not shared) in the module.
1465 //
1466 // SAFETY: it's safe to initialize these memories during initialization
1467 // here and the various types of pointers and such here should all be
1468 // valid.
        unsafe {
            let offsets = instance.runtime_info.offsets();
            let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_memories_begin());
            let mut owned_ptr =
                instance.vmctx_plus_offset_raw(offsets.vmctx_owned_memories_begin());
            let memories = instance.as_mut().memories_mut();
            for i in 0..module.num_defined_memories() {
                let defined_memory_index = DefinedMemoryIndex::new(i);
                let memory_index = module.memory_index(defined_memory_index);
                if module.memories[memory_index].shared {
                    let def_ptr = memories[defined_memory_index]
                        .1
                        .as_shared_memory()
                        .unwrap()
                        .vmmemory_ptr();
                    ptr.write(VmPtr::from(def_ptr));
                } else {
                    owned_ptr.write(memories[defined_memory_index].1.vmmemory());
                    ptr.write(VmPtr::from(owned_ptr));
                    owned_ptr = owned_ptr.add(1);
                }
                ptr = ptr.add(1);
            }
        }

        // Zero-initialize the globals so that nothing is uninitialized memory
        // after this function returns. The globals are actually initialized
        // with their const expression initializers after the instance is fully
        // allocated.
        //
        // SAFETY: it's safe to initialize globals during initialization here.
        // Note that while the all-zeros bit pattern is not a valid value for
        // every global type, zeroing the memory is better than leaving it in
        // an undefined state. Accessing a global after this point is still
        // unsafe, but a read of a zeroed global should fail faster and more
        // predictably than a read of uninitialized memory.
        unsafe {
            for (index, _init) in module.global_initializers.iter() {
                instance.global_ptr(index).write(VMGlobalDefinition::new());
            }
        }

        // Initialize the defined tags
        //
        // SAFETY: it's safe to initialize these tags during initialization
        // here and the various types of pointers and such here should all be
        // valid.
        unsafe {
            let offsets = instance.runtime_info.offsets();
            let mut ptr = instance.vmctx_plus_offset_raw(offsets.vmctx_tags_begin());
            for i in 0..module.num_defined_tags() {
                let defined_index = DefinedTagIndex::new(i);
                let tag_index = module.tag_index(defined_index);
                let tag = module.tags[tag_index];
                ptr.write(VMTagDefinition::new(
                    tag.signature.unwrap_engine_type_index(),
                ));
                ptr = ptr.add(1);
            }
        }
    }

    /// Attempts to convert the host `addr` specified to a WebAssembly-based
    /// address recorded in a `WasmFault`.
    ///
    /// This method will check all linear memories that this instance contains
    /// to see if any of them contain `addr`. If one does then `Some` is
    /// returned with metadata about the wasm fault. Otherwise `None` is
    /// returned and `addr` doesn't belong to this instance.
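    ///
    /// A hypothetical sketch of how a caller that has recorded a faulting
    /// host address might use this; the `instance` and `faulting_host_addr`
    /// bindings are assumed here for illustration:
    ///
    /// ```ignore
    /// if let Some(fault) = instance.wasm_fault(faulting_host_addr) {
    ///     // The fault landed in one of this instance's linear memories;
    ///     // `wasm_address` is relative to that memory's base.
    ///     println!(
    ///         "wasm fault at {:#x} (memory size {:#x})",
    ///         fault.wasm_address, fault.memory_size,
    ///     );
    /// }
    /// ```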
    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        let mut fault = None;
        for (_, (_, memory)) in self.memories.iter() {
            let accessible = memory.wasm_accessible();
            if accessible.start <= addr && addr < accessible.end {
                // All linear memories should be disjoint so assert that no
                // prior fault has been found.
                assert!(fault.is_none());
                fault = Some(WasmFault {
                    memory_size: memory.byte_size(),
                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
                });
            }
        }
        fault
    }

    /// Returns the id that this instance is assigned within its store.
    pub fn id(&self) -> InstanceId {
        self.id
    }

    /// Get all memories within this instance.
    ///
    /// Returns both imported and defined memories.
    ///
    /// Returns both exported and non-exported memories.
    ///
    /// Gives access to the full index space of memories.
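    ///
    /// A minimal usage sketch; the `instance` and `store_id` bindings are
    /// assumed here for illustration:
    ///
    /// ```ignore
    /// for (index, export) in instance.all_memories(store_id) {
    ///     // `index` is the module-level `MemoryIndex`: imported memories
    ///     // come first, followed by defined memories.
    ///     let _ = (index, export);
    /// }
    /// ```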
    pub fn all_memories(
        &self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = (MemoryIndex, ExportMemory)> + '_ {
        self.env_module()
            .memories
            .iter()
            .map(move |(i, _)| (i, self.get_exported_memory(store, i)))
    }

    /// Return the memories defined in this instance (not imported).
    pub fn defined_memories<'a>(
        &'a self,
        store: StoreId,
    ) -> impl ExactSizeIterator<Item = ExportMemory> + 'a {
        let num_imported = self.env_module().num_imported_memories;
        self.all_memories(store)
            .skip(num_imported)
            .map(|(_i, memory)| memory)
    }

    /// Looks up an exported item with the given index.
    ///
    /// # Panics
    ///
    /// Panics if `export` is not valid for this instance.
    ///
    /// # Safety
    ///
    /// This function requires that `store` is the correct store which owns
    /// this instance.
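    ///
    /// A hypothetical sketch of resolving an export and narrowing it to an
    /// expected kind; the `instance`, `registry`, `store_id`, and `index`
    /// bindings are assumed here for illustration:
    ///
    /// ```ignore
    /// // SAFETY: `store_id` identifies the store that owns `instance`.
    /// let export = unsafe { instance.get_export_by_index_mut(registry, store_id, index) };
    /// match export {
    ///     Export::Memory(m) => { /* use the exported memory */ }
    ///     _ => { /* some other kind of entity */ }
    /// }
    /// ```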
    pub unsafe fn get_export_by_index_mut(
        self: Pin<&mut Self>,
        registry: &ModuleRegistry,
        store: StoreId,
        export: EntityIndex,
    ) -> Export {
        match export {
            // SAFETY: the contract that `store` owns this instance is a
            // safety requirement of this function itself.
            EntityIndex::Function(i) => {
                Export::Function(unsafe { self.get_exported_func(registry, store, i) })
            }
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(store, i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(store, i)),
            EntityIndex::Memory(i) => match self.get_exported_memory(store, i) {
                ExportMemory::Unshared(m) => Export::Memory(m),
                ExportMemory::Shared(m, i) => Export::SharedMemory(m, i),
            },
            EntityIndex::Tag(i) => Export::Tag(self.get_exported_tag(store, i)),
        }
    }

    fn store_mut(self: Pin<&mut Self>) -> &mut Option<VMStoreRawPtr> {
        // SAFETY: this is a pin-projection to get a mutable reference to an
        // internal field and is safe so long as the `&mut Self` temporarily
        // created is not overwritten, which it isn't here.
        unsafe { &mut self.get_unchecked_mut().store }
    }

    fn dropped_elements_mut(self: Pin<&mut Self>) -> &mut EntitySet<ElemIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_elements }
    }

    fn dropped_data_mut(self: Pin<&mut Self>) -> &mut EntitySet<DataIndex> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().dropped_data }
    }

    fn memories_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().memories }
    }

    pub(crate) fn tables_mut(
        self: Pin<&mut Self>,
    ) -> &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().tables }
    }

    #[cfg(feature = "wmemcheck")]
    pub(super) fn wmemcheck_state_mut(self: Pin<&mut Self>) -> &mut Option<Wmemcheck> {
        // SAFETY: see `store_mut` above.
        unsafe { &mut self.get_unchecked_mut().wmemcheck_state }
    }
}

// SAFETY: `layout` should describe this accurately and `OwnedVMContext` is the
// last field of `Instance`.
unsafe impl InstanceLayout for Instance {
    const INIT_ZEROED: bool = false;
    type VMContext = VMContext;

    fn layout(&self) -> Layout {
        Self::alloc_layout(self.runtime_info.offsets())
    }

    fn owned_vmctx(&self) -> &OwnedVMContext<VMContext> {
        &self.vmctx
    }

    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<VMContext> {
        &mut self.vmctx
    }
}

pub type InstanceHandle = OwnedInstance<Instance>;

/// A handle holding an `Instance` of a WebAssembly module.
///
/// This structure is an owning handle of the `instance` contained internally.
/// When this value goes out of scope it will deallocate the `Instance` and all
/// memory associated with it.
///
/// Note that this lives within a `StoreOpaque` on a list of instances that a
/// store is keeping alive.
#[derive(Debug)]
#[repr(transparent)] // guarantee this is a zero-cost wrapper
pub struct OwnedInstance<T: InstanceLayout> {
    /// The raw pointer to the instance that was allocated.
    ///
    /// Note that this is not equivalent to `Box<Instance>` because the
    /// allocation here has a `VMContext` trailing after it, hence the custom
    /// destructor which invokes `dealloc` with the appropriate layout.
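    ///
    /// A rough sketch of the allocation this points at (sizes here are
    /// dynamic, determined by the instance's `layout()`):
    ///
    /// ```text
    /// +--------------+------------------------------+
    /// | T            | T::VMContext (dynamic size)  |
    /// +--------------+------------------------------+
    /// ^ instance     ^ vmctx_self_reference
    /// ```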
    instance: SendSyncPtr<T>,
    _marker: marker::PhantomData<Box<(T, OwnedVMContext<T::VMContext>)>>,
}

/// Structure that must be placed at the end of a type implementing
/// `InstanceLayout`.
#[repr(align(16))] // match the alignment of VMContext
pub struct OwnedVMContext<T> {
    /// A pointer to the `vmctx` field at the end of this structure.
    ///
    /// If you're looking at this a reasonable question would be "why do we
    /// need a pointer to ourselves?" because after all the pointer's value is
    /// trivially derivable from any `&Instance` pointer. The rationale for
    /// this field's existence is subtle, but it's required for correctness.
    /// The short version is "this makes Miri happy".
    ///
    /// The long version of why this field exists is that the rules Miri uses
    /// to ensure pointers are used correctly come with conditions that depend
    /// on how the pointers were derived. More specifically, if a `*mut T` is
    /// derived from a `&mut T`, then that invalidates all prior pointers
    /// derived from the same `&mut T`. This means that while we liberally
    /// want to re-acquire a `*mut VMContext` throughout the implementation of
    /// `Instance`, the trivial way of doing so, a function
    /// `fn vmctx(Pin<&mut Instance>) -> *mut VMContext`, would effectively
    /// invalidate all previously acquired `*mut VMContext` pointers. The
    /// purpose of this field is to serve as a sort of source-of-truth for
    /// where `*mut VMContext` pointers come from.
    ///
    /// This field is initialized when the `Instance` is created with the
    /// original allocation's pointer. That means that the provenance of this
    /// pointer contains the entire allocation (both instance and `VMContext`).
    /// This provenance bit is then "carried through" where `fn vmctx` will
    /// base all returned pointers on this pointer itself. This provides the
    /// means of never invalidating this pointer throughout Miri and
    /// additionally being able to still temporarily have `Pin<&mut Instance>`
    /// methods and such.
    ///
    /// It's important to note, though, that this is not here purely for Miri.
    /// The careful construction of the `fn vmctx` method has ramifications on
    /// the LLVM IR generated, for example. A historical CVE on Wasmtime,
    /// GHSA-ch89-5g45-qwc7, was caused by relying on undefined behavior. By
    /// deriving VMContext pointers from this pointer it specifically hints to
    /// LLVM that trickery is afoot and it properly informs `noalias`
    /// annotations and analyses. More-or-less this pointer is actually loaded
    /// in LLVM IR, which helps defeat aliasing optimizations that would
    /// otherwise apply: writes through pointers derived from this one should
    /// basically never be optimized out.
    ///
    /// As a final note it's worth pointing out that the machine code
    /// generated for accessing `fn vmctx` is still as one would expect. This
    /// member isn't actually ever loaded at runtime (or at least shouldn't
    /// be). Perhaps in the future if the memory consumption of this field is
    /// a problem we could shrink it slightly, but for now one extra pointer
    /// per wasm instance seems not too bad.
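    ///
    /// A minimal, self-contained sketch of the invalidation rule described
    /// above (illustrative only, not code from this module); under Miri's
    /// Stacked Borrows model the commented-out line is undefined behavior:
    ///
    /// ```ignore
    /// let mut x = 0u8;
    /// let p1 = &mut x as *mut u8;
    /// // Re-borrowing `x` invalidates `p1`...
    /// let p2 = &mut x as *mut u8;
    /// unsafe { *p2 = 1 };
    /// // unsafe { *p1 = 2 }; // ...so this write would be rejected by Miri.
    /// ```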
    vmctx_self_reference: SendSyncPtr<T>,

    /// This field ensures that going from `Pin<&mut T>` to `&mut T` is not a
    /// safe operation.
    _marker: core::marker::PhantomPinned,
}

impl<T> OwnedVMContext<T> {
    /// Creates a new blank vmctx to place at the end of an instance.
    pub fn new() -> OwnedVMContext<T> {
        OwnedVMContext {
            vmctx_self_reference: SendSyncPtr::new(NonNull::dangling()),
            _marker: core::marker::PhantomPinned,
        }
    }
}

/// Helper trait to plumb both core instances and component instances into
/// `OwnedInstance` below.
///
/// # Safety
///
/// This trait requires `layout` to correctly describe `Self` and appropriately
/// allocate space for `Self::VMContext` afterwards. Additionally the field
/// returned by `owned_vmctx()` must be the last field in the structure.
pub unsafe trait InstanceLayout {
    /// Whether or not to allocate this instance with `alloc_zeroed` or `alloc`.
    const INIT_ZEROED: bool;

    /// The trailing `VMContext` type at the end of this instance.
    type VMContext;

    /// The memory layout to use to allocate and deallocate this instance.
    fn layout(&self) -> Layout;

    fn owned_vmctx(&self) -> &OwnedVMContext<Self::VMContext>;
    fn owned_vmctx_mut(&mut self) -> &mut OwnedVMContext<Self::VMContext>;

    /// Returns the `vmctx_self_reference` set above.
    #[inline]
    fn vmctx(&self) -> NonNull<Self::VMContext> {
        // The definition of this method is subtle but intentional. The goal
        // here is that effectively this should return `&mut self.vmctx`, but
        // it's not quite so simple. Some more documentation is available on
        // the `vmctx_self_reference` field, but the general idea is that
        // we're creating a pointer to return with proper provenance.
        // Provenance is still in the works in Rust at the time of this
        // writing but the load of the `self.vmctx_self_reference` field is
        // important here as it affects how LLVM thinks about aliasing with
        // respect to the returned pointer.
        //
        // The intention of this method is to codegen to machine code as
        // `&mut self.vmctx`, however. While it doesn't show up like this in
        // LLVM IR (there's an actual load of the field) it does look like
        // that by the time the backend runs. (that's magic to me, the backend
        // removing loads...)
        let owned_vmctx = self.owned_vmctx();
        let owned_vmctx_raw = NonNull::from(owned_vmctx);
        // SAFETY: it's part of the contract of `InstanceLayout` and the usage
        // with `OwnedInstance` that this indeed points to the vmctx.
        let addr = unsafe { owned_vmctx_raw.add(1) };
        owned_vmctx
            .vmctx_self_reference
            .as_non_null()
            .with_addr(addr.addr())
    }

    /// Helper function to access various locations offset from our `*mut
    /// VMContext` object.
    ///
    /// Note that this method takes `&self` as an argument but returns
    /// `NonNull<T>` which is frequently used to mutate said memory. This is
    /// an intentional design decision where the safety of the modification of
    /// memory is placed as a burden onto the caller. The implementation of
    /// this method explicitly does not require `&mut self` to acquire mutable
    /// provenance to update the `VMContext` region. Instead all pointers into
    /// the `VMContext` area have provenance/permissions to write.
    ///
    /// Also note though that care must be taken to ensure that reads/writes
    /// of memory must only happen where appropriate, for example a non-atomic
    /// write (as most are) should never happen concurrently with another read
    /// or write. It's generally the burden of the caller to adhere to this.
    ///
    /// Also of note is that most of the time the usage of this method falls
    /// into one of:
    ///
    /// * Something in the VMContext is being read or written. In that case
    ///   use `vmctx_plus_offset` or `vmctx_plus_offset_mut` if possible due
    ///   to that having a safer lifetime.
    ///
    /// * A pointer is being created to pass to other VM* data structures. In
    ///   that situation the lifetime of all VM data structures is typically
    ///   tied to the `Store<T>` which is what provides the guarantees around
    ///   concurrency/etc.
    ///
    /// There's quite a lot of unsafety riding on this method, especially
    /// related to the ascription `T` of the byte `offset`. It's hoped that in
    /// the future we're able to settle on an in-theory safer design.
    ///
    /// # Safety
    ///
    /// This method is unsafe because the `offset` must be within bounds of
    /// the `VMContext` object trailing this instance. Additionally `T` must
    /// be a valid ascription of the value that resides at that location.
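    ///
    /// A hedged sketch of obtaining a typed pointer into the vmctx, similar
    /// to what the initialization code earlier in this file does; the
    /// `instance` and `offsets` bindings are assumed here for illustration:
    ///
    /// ```ignore
    /// // SAFETY: the builtin-functions offset is in bounds for this vmctx
    /// // and that slot holds a `VmPtr<VMBuiltinFunctionsArray>`.
    /// let builtins = unsafe {
    ///     instance.vmctx_plus_offset_raw::<VmPtr<VMBuiltinFunctionsArray>>(
    ///         offsets.ptr.vmctx_builtin_functions(),
    ///     )
    /// };
    /// ```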
    unsafe fn vmctx_plus_offset_raw<T: VmSafe>(&self, offset: impl Into<u32>) -> NonNull<T> {
        // SAFETY: the safety requirements of `byte_add` are forwarded to this
        // method's caller.
        unsafe {
            self.vmctx()
                .byte_add(usize::try_from(offset.into()).unwrap())
                .cast()
        }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&self` to the returned reference `&T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset<T: VmSafe>(&self, offset: impl Into<u32>) -> &T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_ref() }
    }

    /// Helper above `vmctx_plus_offset_raw` which transfers the lifetime of
    /// `&mut self` to the returned reference `&mut T`.
    ///
    /// # Safety
    ///
    /// See the safety documentation of `vmctx_plus_offset_raw`.
    unsafe fn vmctx_plus_offset_mut<T: VmSafe>(
        self: Pin<&mut Self>,
        offset: impl Into<u32>,
    ) -> &mut T {
        // SAFETY: this method has the same safety requirements as
        // `vmctx_plus_offset_raw`.
        unsafe { self.vmctx_plus_offset_raw(offset).as_mut() }
    }
}

impl<T: InstanceLayout> OwnedInstance<T> {
    /// Allocates a new `OwnedInstance` and places `instance` inside of it.
    ///
    /// This will allocate memory for `instance` along with its trailing
    /// `T::VMContext` and then move `instance` into the new allocation.
    pub(super) fn new(mut instance: T) -> OwnedInstance<T> {
        let layout = instance.layout();
        debug_assert!(layout.size() >= size_of_val(&instance));
        debug_assert!(layout.align() >= align_of_val(&instance));

        // SAFETY: it's up to us to assert that `layout` has a non-zero size,
        // which is asserted here.
        let ptr = unsafe {
            assert!(layout.size() > 0);
            if T::INIT_ZEROED {
                alloc::alloc::alloc_zeroed(layout)
            } else {
                alloc::alloc::alloc(layout)
            }
        };
        if ptr.is_null() {
            alloc::alloc::handle_alloc_error(layout);
        }
        let instance_ptr = NonNull::new(ptr.cast::<T>()).unwrap();

        // SAFETY: it's part of the unsafe contract of `InstanceLayout` that
        // the `add` here is appropriate for the layout allocated.
        let vmctx_self_reference = unsafe { instance_ptr.add(1).cast() };
        instance.owned_vmctx_mut().vmctx_self_reference = vmctx_self_reference.into();

        // SAFETY: we allocated above and it's an unsafe contract of
        // `InstanceLayout` that the layout is suitable for writing the
        // instance.
        unsafe {
            instance_ptr.write(instance);
        }

        let ret = OwnedInstance {
            instance: SendSyncPtr::new(instance_ptr),
            _marker: marker::PhantomData,
        };

        // Double-check various vmctx calculations are correct.
        debug_assert_eq!(
            vmctx_self_reference.addr(),
            // SAFETY: `InstanceLayout` should guarantee it's safe to add 1 to
            // the last field to get a pointer to 1-byte-past-the-end of an
            // object, which should be valid.
            unsafe { NonNull::from(ret.get().owned_vmctx()).add(1).addr() }
        );
        debug_assert_eq!(vmctx_self_reference.addr(), ret.get().vmctx().addr());

        ret
    }

    /// Gets the raw underlying `&Instance` from this handle.
    pub fn get(&self) -> &T {
        // SAFETY: this is an owned instance handle that retains exclusive
        // ownership of the `Instance` inside. With `&self` given we know this
        // pointer is valid and the returned lifetime is connected to `self`,
        // so that should also be valid.
        unsafe { self.instance.as_non_null().as_ref() }
    }

    /// Same as [`Self::get`] except for mutability.
    pub fn get_mut(&mut self) -> Pin<&mut T> {
        // SAFETY: The lifetime concerns here are the same as `get` above.
        // Otherwise `new_unchecked` is used here to uphold the contract that
        // instances are always pinned in memory.
        unsafe { Pin::new_unchecked(self.instance.as_non_null().as_mut()) }
    }
}

impl<T: InstanceLayout> Drop for OwnedInstance<T> {
    fn drop(&mut self) {
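        // SAFETY: this handle exclusively owns the allocation, so it's valid
        // to drop the instance in place and then deallocate with the same
        // layout that was used to allocate it in `OwnedInstance::new`.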
        unsafe {
            let layout = self.get().layout();
            ptr::drop_in_place(self.instance.as_ptr());
            alloc::alloc::dealloc(self.instance.as_ptr().cast(), layout);
        }
    }
}