wasmtime/runtime/store.rs
1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
//! reexported to the outside world to be usable outside of this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//! Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//! intended to be consumed by the outside world. Note that the "just a
39//! pointer large" is a load-bearing implementation detail in Wasmtime. This
40//! enables it to store a pointer to its own trait object which doesn't need
41//! to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//! stored inside the `Box`. This is the general Rust pattern when one struct
45//! is a layer over another. The surprising part, though, is that this is
46//! further subdivided. This structure only contains things which actually
47//! need `T` itself. The downside of this structure is that it's always
48//! generic and means that code is monomorphized into consumer crates. We
49//! strive to have things be as monomorphic as possible in `wasmtime` so this
50//! type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//! Stored inline in the outer type the "opaque" here means that it's a
54//! "store" but it doesn't have access to the `T`. This is the primary
55//! "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//! internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//! All references of Wasm items into a `Store` are actually indices into a
60//! table in this structure, and the `StoreData` being separate makes it a bit
61//! easier to manage/define/work with. There's no real fundamental reason this
//! is split out, although sometimes it's useful to have separate borrows into
//! these tables from the rest of the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(feature = "debug")]
80use crate::DebugHandler;
81#[cfg(all(feature = "gc", feature = "debug"))]
82use crate::OwnedRooted;
83use crate::RootSet;
84#[cfg(feature = "gc")]
85use crate::ThrownException;
86#[cfg(feature = "component-model-async")]
87use crate::component::ComponentStoreData;
88#[cfg(feature = "component-model")]
89use crate::component::concurrent;
90#[cfg(feature = "async")]
91use crate::fiber;
92use crate::module::RegisteredModuleId;
93use crate::prelude::*;
94#[cfg(feature = "gc")]
95use crate::runtime::vm::GcRootsList;
96#[cfg(feature = "stack-switching")]
97use crate::runtime::vm::VMContRef;
98use crate::runtime::vm::mpk::ProtectionKey;
99use crate::runtime::vm::{
100 self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
101 InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
102 SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
103 VMStoreContext,
104};
105use crate::trampoline::VMHostGlobalContext;
106use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
107#[cfg(feature = "gc")]
108use crate::{ExnRef, Rooted};
109use crate::{Global, Instance, Table, Uninhabited};
110use alloc::sync::Arc;
111use core::fmt;
112use core::marker;
113use core::mem::{self, ManuallyDrop, MaybeUninit};
114use core::num::NonZeroU64;
115use core::ops::{Deref, DerefMut};
116use core::pin::Pin;
117use core::ptr::NonNull;
118use wasmtime_environ::StaticModuleIndex;
119use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
120
121mod context;
122pub use self::context::*;
123mod data;
124pub use self::data::*;
125mod func_refs;
126use func_refs::FuncRefs;
127#[cfg(feature = "component-model-async")]
128mod token;
129#[cfg(feature = "component-model-async")]
130pub(crate) use token::StoreToken;
131#[cfg(feature = "async")]
132mod async_;
133#[cfg(all(feature = "async", feature = "call-hook"))]
134pub use self::async_::CallHookHandler;
135
136#[cfg(feature = "gc")]
137use super::vm::VMExnRef;
138#[cfg(feature = "gc")]
139mod gc;
140
141/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
142///
143/// All WebAssembly instances and items will be attached to and refer to a
144/// [`Store`]. For example instances, functions, globals, and tables are all
145/// attached to a [`Store`]. Instances are created by instantiating a
146/// [`Module`](crate::Module) within a [`Store`].
147///
148/// A [`Store`] is intended to be a short-lived object in a program. No form
149/// of GC is implemented at this time so once an instance is created within a
150/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
151/// This makes [`Store`] unsuitable for creating an unbounded number of
152/// instances in it because [`Store`] will never release this memory. It's
153/// recommended to have a [`Store`] correspond roughly to the lifetime of a
154/// "main instance" that an embedding is interested in executing.
155///
156/// ## Type parameter `T`
157///
158/// Each [`Store`] has a type parameter `T` associated with it. This `T`
159/// represents state defined by the host. This state will be accessible through
160/// the [`Caller`](crate::Caller) type that host-defined functions get access
161/// to. This `T` is suitable for storing `Store`-specific information which
162/// imported functions may want access to.
163///
164/// The data `T` can be accessed through methods like [`Store::data`] and
165/// [`Store::data_mut`].
166///
167/// ## Stores, contexts, oh my
168///
169/// Most methods in Wasmtime take something of the form
170/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
171/// the first argument. These two traits allow ergonomically passing in the
172/// context you currently have to any method. The primary two sources of
173/// contexts are:
174///
175/// * `Store<T>`
176/// * `Caller<'_, T>`
177///
178/// corresponding to what you create and what you have access to in a host
179/// function. You can also explicitly acquire a [`StoreContext`] or
180/// [`StoreContextMut`] and pass that around as well.
181///
182/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
183/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
184/// form of context you have you can call various methods, create objects, etc.
185///
186/// ## Stores and `Default`
187///
188/// You can create a store with default configuration settings using
189/// `Store::default()`. This will create a brand new [`Engine`] with default
190/// configuration (see [`Config`](crate::Config) for more information).
191///
192/// ## Cross-store usage of items
193///
194/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
195/// [`Store`]. The store they belong to is the one they were created with
196/// (passed in as a parameter) or instantiated with. This store is the only
197/// store that can be used to interact with wasm items after they're created.
198///
199/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
200/// operations is incorrect. In other words it's considered a programmer error
201/// rather than a recoverable error for the wrong [`Store`] to be used when
202/// calling APIs.
pub struct Store<T: 'static> {
    // The entire contents of the store live behind this single `Box`, which
    // keeps `Store<T>` itself just one pointer in size (a load-bearing detail;
    // see the module docs above).
    //
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
207
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // `matches!` keeps the two host-entry transitions explicit without a
        // catch-all arm obscuring the remaining variants.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }
    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
238
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Embedder-configured resource limiter, if any, stored as a projection
    /// closure out of `T`; see `ResourceLimiterInner`.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Embedder-configured hook invoked on host/wasm call transitions, if
    /// any; see `CallHook` for the transitions reported.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch passes this store's deadline,
    /// returning an `UpdateDeadline` describing what to do next.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// The handler itself needs `&self` while also being invoked with the
    /// whole store mutably (via `StoreContextMut`), so it is temporarily
    /// taken out of this field while being invoked and re-installed
    /// afterwards; see `StoreDebugHandler::handle`.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
278
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Dispatches `event` to the user's handler, returning the future that
    /// runs it.
    ///
    /// Takes `self: Box<Self>` because the handler is temporarily removed
    /// from the store before invocation; the implementation re-installs it.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
289
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    /// Clones the user's handler, re-installs `self` on the store, and
    /// returns a boxed future that runs the user's `DebugHandler::handle`.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it.
        store.0.debug_handler = Some(self);
        Box::new(async move { handler.handle(store, event).await })
    }
}
312
/// Storage for an embedder-configured resource limiter.
///
/// Held as a closure projecting `&mut T` to the limiter so that the limiter
/// can live inside the user's `T` data itself.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter; see [`crate::ResourceLimiter`].
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter; see [`crate::ResourceLimiterAsync`].
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
318
/// Representation of a configured resource limiter for a store.
///
/// This is acquired with `resource_limiter_and_store_opaque` for example and is
/// threaded through to growth operations on tables/memories. Note that this is
/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
/// efficient to pass around (nullable pointer) and it's also notably passed
/// around as an `Option` to represent how this is optionally specified within a
/// store.
pub enum StoreResourceLimiter<'a> {
    /// Borrowed synchronous limiter.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// Borrowed asynchronous limiter.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
332
impl StoreResourceLimiter<'_> {
    /// Asks the limiter whether a memory may grow from `current` to `desired`
    /// (with optional `maximum`), awaiting the answer for async limiters.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a memory growth operation failed with
    /// `error`. Synchronous for both limiter flavors.
    pub(crate) fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Asks the limiter whether a table may grow from `current` to `desired`
    /// elements (with optional `maximum`), awaiting for async limiters.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a table growth operation failed with
    /// `error`. Synchronous for both limiter flavors.
    pub(crate) fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
376
/// Storage for a store's configured call hook: either a synchronous closure
/// or an async handler, invoked on the transitions described by `CallHook`.
enum CallHookInner<T: 'static> {
    /// Synchronous hook closure.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook handler.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Never-constructed variant (contains `Uninhabited`) that keeps the `T`
    /// parameter used even when the hook features above are disabled.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
391
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the value returned by the callback installed in
/// `StoreInner::epoch_deadline_behavior`.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
419
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`. This is
// how most internal code, which is monomorphic over `StoreOpaque`, accesses
// the store through the generic `StoreInner<T>` wrapper.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
433
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store was created with and belongs to.
    engine: Engine,
    /// Raw VM-visible state for this store; `Store::new` records a
    /// type-erased pointer to the user's `T` in here.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated within this store, including internal "dummy"
    /// instances such as the default callee created in `Store::new`.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Embedder-configured signal handler, if any.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules instantiated into this store; instances refer back
    /// to it via `RegisteredModuleId`.
    modules: ModuleRegistry,
    /// Function-reference state for this store; see the `func_refs` module.
    func_refs: FuncRefs,
    /// Contexts for host-created globals (see `VMHostGlobalContext`).
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    /// GC heap state; `None` when no GC heap has been allocated for this
    /// store (it starts out `None` in `Store::new`).
    gc_store: Option<GcStore>,
    /// Rooted GC references owned by this store.
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// Fiber state used for async execution.
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-pointer to this store as a `dyn VMStore`; see `StorePtr`.
    traitobj: StorePtr,
    /// `VMContext` of the dummy "default callee" instance allocated in
    /// `Store::new` (dangling until then).
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
    #[cfg(feature = "component-model")]
    concurrent_state: concurrent::ConcurrentState,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
553
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);

// SAFETY: We can't make `VMStore: Send + Sync` because that requires making
// all of Wasmtime's internals generic over the `Store`'s `T`. So instead, we
// take care in the whole VM layer to only use the `VMStore` in ways that are
// `Send`- and `Sync`-safe and we have to have these unsafe impls.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
567
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm is executed by the Pulley interpreter, whose state lives here.
    Interpreter(Interpreter),
    /// Wasm is executed natively; no per-store executor state is needed.
    #[cfg(has_host_compiler_backend)]
    Native,
}
577
impl Executor {
    /// Creates the executor appropriate for `engine`: the Pulley interpreter
    /// when the engine targets Pulley, otherwise native execution.
    pub(crate) fn new(engine: &Engine) -> Self {
        // Note: only one of the two cfg'd blocks below is ever compiled, and
        // each is the tail expression of this function in its configuration.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        #[cfg(not(has_host_compiler_backend))]
        {
            // Without a host compiler backend the interpreter is the only
            // option, so the engine's target must already be Pulley.
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
593
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; no borrowed state required.
    #[cfg(has_host_compiler_backend)]
    Native,
}
600
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is held in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered; `false` when the `gc`
    /// feature is disabled, no GC heap exists, or the scope was explicitly
    /// disabled via `AutoAssertNoGc::disabled`.
    entered: bool,
}
607
608impl<'a> AutoAssertNoGc<'a> {
609 #[inline]
610 pub fn new(store: &'a mut StoreOpaque) -> Self {
611 let entered = if !cfg!(feature = "gc") {
612 false
613 } else if let Some(gc_store) = store.gc_store.as_mut() {
614 gc_store.gc_heap.enter_no_gc_scope();
615 true
616 } else {
617 false
618 };
619
620 AutoAssertNoGc { store, entered }
621 }
622
623 /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
624 /// disables checks for no GC happening for the duration of this value.
625 ///
626 /// This is used when it is statically otherwise known that a GC doesn't
627 /// happen for the various types involved.
628 ///
629 /// # Unsafety
630 ///
631 /// This method is `unsafe` as it does not provide the same safety
632 /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
633 /// caller that a GC doesn't happen.
634 #[inline]
635 pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
636 if cfg!(debug_assertions) {
637 AutoAssertNoGc::new(store)
638 } else {
639 AutoAssertNoGc {
640 store,
641 entered: false,
642 }
643 }
644 }
645}
646
647impl core::ops::Deref for AutoAssertNoGc<'_> {
648 type Target = StoreOpaque;
649
650 #[inline]
651 fn deref(&self) -> &Self::Target {
652 &*self.store
653 }
654}
655
656impl core::ops::DerefMut for AutoAssertNoGc<'_> {
657 #[inline]
658 fn deref_mut(&mut self) -> &mut Self::Target {
659 &mut *self.store
660 }
661}
662
663impl Drop for AutoAssertNoGc<'_> {
664 #[inline]
665 fn drop(&mut self) {
666 if self.entered {
667 self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
668 }
669 }
670}
671
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The allocated instance itself.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
680
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
697
698impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        // Reserve a protection key from the engine's allocator, if one is
        // available (MPK support).
        let pkey = engine.allocator().next_available_pkey();

        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // Filled in below once the `StoreInner` box exists to point at.
            traitobj: StorePtr(None),
            // Replaced below with the default callee's vmctx.
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model")]
            concurrent_state: Default::default(),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        });

        // Record a type-erased pointer to the user's `T` in the VM store
        // context; see `data_no_provenance` for why access goes through this
        // pointer.
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Now that the box exists, record the self-referential `dyn VMStore`
        // pointer which gets copied into instances during instantiation.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::new(StaticModuleIndex::from_u32(
            0,
        )));
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        // The empty shim module should always validate against the on-demand
        // allocator, hence the `unwrap`.
        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let id = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                &shim,
                Default::default(),
            ))
            .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
817
    /// Access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data(&self) -> &T {
        // Delegates to `StoreInner::data`, which performs a
        // provenance-preserving read of the store's `T`.
        self.inner.data()
    }
823
    /// Access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        // Delegates to `StoreInner::data_mut`, which performs a
        // provenance-preserving access of the store's `T`.
        self.inner.data_mut()
    }
829
    /// Runs teardown steps that must happen before the store's internal
    /// state is dropped (see `Store::into_data`, which calls this before
    /// extracting `T`).
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        //
        // This will also drop any futures which might use a `&Accessor` fields
        // in their `Drop::drop` implementations, in which case they'll need to
        // be called from with in the context of a `tls::set` closure.
        #[cfg(feature = "component-model-async")]
        ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
846
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    ///
    /// Store-internal state is torn down first (via
    /// `run_manual_drop_routines`); only the `T` survives and is returned.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
881
882 /// Configures the [`ResourceLimiter`] used to limit resource creation
883 /// within this [`Store`].
884 ///
885 /// Whenever resources such as linear memory, tables, or instances are
886 /// allocated the `limiter` specified here is invoked with the store's data
887 /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
888 /// being allocated. The returned [`ResourceLimiter`] is intended to live
889 /// within the `T` itself, for example by storing a
890 /// [`StoreLimits`](crate::StoreLimits).
891 ///
892 /// Note that this limiter is only used to limit the creation/growth of
893 /// resources in the future, this does not retroactively attempt to apply
894 /// limits to the [`Store`].
895 ///
896 /// # Examples
897 ///
898 /// ```
899 /// use wasmtime::*;
900 ///
901 /// struct MyApplicationState {
902 /// my_state: u32,
903 /// limits: StoreLimits,
904 /// }
905 ///
906 /// let engine = Engine::default();
907 /// let my_state = MyApplicationState {
908 /// my_state: 42,
909 /// limits: StoreLimitsBuilder::new()
910 /// .memory_size(1 << 20 /* 1 MB */)
911 /// .instances(2)
912 /// .build(),
913 /// };
914 /// let mut store = Store::new(&engine, my_state);
915 /// store.limiter(|state| &mut state.limits);
916 ///
917 /// // Creation of smaller memories is allowed
918 /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
919 ///
920 /// // Creation of a larger memory, however, will exceed the 1MB limit we've
921 /// // configured
922 /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
923 ///
924 /// // The number of instances in this store is limited to 2, so the third
925 /// // instance here should fail.
926 /// let module = Module::new(&engine, "(module)").unwrap();
927 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
928 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
929 /// assert!(Instance::new(&mut store, &module, &[]).is_err());
930 /// ```
931 ///
932 /// [`ResourceLimiter`]: crate::ResourceLimiter
933 pub fn limiter(
934 &mut self,
935 mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
936 ) {
937 // Apply the limits on instances, tables, and memory given by the limiter:
938 let inner = &mut self.inner;
939 let (instance_limit, table_limit, memory_limit) = {
940 let l = limiter(inner.data_mut());
941 (l.instances(), l.tables(), l.memories())
942 };
943 let innermost = &mut inner.inner;
944 innermost.instance_limit = instance_limit;
945 innermost.table_limit = table_limit;
946 innermost.memory_limit = memory_limit;
947
948 // Save the limiter accessor function:
949 inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
950 }
951
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Stores the hook; it is invoked from `StoreInner::call_hook` on each
        // wasm<->host transition.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
973
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // Forwarded to the engine stored in `StoreOpaque`.
        self.inner.engine()
    }
978
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // Reborrow as a `StoreContextMut` and use its synchronous GC entry
        // point.
        StoreContextMut(&mut self.inner).gc(why)
    }
996
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }
1007
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// See also [`Store::get_fuel`] to read the remaining amount back.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }
1030
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // Validation (async support required) and storage happen internally.
        self.inner.fuel_async_yield_interval(interval)
    }
1062
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
1093
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }
1124
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // The callback is boxed and stored; it replaces any previously
        // configured epoch-deadline behavior.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1159
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To inspect or take the pending exception, use
    /// [`peek_pending_exception`] and [`take_pending_exception`]. For
    /// a convenient wrapper that invokes a closure and provides any
    /// caught exception from the closure to a separate handler
    /// closure, see [`StoreContextMut::catch`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the pending exception in the store, then signal it to the
        // caller via the `ThrownException` error value.
        self.inner.throw_impl(exception);
        Err(ThrownException)
    }
1198
    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::catch`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        // Clears the slot and re-roots the exception in the current scope.
        self.inner.take_pending_exception_rooted()
    }
1219
    /// Tests whether there is a pending exception.
    ///
    /// Ordinarily, a pending exception will be set on a store if and
    /// only if a host-side callstack is propagating a
    /// [`crate::ThrownException`] error. The final consumer that
    /// catches the exception takes it; it may re-place it to re-throw
    /// (using [`throw`]) if it chooses not to actually handle the
    /// exception.
    ///
    /// This method is useful to tell whether a store is in this
    /// state, but should not be used as part of the ordinary
    /// exception-handling flow. For the most idiomatic handling, see
    /// [`StoreContextMut::catch`].
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        // Non-destructive check: the slot itself is left untouched.
        self.inner.pending_exception.is_some()
    }
1237
    /// Provide an object that views Wasm stack state, including Wasm
    /// VM-level values (locals and operand stack), when debugging is
    /// enabled.
    ///
    /// This object views the frames from the most recent Wasm entry
    /// onward (up to the exit that allows this host code to run). Any
    /// Wasm stack frames upward from the most recent entry to Wasm
    /// are not visible to this cursor.
    ///
    /// Returns `None` if debug instrumentation is not enabled for
    /// the engine containing this store.
    #[cfg(feature = "debug")]
    pub fn debug_frames(&mut self) -> Option<crate::DebugFrameCursor<'_, T>> {
        // Implemented on `StoreContextMut`; reborrow the store to call it.
        self.as_context_mut().debug_frames()
    }
1253
    /// Set the debug callback on this store.
    ///
    /// See [`crate::DebugHandler`] for more documentation.
    ///
    /// # Panics
    ///
    /// - Will panic if this store is not configured for async
    ///   support.
    /// - Will panic if guest-debug support was not enabled via
    ///   [`crate::Config::guest_debug`].
    #[cfg(feature = "debug")]
    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
    where
        // We require `Send` here because the debug handler becomes
        // referenced from a future: when `DebugHandler::handle` is
        // invoked, its `self` references the `handler` with the
        // user's state. Note that we are careful to keep this bound
        // constrained to debug-handler-related code only and not
        // propagate it outward to the store in general. The presence
        // of the trait implementation serves as a witness that `T:
        // Send`. This is required in particular because we will have
        // a `&mut dyn VMStore` on the stack when we pause a fiber
        // with `block_on` to run a debugger hook; that `VMStore` must
        // be a `Store<T> where T: Send`.
        T: Send,
    {
        // Check the documented preconditions eagerly so misconfiguration
        // panics at registration time rather than at first use of the hook.
        assert!(
            self.inner.async_support(),
            "debug hooks rely on async support"
        );
        assert!(
            self.engine().tunables().debug_guest,
            "debug hooks require guest debugging to be enabled"
        );
        self.inner.debug_handler = Some(Box::new(handler));
    }
1290
    /// Clear the debug handler on this store. If any existed, it will
    /// be dropped.
    #[cfg(feature = "debug")]
    pub fn clear_debug_handler(&mut self) {
        // Replacing `Some(handler)` with `None` drops the boxed handler here.
        self.inner.debug_handler = None;
    }
1297}
1298
impl<'a, T> StoreContext<'a, T> {
    /// Returns whether the underlying store was configured for async support.
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1323
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // This synchronous entry point requires a non-async store:
        // `assert_ready` below relies on the GC future completing without
        // ever yielding.
        assert!(!self.0.async_support());
        let (mut limiter, store) = self.0.resource_limiter_and_store_opaque();
        vm::assert_ready(store.gc(limiter.as_mut(), None, why.map(|e| e.bytes_needed())));
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
1420
impl<T> StoreInner<T> {
    /// Provenance-correct shared access to the store's `T` data.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }

    /// Split-borrows the store into its `T` data, its optional resource
    /// limiter, and the type-erased `StoreOpaque`, all usable simultaneously.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }

    /// Provenance-correct mutable access to the store's `T` data.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }

    /// Fires any configured call-hook / pkey behavior for the `s` state
    /// transition, with an inlined fast path for the common case where
    /// neither is configured.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    /// Out-of-line portion of `call_hook`: switches pkey protection for the
    /// transition direction and then invokes the user's hook, if any.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatches on the configured hook flavor (sync closure vs. async
    /// handler) and runs it for the `s` transition.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    /// No-op shim so callers can unconditionally flush fiber stacks even when
    /// the `async` feature (and hence real fibers) is disabled.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1527
/// Computes the total fuel remaining given the VM's `injected_fuel` counter
/// and the off-to-the-side `fuel_reserve`.
///
/// The VM consumes fuel by incrementing `injected_fuel` toward zero, so a
/// negative value means `-injected_fuel` units of the active allotment remain,
/// while a positive value is treated as a debit against the reserve. The
/// result saturates at `0` and `u64::MAX`.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    if injected_fuel <= 0 {
        // `unsigned_abs` avoids the overflow (debug-build panic) that
        // literally negating `i64::MIN` via `-injected_fuel` would trigger.
        fuel_reserve.saturating_add(injected_fuel.unsigned_abs())
    } else {
        fuel_reserve.saturating_sub(injected_fuel as u64)
    }
}
1531
1532// Add remaining fuel from the reserve into the active fuel if there is any left.
1533fn refuel(
1534 injected_fuel: &mut i64,
1535 fuel_reserve: &mut u64,
1536 yield_interval: Option<NonZeroU64>,
1537) -> bool {
1538 let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1539 if fuel > 0 {
1540 set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1541 true
1542 } else {
1543 false
1544 }
1545}
1546
/// Distributes `new_fuel_amount` between the VM's active fuel counter and the
/// store-side reserve.
///
/// The VM only receives up to one yield-interval's worth of fuel at a time
/// (and never more than fits in an `i64`); the remainder is banked in
/// `fuel_reserve` for later re-injection.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // Cap per-injection fuel by the yield interval (if any) and by what an
    // `i64` can represent, since the VM stores fuel as an `i64`.
    let per_injection_cap = yield_interval
        .unwrap_or(NonZeroU64::MAX)
        .get()
        .min(i64::MAX as u64);
    let injected = new_fuel_amount.min(per_injection_cap);
    // Whatever doesn't fit in this injection goes to the reserve.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM increments this counter as fuel is consumed and halts once it
    // turns positive, so the injection is stored as a negative count.
    *injected_fuel = -(injected as i64);
}
1566
1567#[doc(hidden)]
1568impl StoreOpaque {
    /// Returns this store's unique identifier, as managed by its `StoreData`.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1572
1573 pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1574 fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1575 let new = slot.saturating_add(amt);
1576 if new > max {
1577 bail!("resource limit exceeded: {desc} count too high at {new}");
1578 }
1579 *slot = new;
1580 Ok(())
1581 }
1582
1583 let module = module.env_module();
1584 let memories = module.num_defined_memories();
1585 let tables = module.num_defined_tables();
1586
1587 bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1588 bump(
1589 &mut self.memory_count,
1590 self.memory_limit,
1591 memories,
1592 "memory",
1593 )?;
1594 bump(&mut self.table_count, self.table_limit, tables, "table")?;
1595
1596 Ok(())
1597 }
1598
    /// Returns whether this store was configured for async support.
    #[inline]
    pub fn async_support(&self) -> bool {
        // Compile-time gate first: always `false` when the `async` feature is
        // disabled, regardless of the engine's runtime configuration.
        cfg!(feature = "async") && self.engine().config().async_support
    }
1603
    /// Returns the [`Engine`] this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1608
    /// Shared borrow of this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1613
1614 #[inline]
1615 pub fn store_data_mut(&mut self) -> &mut StoreData {
1616 &mut self.store_data
1617 }
1618
1619 #[inline]
1620 pub(crate) fn modules(&self) -> &ModuleRegistry {
1621 &self.modules
1622 }
1623
1624 #[inline]
1625 pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
1626 &mut self.modules
1627 }
1628
    /// Split-borrow accessor returning mutable `FuncRefs` alongside a shared
    /// borrow of the module registry, allowing both to be used at once.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }

    /// Shared access to the host-created globals stored in this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }

    /// Mutable access to the host-created globals stored in this store.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1644
1645 pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1646 instance.store_id().assert_belongs_to(self.id());
1647 match self.instances[instance.instance()].kind {
1648 StoreInstanceKind::Dummy => None,
1649 StoreInstanceKind::Real { module_id } => {
1650 let module = self
1651 .modules()
1652 .lookup_module_by_id(module_id)
1653 .expect("should always have a registered module for real instances");
1654 Some(module)
1655 }
1656 }
1657 }
1658
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// Panics if `id` is not a valid index into this store's instances.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }

    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// Panics if `id` is not a valid index into this store's instances.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1678
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` fails on overlapping ids; the `unwrap` here is
        // the panic documented above.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        // Disjoint field borrows: `gc_store` is independent of `instances`.
        (self.gc_store.as_mut(), instances)
    }
1704
1705 /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
1706 pub fn optional_gc_store_and_instance_mut(
1707 &mut self,
1708 id: InstanceId,
1709 ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
1710 (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
1711 }
1712
1713 /// Get all instances (ignoring dummy instances) within this store.
1714 pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1715 let instances = self
1716 .instances
1717 .iter()
1718 .filter_map(|(id, inst)| {
1719 if let StoreInstanceKind::Dummy = inst.kind {
1720 None
1721 } else {
1722 Some(id)
1723 }
1724 })
1725 .collect::<Vec<_>>();
1726 instances
1727 .into_iter()
1728 .map(|i| Instance::from_wasmtime(i, self))
1729 }
1730
1731 /// Get all memories (host- or Wasm-defined) within this store.
1732 pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
1733 // NB: Host-created memories have dummy instances. Therefore, we can get
1734 // all memories in the store by iterating over all instances (including
1735 // dummy instances) and getting each of their defined memories.
1736 let id = self.id();
1737 self.instances
1738 .iter()
1739 .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
1740 }
1741
1742 /// Iterate over all tables (host- or Wasm-defined) within this store.
1743 pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
1744 // NB: Host-created tables have dummy instances. Therefore, we can get
1745 // all tables in the store by iterating over all instances (including
1746 // dummy instances) and getting each of their defined memories.
1747 for id in self.instances.keys() {
1748 let instance = StoreInstanceId::new(self.id(), id);
1749 for table in 0..self.instance(id).env_module().num_defined_tables() {
1750 let table = DefinedTableIndex::new(table);
1751 f(self, Table::from_raw(instance, table));
1752 }
1753 }
1754 }
1755
1756 /// Iterate over all globals (host- or Wasm-defined) within this store.
1757 pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
1758 // First enumerate all the host-created globals.
1759 for global in self.host_globals.keys() {
1760 let global = Global::new_host(self, global);
1761 f(self, global);
1762 }
1763
1764 // Then enumerate all instances' defined globals.
1765 for id in self.instances.keys() {
1766 for index in 0..self.instance(id).env_module().num_defined_globals() {
1767 let index = DefinedGlobalIndex::new(index);
1768 let global = Global::new_instance(self, id, index);
1769 f(self, global);
1770 }
1771 }
1772 }
1773
    /// Installs (or clears, with `None`) the custom signal handler for this
    /// store.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }

    /// Shared access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }

    /// Mutable access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1788
1789 /// Performs a lazy allocation of the `GcStore` within this store, returning
1790 /// the previous allocation if it's already present.
1791 ///
1792 /// This method will, if necessary, allocate a new `GcStore` -- linear
1793 /// memory and all. This is a blocking operation due to
1794 /// `ResourceLimiterAsync` which means that this should only be executed
1795 /// in a fiber context at this time.
1796 #[inline]
1797 pub(crate) async fn ensure_gc_store(
1798 &mut self,
1799 limiter: Option<&mut StoreResourceLimiter<'_>>,
1800 ) -> Result<&mut GcStore> {
1801 if self.gc_store.is_some() {
1802 return Ok(self.gc_store.as_mut().unwrap());
1803 }
1804 self.allocate_gc_store(limiter).await
1805 }
1806
    /// Allocates this store's `GcStore` — linear memory and all — and
    /// publishes its memory definition into the `VMStoreContext`.
    ///
    /// Out-of-line (`inline(never)`) since `ensure_gc_store` inlines the
    /// common already-allocated fast path.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // The heap must not already exist and the context must still hold
        // its initial placeholder values (dangling base, zero length).
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        // Publish the heap's memory definition to the VM-visible context,
        // then stash the store itself and hand back a borrow of it.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // The two nested helpers below are the real implementation and a
        // stub, selected by whether `gc` was enabled at compile time.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::{StaticModuleIndex, packed_option::ReservedValue};

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            // The request uses a bare, empty module since there's no real
            // wasm module behind this memory.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(wasmtime_environ::Module::new(
                    StaticModuleIndex::from_u32(0),
                ))),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1876
1877 /// Helper method to require that a `GcStore` was previously allocated for
1878 /// this store, failing if it has not yet been allocated.
1879 ///
1880 /// Note that this should only be used in a context where allocation of a
1881 /// `GcStore` is sure to have already happened prior, otherwise this may
1882 /// return a confusing error to embedders which is a bug in Wasmtime.
1883 ///
1884 /// Some situations where it's safe to call this method:
1885 ///
1886 /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
1887 /// this shows proof that the `GcStore` was previously allocated.
1888 /// * During instantiation and instance's `needs_gc_heap` flag will be
1889 /// handled and instantiation will automatically create a GC store.
1890 #[inline]
1891 #[cfg(feature = "gc")]
1892 pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
1893 match &self.gc_store {
1894 Some(gc_store) => Ok(gc_store),
1895 None => bail!("GC heap not initialized yet"),
1896 }
1897 }
1898
1899 /// Same as [`Self::require_gc_store`], but mutable.
1900 #[inline]
1901 #[cfg(feature = "gc")]
1902 pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
1903 match &mut self.gc_store {
1904 Some(gc_store) => Ok(gc_store),
1905 None => bail!("GC heap not initialized yet"),
1906 }
1907 }
1908
1909 /// Attempts to access the GC store that has been previously allocated.
1910 ///
1911 /// This method will return `Some` if the GC store was previously allocated.
1912 /// A `None` return value means either that the GC heap hasn't yet been
1913 /// allocated or that it does not need to be allocated for this store. Note
1914 /// that to require a GC store in a particular situation it's recommended to
1915 /// use [`Self::require_gc_store_mut`] instead.
1916 #[inline]
1917 pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
1918 if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1919 debug_assert!(self.gc_store.is_none());
1920 None
1921 } else {
1922 self.gc_store.as_mut()
1923 }
1924 }
1925
1926 /// Helper to assert that a GC store was previously allocated and is
1927 /// present.
1928 ///
1929 /// # Panics
1930 ///
1931 /// This method will panic if the GC store has not yet been allocated. This
1932 /// should only be used in a context where there's an existing GC reference,
1933 /// for example, or if `ensure_gc_store` has already been called.
1934 #[inline]
1935 #[track_caller]
1936 pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
1937 self.gc_store
1938 .as_ref()
1939 .expect("attempted to access the store's GC heap before it has been allocated")
1940 }
1941
1942 /// Same as [`Self::unwrap_gc_store`], but mutable.
1943 #[inline]
1944 #[track_caller]
1945 pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
1946 self.gc_store
1947 .as_mut()
1948 .expect("attempted to access the store's GC heap before it has been allocated")
1949 }
1950
    /// Shared access to this store's set of embedder-managed GC roots.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }

    /// Mutable access to this store's set of embedder-managed GC roots.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }

    /// Pops LIFO-scoped GC roots back down to `scope`, a value obtained when
    /// the scope was entered.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1966
    /// Performs a garbage collection: traces all roots into a list, then runs
    /// the collector over them. No-op if no GC heap has been allocated.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots).await;
        let async_yield = self.async_support();
        // SAFETY(review): `roots` is not mutated while the iterator returned
        // by `roots.iter()` is live inside `gc` — confirm against
        // `GcRootsList::iter`'s contract.
        self.unwrap_gc_store_mut()
            .gc(async_yield, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC. Clearing first means the list
        // is empty (as `trace_roots` asserts) but retains its capacity.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
1992
    /// Records every category of GC root into `gc_roots_list`: wasm stacks,
    /// (optionally) suspended continuations, vmctx globals/tables, embedder
    /// roots, and any pending exception.
    ///
    /// When async support is enabled this yields between tracing phases,
    /// presumably to keep long traces from monopolizing the executor — TODO
    /// confirm intent.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            #[cfg(feature = "async")]
            if self.async_support() {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2023
    /// Records the live GC references of a single Wasm stack `frame` into
    /// `gc_roots_list`, using the owning module's stack maps (and, with the
    /// `debug` feature, its frame table).
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        let module_info = self
            .modules()
            .lookup_module_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_info.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            // SAFETY(review): relies on `fp` being the valid frame pointer of
            // a live frame whose layout matches this stack map — confirm
            // against `StackMap::sp`/`live_gc_refs` contracts.
            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // With debug info available, frame tables can describe additional GC
        // refs in the frame beyond the stack map.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_info.frame_table() {
            let relpc = module_info.text_offset(pc);
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2068
    /// Reads the raw GC reference stored at `stack_slot` and, if it is a
    /// non-null reference, records the slot's address as a wasm stack root.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be a valid, readable pointer into a live Wasm frame
    /// whose pointee remains valid for as long as `gc_roots_list` holds it.
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        // SAFETY: validity of `stack_slot` is the caller's obligation, per
        // this function's contract.
        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            // `unwrap` is fine: a readable slot pointer is necessarily
            // non-null.
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2085
    /// Walks the currently-running Wasm stack via `Backtrace` and records the
    /// GC roots of every frame.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            // Always visit every frame; never cut the walk short.
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2098
    /// Records GC roots held on the stacks of suspended stack-switching
    /// continuations; other continuation states need no tracing here.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2137
    /// Records GC roots reachable from instance state: every global and every
    /// table in the store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }

    /// Records GC roots created by the embedder (the store's `RootSet`).
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }

    /// Records this store's pending exception, if any, as a GC root.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            // SAFETY(review): the root points into `self.pending_exception`,
            // which outlives this GC's use of `gc_roots_list` — confirm
            // against `GcRootsList::add_root`'s contract.
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }

    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        self.gc_host_alloc_types.insert(ty);
    }
2175
2176 /// Helper function execute a `init_gc_ref` when placing `gc_ref` in `dest`.
2177 ///
2178 /// This avoids allocating `GcStore` where possible.
2179 pub(crate) fn init_gc_ref(
2180 &mut self,
2181 dest: &mut MaybeUninit<Option<VMGcRef>>,
2182 gc_ref: Option<&VMGcRef>,
2183 ) {
2184 if GcStore::needs_init_barrier(gc_ref) {
2185 self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
2186 } else {
2187 dest.write(gc_ref.map(|r| r.copy_i31()));
2188 }
2189 }
2190
2191 /// Helper function execute a write barrier when placing `gc_ref` in `dest`.
2192 ///
2193 /// This avoids allocating `GcStore` where possible.
2194 pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
2195 GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
2196 }
2197
2198 /// Helper function to clone `gc_ref` notably avoiding allocating a
2199 /// `GcStore` where possible.
2200 pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
2201 if gc_ref.is_i31() {
2202 gc_ref.copy_i31()
2203 } else {
2204 self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
2205 }
2206 }
2207
    /// Returns the total fuel remaining (active VM counter plus reserve), or
    /// an error if fuel consumption isn't enabled for this store.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // SAFETY(review): `fuel_consumed` is an `UnsafeCell` shared with
        // running wasm; presumably it's only accessed here while wasm isn't
        // concurrently mutating it — confirm against `VMStoreContext` docs.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }

    /// Moves fuel from the reserve into the VM's active counter, returning
    /// whether any fuel was available to inject.
    pub(crate) fn refuel(&mut self) -> bool {
        // SAFETY(review): same `UnsafeCell` access pattern as `get_fuel`
        // above; `&mut self` should rule out concurrent VM access — confirm.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }

    /// Sets the total amount of fuel to `fuel`, splitting it between the VM's
    /// active counter and the store's reserve. Errors if fuel consumption
    /// isn't enabled for this store.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // SAFETY(review): same `UnsafeCell` access pattern as `get_fuel`
        // above; `&mut self` should rule out concurrent VM access — confirm.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2240
2241 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2242 anyhow::ensure!(
2243 self.engine().tunables().consume_fuel,
2244 "fuel is not configured in this store"
2245 );
2246 anyhow::ensure!(
2247 self.engine().config().async_support,
2248 "async support is not configured in this store"
2249 );
2250 anyhow::ensure!(
2251 interval != Some(0),
2252 "fuel_async_yield_interval must not be 0"
2253 );
2254 self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2255 // Reset the fuel active + reserve states by resetting the amount.
2256 self.set_fuel(self.get_fuel()?)
2257 }
2258
    /// Returns a raw pointer to the custom signal handler installed via
    /// `set_signal_handler`, if any.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        // `&SignalHandler` coerces to the raw pointer returned here.
        Some(handler)
    }

    /// Returns a non-null pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }

    /// Returns the `VMContext` used as the default caller for host-initiated
    /// calls.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }

    /// Returns this store's self-referential `dyn VMStore` trait object
    /// pointer; the `unwrap` relies on it having been initialized at store
    /// creation.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2279
2280 /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
2281 /// used as part of calling the host in a `Func::new` method invocation.
2282 #[inline]
2283 pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
2284 mem::take(&mut self.hostcall_val_storage)
2285 }
2286
2287 /// Restores the vector previously taken by `take_hostcall_val_storage`
2288 /// above back into the store, allowing it to be used in the future for the
2289 /// next wasm->host call.
2290 #[inline]
2291 pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
2292 if storage.capacity() > self.hostcall_val_storage.capacity() {
2293 self.hostcall_val_storage = storage;
2294 }
2295 }
2296
2297 /// Same as `take_hostcall_val_storage`, but for the direction of the host
2298 /// calling wasm.
2299 #[inline]
2300 pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
2301 mem::take(&mut self.wasm_val_raw_storage)
2302 }
2303
2304 /// Same as `save_hostcall_val_storage`, but for the direction of the host
2305 /// calling wasm.
2306 #[inline]
2307 pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
2308 if storage.capacity() > self.wasm_val_raw_storage.capacity() {
2309 self.wasm_val_raw_storage = storage;
2310 }
2311 }
2312
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // At most one instance's memory should claim the faulting
                // address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // The address wasn't in any known linear memory: abort the process
        // (strategy varies by platform capabilities below).
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

     pc: 0x{pc:x}
     address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2421
    /// Retrieve the store's protection key.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }

    /// Split-borrow accessor for component-model state: call contexts, the
    /// host resource handle table, and host resource data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }

    /// Records that a component instance was created within this store.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
2454
    /// Like [`Self::component_resource_state`] but additionally returns the
    /// pinned `ComponentInstance` for `instance`.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }

    /// Like [`Self::component_resource_state_with_instance`] but additionally
    /// returns this store's `ConcurrentState`.
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance_and_concurrent_state(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
        &mut concurrent::ConcurrentState,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
            &mut self.concurrent_state,
        )
    }
2493
    /// Mutable access to this store's async fiber state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }

    /// Mutable access to this store's component-model-async concurrent state.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState {
        &mut self.concurrent_state
    }

    /// Returns whether a protection key is associated with this store.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2508
    /// Returns a borrowed view of this store's executor (interpreter or
    /// native).
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }

    /// Swaps this store's executor with `executor`.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }

    /// Returns the unwinder matching this store's executor.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2529
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        // Continuation stacks are sized the same as async stacks.
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        // Grab the raw pointer before moving the `Box` into the store's
        // continuation list; the heap allocation is stable, so the pointer
        // remains valid for as long as the box (and thus the store) lives.
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
2544
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id the new instance will receive; this is verified
        // against the actual id with the `assert_eq!` below.
        let id = self.instances.next_key();

        // Real modules use the engine's configured allocator; dummy instances
        // use the caller-provided one.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        // Record the allocated instance in the store, tagging it with its
        // originating module (if any) for later lookups.
        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2620
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        // Any previously-pending exception is silently replaced.
        self.pending_exception = Some(exnref);
    }
2629
    /// Take a pending exception, if any, leaving `None` in its place.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2635
    /// Tests whether there is a pending exception.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2641
    /// Takes the pending exception, if any, returning it as a rooted
    /// `ExnRef` handle rather than a raw `VMExnRef`.
    #[cfg(feature = "gc")]
    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
        let vmexnref = self.take_pending_exception()?;
        // Rooting must not trigger a GC; assert that while creating the root.
        let mut nogc = AutoAssertNoGc::new(self);
        Some(Rooted::new(&mut nogc, vmexnref.into()))
    }
2648
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(&mut self) -> Option<OwnedRooted<ExnRef>> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception.take().map(|vmexnref| {
            // Clone the underlying GC ref and stash the clone back as the
            // pending exception, so the store still holds it while the caller
            // receives an independently-rooted handle to the original.
            let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
            nogc.pending_exception = Some(cloned.into_exnref_unchecked());
            OwnedRooted::new(&mut nogc, vmexnref.into())
        })
    }
2660
    /// Records `exception` as this store's pending exception, converting the
    /// rooted handle into the raw `VMExnRef` representation.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        let mut nogc = AutoAssertNoGc::new(self);
        // NOTE(review): the `unwrap` presumes `_to_raw` cannot fail for a
        // live rooted handle — confirm against `_to_raw`'s contract.
        let exnref = exception._to_raw(&mut nogc).unwrap();
        // A null raw value would mean a null exception, which is a caller bug.
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2670
    /// Sets this store's epoch deadline to `delta` ticks beyond the engine's
    /// current epoch.
    #[cfg(target_has_atomic = "64")]
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let current_epoch = self.engine().current_epoch();
        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
        *epoch_deadline = current_epoch + delta;
    }
2682
    /// Returns the currently-configured epoch deadline.
    ///
    /// Takes `&mut self` only because the deadline is read through `get_mut`
    /// on the shared cell in the vm store context.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2686}
2687
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2706
// SAFETY-related note: this is an `unsafe impl`; the soundness obligations of
// the `VMStore` trait are documented at its declaration (not visible in this
// chunk) — NOTE(review): see the trait definition for the exact contract.
unsafe impl<T> VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        // `StoreInner<T>` itself implements the async-store trait, so this is
        // just an unsizing coercion.
        self
    }

    // Shared view of the type-erased portion of the store.
    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    // Exclusive view of the type-erased portion of the store.
    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    /// Splits this store into its configured resource limiter (if any) and
    /// the rest of the store.
    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        // The configured limiter is a closure over the embedder's `T` data;
        // invoke it here to obtain the actual limiter.
        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        // With no callback configured, epoch expiry interrupts execution.
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }

    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> anyhow::Result<()> {
        // Take the handler out while it runs so it can't be re-entered.
        // NOTE(review): the handler is consumed by value and never stored
        // back — confirm whether it is expected to be re-registered elsewhere.
        if let Some(handler) = self.debug_handler.take() {
            log::trace!("about to raise debug event {event:?}");
            // Block the current (synchronous) frame on the handler's future.
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            // No handler registered: debug events are a no-op.
            Ok(())
        }
    }
}
2769
impl<T> StoreInner<T> {
    /// Configures epoch expiry to trap: clearing the callback makes
    /// `new_epoch_updated_deadline` produce `UpdateDeadline::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to be invoked whenever the epoch deadline is
    /// reached; its return value decides how (or whether) execution resumes.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2784
2785impl<T: Default> Default for Store<T> {
2786 fn default() -> Store<T> {
2787 Store::new(&Engine::default(), T::default())
2788 }
2789}
2790
2791impl<T: fmt::Debug> fmt::Debug for Store<T> {
2792 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2793 let inner = &**self.inner as *const StoreInner<T>;
2794 f.debug_struct("Store")
2795 .field("inner", &inner)
2796 .field("data", self.inner.data())
2797 .finish()
2798 }
2799}
2800
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run manual drop routines first, while the store is still fully
        // constructed.
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        unsafe {
            // Drop order matters: the embedder's `T` data is dropped before
            // the rest of the store's internals.
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2812
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            // Dummy instances were allocated with an on-demand-style
            // allocator (see `AllocateInstanceKind::Dummy`), so deallocate
            // them with one too rather than the engine's allocator.
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap and its backing memory to the engine's
            // allocator, if a GC heap was ever created for this store.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Deallocate every instance with the allocator flavor that
            // created it.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            // Keep the engine-wide component instance count balanced with the
            // instances this store created.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2851
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
/// Internal conversion from the various store-like types (`StoreOpaque`,
/// `dyn VMStore`, `Store<T>`, `StoreInner<T>`, and `&mut` to any of them)
/// into `&mut StoreOpaque`.
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2861
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Identity conversion.
        self
    }
}
2867
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Relies on deref from `dyn VMStore` to `StoreOpaque` — presumably a
        // `DerefMut` impl defined alongside the trait elsewhere in this file.
        self
    }
}
2873
impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Peel both layers: `Store<T>` -> `StoreInner<T>` -> `StoreOpaque`.
        &mut self.inner.inner
    }
}
2879
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Relies on deref from `StoreInner<T>` to `StoreOpaque` — presumably
        // a `DerefMut` impl defined elsewhere in this file.
        self
    }
}
2885
// Blanket impl so `&mut`-to-store-like types convert transparently.
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
2891
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal test harness around this module's fuel bookkeeping free
    /// functions (`get_fuel`/`refuel`/`set_fuel`), mirroring the fields a
    /// real store tracks.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit as some
        // instructions can consume multiple units of fuel at once. Refueling
        // should be strict in its consumption and not add more fuel than
        // there is.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }

    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        // Simulates what compiled wasm does: reach the `T` data through the
        // raw pointer recorded in the store's vm context rather than through
        // `data_mut`.
        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        // Alternate Rust-side and "wasm"-side mutation to exercise both paths.
        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}