wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
//! everything above is defined as `pub`, but only some of the items are
//! reexported to the outside world to be usable from outside this crate.
//! Otherwise all items are `pub` within this `store` module, and the `store`
//! module is private to the `wasmtime` crate. Notably `Store<T>`,
//! `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
//! * `Store<T>` - this type is just one pointer in size. It's primarily just
//!   intended to be consumed by the outside world. Note that the "just one
//!   pointer large" property is a load-bearing implementation detail in
//!   Wasmtime. This enables it to store a pointer to its own trait object
//!   which doesn't need to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
//!   is split out, although sometimes it's useful to have borrows into these
//!   tables that are separate from the rest of the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(feature = "debug")]
80use crate::DebugHandler;
81#[cfg(all(feature = "gc", feature = "debug"))]
82use crate::OwnedRooted;
83use crate::RootSet;
84#[cfg(feature = "gc")]
85use crate::ThrownException;
86#[cfg(feature = "component-model-async")]
87use crate::component::ComponentStoreData;
88#[cfg(feature = "component-model")]
89use crate::component::concurrent;
90#[cfg(feature = "async")]
91use crate::fiber;
92use crate::module::RegisteredModuleId;
93use crate::prelude::*;
94#[cfg(feature = "gc")]
95use crate::runtime::vm::GcRootsList;
96#[cfg(feature = "stack-switching")]
97use crate::runtime::vm::VMContRef;
98use crate::runtime::vm::mpk::ProtectionKey;
99use crate::runtime::vm::{
100    self, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
101    Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SendSyncPtr,
102    SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore, VMStoreContext,
103};
104use crate::trampoline::VMHostGlobalContext;
105use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
106#[cfg(feature = "gc")]
107use crate::{ExnRef, Rooted};
108use crate::{Global, Instance, Memory, Table, Uninhabited};
109use alloc::sync::Arc;
110use core::fmt;
111use core::marker;
112use core::mem::{self, ManuallyDrop, MaybeUninit};
113use core::num::NonZeroU64;
114use core::ops::{Deref, DerefMut};
115use core::pin::Pin;
116use core::ptr::NonNull;
117use wasmtime_environ::StaticModuleIndex;
118use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
119
120mod context;
121pub use self::context::*;
122mod data;
123pub use self::data::*;
124mod func_refs;
125use func_refs::FuncRefs;
126#[cfg(feature = "component-model-async")]
127mod token;
128#[cfg(feature = "component-model-async")]
129pub(crate) use token::StoreToken;
130#[cfg(feature = "async")]
131mod async_;
132#[cfg(all(feature = "async", feature = "call-hook"))]
133pub use self::async_::CallHookHandler;
134
135#[cfg(feature = "gc")]
136use super::vm::VMExnRef;
137#[cfg(feature = "gc")]
138mod gc;
139
140/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
141///
142/// All WebAssembly instances and items will be attached to and refer to a
143/// [`Store`]. For example instances, functions, globals, and tables are all
144/// attached to a [`Store`]. Instances are created by instantiating a
145/// [`Module`](crate::Module) within a [`Store`].
146///
147/// A [`Store`] is intended to be a short-lived object in a program. No form
148/// of GC is implemented at this time so once an instance is created within a
149/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
150/// This makes [`Store`] unsuitable for creating an unbounded number of
151/// instances in it because [`Store`] will never release this memory. It's
152/// recommended to have a [`Store`] correspond roughly to the lifetime of a
153/// "main instance" that an embedding is interested in executing.
154///
155/// ## Type parameter `T`
156///
157/// Each [`Store`] has a type parameter `T` associated with it. This `T`
158/// represents state defined by the host. This state will be accessible through
159/// the [`Caller`](crate::Caller) type that host-defined functions get access
160/// to. This `T` is suitable for storing `Store`-specific information which
161/// imported functions may want access to.
162///
163/// The data `T` can be accessed through methods like [`Store::data`] and
164/// [`Store::data_mut`].
165///
166/// ## Stores, contexts, oh my
167///
168/// Most methods in Wasmtime take something of the form
169/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
170/// the first argument. These two traits allow ergonomically passing in the
171/// context you currently have to any method. The primary two sources of
172/// contexts are:
173///
174/// * `Store<T>`
175/// * `Caller<'_, T>`
176///
177/// corresponding to what you create and what you have access to in a host
178/// function. You can also explicitly acquire a [`StoreContext`] or
179/// [`StoreContextMut`] and pass that around as well.
180///
181/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
182/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
183/// form of context you have you can call various methods, create objects, etc.
184///
185/// ## Stores and `Default`
186///
187/// You can create a store with default configuration settings using
188/// `Store::default()`. This will create a brand new [`Engine`] with default
189/// configuration (see [`Config`](crate::Config) for more information).
190///
191/// ## Cross-store usage of items
192///
193/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
194/// [`Store`]. The store they belong to is the one they were created with
195/// (passed in as a parameter) or instantiated with. This store is the only
196/// store that can be used to interact with wasm items after they're created.
197///
198/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
199/// operations is incorrect. In other words it's considered a programmer error
200/// rather than a recoverable error for the wrong [`Store`] to be used when
201/// calling APIs.
pub struct Store<T: 'static> {
    // The heap-allocated contents of this store.
    //
    // `ManuallyDrop` is used so that `Store::into_data` can take ownership of
    // the user's `T` out of the store without running the normal destructor;
    // see `Store::into_data` for the full explanation.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
206
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // `matches!` keeps this a simple boolean test without a catch-all arm
        // hiding the remaining variants.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
237
238/// Internal contents of a `Store<T>` that live on the heap.
239///
240/// The members of this struct are those that need to be generic over `T`, the
241/// store's internal type storage. Otherwise all things that don't rely on `T`
242/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional callback projecting a resource limiter out of `T`, consulted
    /// when memories and tables grow.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on transitions between host and wasm code; see
    /// `CallHook`.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's
    /// deadline; its returned `UpdateDeadline` dictates how execution
    /// proceeds.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// Stored as a boxed `StoreDebugHandler` adapter trait object so the
    /// store can temporarily take it out, clone the underlying handler, and
    /// re-install it while an event is being handled (the handler receives
    /// the whole store mutably via `StoreContextMut`); see the blanket
    /// `StoreDebugHandler` impl for the details.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
277
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Handles the debug `event`, taking the boxed handler by value and
    /// returning a future that performs the (possibly asynchronous) handling.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}

#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it. (`Box<D>`
        // coerces back into the `Box<dyn StoreDebugHandler<_>>` that the
        // store holds.)
        store.0.debug_handler = Some(self);
        Box::new(async move { handler.handle(store, event).await })
    }
}
311
/// Store-owned callback used to project a resource limiter out of the user's
/// `T` data, in either synchronous or asynchronous flavor.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter callback.
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter callback, only available with the `async` feature.
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
317
318/// Representation of a configured resource limiter for a store.
319///
320/// This is acquired with `resource_limiter_and_store_opaque` for example and is
321/// threaded through to growth operations on tables/memories. Note that this is
322/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
323/// efficient to pass around (nullable pointer) and it's also notably passed
324/// around as an `Option` to represent how this is optionally specified within a
325/// store.
pub enum StoreResourceLimiter<'a> {
    /// A borrowed synchronous limiter.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// A borrowed asynchronous limiter, only available with the `async`
    /// feature.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
331
332impl StoreResourceLimiter<'_> {
333    pub(crate) async fn memory_growing(
334        &mut self,
335        current: usize,
336        desired: usize,
337        maximum: Option<usize>,
338    ) -> Result<bool, Error> {
339        match self {
340            Self::Sync(s) => s.memory_growing(current, desired, maximum),
341            #[cfg(feature = "async")]
342            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
343        }
344    }
345
346    pub(crate) fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
347        match self {
348            Self::Sync(s) => s.memory_grow_failed(error),
349            #[cfg(feature = "async")]
350            Self::Async(s) => s.memory_grow_failed(error),
351        }
352    }
353
354    pub(crate) async fn table_growing(
355        &mut self,
356        current: usize,
357        desired: usize,
358        maximum: Option<usize>,
359    ) -> Result<bool, Error> {
360        match self {
361            Self::Sync(s) => s.table_growing(current, desired, maximum),
362            #[cfg(feature = "async")]
363            Self::Async(s) => s.table_growing(current, desired, maximum).await,
364        }
365    }
366
367    pub(crate) fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
368        match self {
369            Self::Sync(s) => s.table_grow_failed(error),
370            #[cfg(feature = "async")]
371            Self::Async(s) => s.table_grow_failed(error),
372        }
373    }
374}
375
/// Store-owned state backing the call hook configured via `Store::call_hook`.
enum CallHookInner<T: 'static> {
    /// Synchronous hook invoked on each host<->wasm transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook invoked on each host<->wasm transition.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited variant (can never be constructed) ensuring the type
    /// parameter `T` is considered used regardless of which features are
    /// enabled.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
390
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the return value of the store's configured epoch-deadline
/// callback (see `StoreInner::epoch_deadline_behavior`).
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
418
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`.
//
// This lets internal code holding a `StoreInner<T>` transparently call the
// monomorphic methods defined on `StoreOpaque` without spelling out `.inner`.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
432
433/// Monomorphic storage for a `Store<T>`.
434///
435/// This structure contains the bulk of the metadata about a `Store`. This is
436/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
437/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
438/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store is associated with.
    engine: Engine,
    /// Store state shared with the VM layer; `Store::new` records a pointer
    /// to the user's `T` data in here (see `vm_store_context.store_data`).
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated within this store, including internal "dummy"
    /// instances; see `StoreInstance`.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    /// Number of component instances created within this store.
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Custom signal handler for this store, if one was configured.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules associated with this store;
    /// `StoreInstanceKind::Real::module_id` indexes into this.
    modules: ModuleRegistry,
    /// Bookkeeping for `VMFuncRef`s owned by this store; see `FuncRefs`.
    func_refs: FuncRefs,
    /// Host-created globals, each backed by a store-owned
    /// `VMHostGlobalContext` allocation.
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// State used to implement async execution of wasm; see
    /// `fiber::AsyncState`.
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-referential pointer to this store as a `dyn VMStore` trait
    /// object; see `StorePtr` for why this exists.
    traitobj: StorePtr,
    /// The `VMContext` of this store's "default callee" dummy instance, used
    /// when calling wasm with no other caller available. Initialized to a
    /// dangling pointer in `Store::new` until the dummy instance is created;
    /// see the comments there.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
    /// State backing component-model concurrency; see
    /// `concurrent::ConcurrentState`.
    #[cfg(feature = "component-model")]
    concurrent_state: concurrent::ConcurrentState,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
552
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);

// We can't make `VMStore: Send + Sync` because that requires making all of
// Wasmtime's internals generic over the `Store`'s `T`. So instead, we take care
// in the whole VM layer to only use the `VMStore` in ways that are `Send`- and
// `Sync`-safe and we have to have these unsafe impls.
//
// SAFETY: soundness relies on the VM layer upholding the discipline described
// above whenever this raw pointer crosses threads.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
566
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm runs in the Pulley interpreter; carries the interpreter state.
    Interpreter(Interpreter),
    /// Wasm runs as natively compiled code; only available when a host
    /// compiler backend is compiled in.
    #[cfg(has_host_compiler_backend)]
    Native,
}
576
impl Executor {
    /// Creates the executor appropriate for `engine`'s configured target.
    pub(crate) fn new(engine: &Engine) -> Self {
        // With a native compiler backend available, the interpreter is only
        // used when the engine explicitly targets Pulley.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a host compiler backend the target must be Pulley, so the
        // interpreter is the only possible executor.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
592
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Natively compiled execution; no state to borrow.
    #[cfg(has_host_compiler_backend)]
    Native,
}
599
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is placed in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered (requires the `gc` feature
    /// and a present GC store); controls the matching exit on drop.
    entered: bool,
}
606
607impl<'a> AutoAssertNoGc<'a> {
608    #[inline]
609    pub fn new(store: &'a mut StoreOpaque) -> Self {
610        let entered = if !cfg!(feature = "gc") {
611            false
612        } else if let Some(gc_store) = store.gc_store.as_mut() {
613            gc_store.gc_heap.enter_no_gc_scope();
614            true
615        } else {
616            false
617        };
618
619        AutoAssertNoGc { store, entered }
620    }
621
622    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
623    /// disables checks for no GC happening for the duration of this value.
624    ///
625    /// This is used when it is statically otherwise known that a GC doesn't
626    /// happen for the various types involved.
627    ///
628    /// # Unsafety
629    ///
630    /// This method is `unsafe` as it does not provide the same safety
631    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
632    /// caller that a GC doesn't happen.
633    #[inline]
634    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
635        if cfg!(debug_assertions) {
636            AutoAssertNoGc::new(store)
637        } else {
638            AutoAssertNoGc {
639                store,
640                entered: false,
641            }
642        }
643    }
644}
645
// Allow an `AutoAssertNoGc` to be used anywhere a `&StoreOpaque` or
// `&mut StoreOpaque` is expected, by reborrowing the wrapped store.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
661
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Balance the `enter_no_gc_scope` performed in `AutoAssertNoGc::new`;
        // when no scope was entered (`entered == false`) there is nothing to
        // undo.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
670
671/// Used to associate instances with the store.
672///
673/// This is needed to track if the instance was allocated explicitly with the on-demand
674/// instance allocator.
struct StoreInstance {
    /// The handle to the underlying allocated instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}

enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
696
697impl<T> Store<T> {
698    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
699    /// `data` provided.
700    ///
701    /// The created [`Store`] will place no additional limits on the size of
702    /// linear memories or tables at runtime. Linear memories and tables will
703    /// be allowed to grow to any upper limit specified in their definitions.
704    /// The store will limit the number of instances, linear memories, and
705    /// tables created to 10,000. This can be overridden with the
706    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        // Reserve a memory-protection key for this store, if the engine's
        // allocator has one available (`None` otherwise).
        let pkey = engine.allocator().next_available_pkey();

        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // Filled in below once the `StoreInner` box has its final address.
            traitobj: StorePtr(None),
            // Likewise replaced below once the dummy instance is allocated.
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model")]
            concurrent_state: Default::default(),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        });

        // Record a pointer to the `T` data in the `VMStoreContext`. Later
        // accesses of `T` re-derive their pointer from this one's provenance
        // (see `StoreInner::data`) so that raw accesses made on behalf of
        // wasm stay valid under Rust's provenance rules.
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Now that the `StoreInner` allocation has its final heap address,
        // stash the self-referential pointer to it.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::new(StaticModuleIndex::from_u32(
            0,
        )));
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let id = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                &shim,
                Default::default(),
            ))
            .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
816
    /// Access the underlying `T` data owned by this `Store`.
    ///
    /// See also [`Store::data_mut`] for mutable access.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
822
    /// Access the underlying `T` data owned by this `Store`.
    ///
    /// Same as [`Store::data`], except that a mutable borrow is returned.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
828
    /// Runs teardown steps that must happen before this store's fields are
    /// dropped: component fibers/futures are shut down first, then all fiber
    /// stacks are returned to the instance allocator.
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        //
        // This will also drop any futures which might use `&Accessor` fields
        // in their `Drop::drop` implementations, in which case they'll need to
        // be called from within the context of a `tls::set` closure.
        #[cfg(feature = "component-model-async")]
        ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
845
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    ///
    /// All other store state is torn down; only the `T` originally provided to
    /// [`Store::new`] is handed back to the caller.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
880
881    /// Configures the [`ResourceLimiter`] used to limit resource creation
882    /// within this [`Store`].
883    ///
884    /// Whenever resources such as linear memory, tables, or instances are
885    /// allocated the `limiter` specified here is invoked with the store's data
886    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
887    /// being allocated. The returned [`ResourceLimiter`] is intended to live
888    /// within the `T` itself, for example by storing a
889    /// [`StoreLimits`](crate::StoreLimits).
890    ///
891    /// Note that this limiter is only used to limit the creation/growth of
892    /// resources in the future, this does not retroactively attempt to apply
893    /// limits to the [`Store`].
894    ///
895    /// # Examples
896    ///
897    /// ```
898    /// use wasmtime::*;
899    ///
900    /// struct MyApplicationState {
901    ///     my_state: u32,
902    ///     limits: StoreLimits,
903    /// }
904    ///
905    /// let engine = Engine::default();
906    /// let my_state = MyApplicationState {
907    ///     my_state: 42,
908    ///     limits: StoreLimitsBuilder::new()
909    ///         .memory_size(1 << 20 /* 1 MB */)
910    ///         .instances(2)
911    ///         .build(),
912    /// };
913    /// let mut store = Store::new(&engine, my_state);
914    /// store.limiter(|state| &mut state.limits);
915    ///
916    /// // Creation of smaller memories is allowed
917    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
918    ///
919    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
920    /// // configured
921    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
922    ///
923    /// // The number of instances in this store is limited to 2, so the third
924    /// // instance here should fail.
925    /// let module = Module::new(&engine, "(module)").unwrap();
926    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
927    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
928    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
929    /// ```
930    ///
931    /// [`ResourceLimiter`]: crate::ResourceLimiter
932    pub fn limiter(
933        &mut self,
934        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
935    ) {
936        // Apply the limits on instances, tables, and memory given by the limiter:
937        let inner = &mut self.inner;
938        let (instance_limit, table_limit, memory_limit) = {
939            let l = limiter(inner.data_mut());
940            (l.instances(), l.tables(), l.memories())
941        };
942        let innermost = &mut inner.inner;
943        innermost.instance_limit = instance_limit;
944        innermost.table_limit = table_limit;
945        innermost.memory_limit = memory_limit;
946
947        // Save the limiter accessor function:
948        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
949    }
950
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Boxed and stored as the synchronous flavor of hook; it's invoked
        // from `StoreInner::call_hook` on each host/wasm transition.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
972
    /// Returns the [`Engine`] that this store is associated with.
    ///
    /// This is the engine provided when this store was created.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }
977
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // Delegate to the `StoreContextMut` implementation of `gc`.
        StoreContextMut(&mut self.inner).gc(why)
    }
995
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }
1006
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// See [`Store::get_fuel`] to read the currently remaining amount.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }
1029
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Errors
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.inner.fuel_async_yield_interval(interval)
    }
1061
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See also [`Store::epoch_deadline_trap`] and
    /// [`Store::epoch_deadline_callback`] for configuring what happens when
    /// the deadline is reached.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
1092
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// Trapping is also the default behavior if the store is not
    /// otherwise configured via
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }
1123
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Box the callback and hand it off to the inner store to invoke on
        // each deadline expiration.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1158
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To inspect or take the pending exception, use
    /// [`peek_pending_exception`] and [`take_pending_exception`]. For
    /// a convenient wrapper that invokes a closure and provides any
    /// caught exception from the closure to a separate handler
    /// closure, see [`StoreContextMut::catch`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the exception in the store's pending-exception slot, then
        // signal the throw to the caller via the marker error type.
        self.inner.throw_impl(exception);
        Err(ThrownException)
    }
1197
    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::catch`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        // Moves the exception out of the pending slot, re-rooting it in the
        // current handle scope.
        self.inner.take_pending_exception_rooted()
    }
1218
    /// Tests whether there is a pending exception.
    ///
    /// Ordinarily, a pending exception will be set on a store if and
    /// only if a host-side callstack is propagating a
    /// [`crate::ThrownException`] error. The final consumer that
    /// catches the exception takes it; it may re-place it to re-throw
    /// (using [`throw`]) if it chooses not to actually handle the
    /// exception.
    ///
    /// This method is useful to tell whether a store is in this
    /// state, but should not be used as part of the ordinary
    /// exception-handling flow. For the most idiomatic handling, see
    /// [`StoreContextMut::catch`].
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        // Read-only check of the pending-exception slot; does not take it.
        self.inner.pending_exception.is_some()
    }
1236
    /// Provide an object that views Wasm stack state, including Wasm
    /// VM-level values (locals and operand stack), when debugging is
    /// enabled.
    ///
    /// This object views the frames from the most recent Wasm entry
    /// onward (up to the exit that allows this host code to run). Any
    /// Wasm stack frames upward from the most recent entry to Wasm
    /// are not visible to this cursor.
    ///
    /// Returns `None` if debug instrumentation is not enabled for
    /// the engine containing this store.
    #[cfg(feature = "debug")]
    pub fn debug_frames(&mut self) -> Option<crate::DebugFrameCursor<'_, T>> {
        // Delegates to the `StoreContextMut` implementation.
        self.as_context_mut().debug_frames()
    }
1252
    /// Set the debug callback on this store.
    ///
    /// See [`crate::DebugHandler`] for more documentation, and
    /// [`Store::clear_debug_handler`] to remove a previously-set handler.
    ///
    /// # Panics
    ///
    /// - Will panic if this store is not configured for async
    ///   support.
    /// - Will panic if guest-debug support was not enabled via
    ///   [`crate::Config::guest_debug`].
    #[cfg(feature = "debug")]
    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
    where
        // We require `Send` here because the debug handler becomes
        // referenced from a future: when `DebugHandler::handle` is
        // invoked, its `self` references the `handler` with the
        // user's state. Note that we are careful to keep this bound
        // constrained to debug-handler-related code only and not
        // propagate it outward to the store in general. The presence
        // of the trait implementation serves as a witness that `T:
        // Send`. This is required in particular because we will have
        // a `&mut dyn VMStore` on the stack when we pause a fiber
        // with `block_on` to run a debugger hook; that `VMStore` must
        // be a `Store<T> where T: Send`.
        T: Send,
    {
        assert!(
            self.inner.async_support(),
            "debug hooks rely on async support"
        );
        assert!(
            self.engine().tunables().debug_guest,
            "debug hooks require guest debugging to be enabled"
        );
        self.inner.debug_handler = Some(Box::new(handler));
    }
1289
    /// Clear the debug handler on this store. If any existed, it will
    /// be dropped.
    #[cfg(feature = "debug")]
    pub fn clear_debug_handler(&mut self) {
        // Overwriting with `None` drops any previously-installed handler here.
        self.inner.debug_handler = None;
    }
1296}
1297
impl<'a, T> StoreContext<'a, T> {
    /// Returns whether this store was configured with async support.
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`]. Note that the returned reference carries the
    /// `'a` lifetime of this context, not the shorter lifetime of `&self`.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1322
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // This synchronous entrypoint is only valid for non-async stores.
        assert!(!self.0.async_support());
        let (mut limiter, store) = self.0.resource_limiter_and_store_opaque();
        // Without async support the GC future must complete immediately, so
        // asserting readiness is safe here.
        vm::assert_ready(store.gc(limiter.as_mut(), None, why.map(|e| e.bytes_needed())));
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
1419
1420impl<T> StoreInner<T> {
    /// Returns a shared borrow of this store's `T` data, re-derived from the
    /// provenance of the pointer recorded in `VMStoreContext::store_data`.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }
1438
    /// Splits `self` into simultaneous mutable borrows of the `T` data, the
    /// optional resource limiter, and the inner `StoreOpaque`.
    ///
    /// The three returned borrows are disjoint fields of `self`, which is why
    /// they may be handed out at the same time.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }
1462
1463    #[inline]
1464    fn data_mut(&mut self) -> &mut T {
1465        self.data_limiter_and_opaque().0
1466    }
1467
1468    #[inline]
1469    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
1470        if self.inner.pkey.is_none() && self.call_hook.is_none() {
1471            Ok(())
1472        } else {
1473            self.call_hook_slow_path(s)
1474        }
1475    }
1476
1477    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
1478        if let Some(pkey) = &self.inner.pkey {
1479            let allocator = self.engine().allocator();
1480            match s {
1481                CallHook::CallingWasm | CallHook::ReturningFromHost => {
1482                    allocator.restrict_to_pkey(*pkey)
1483                }
1484                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
1485            }
1486        }
1487
1488        // Temporarily take the configured behavior to avoid mutably borrowing
1489        // multiple times.
1490        if let Some(mut call_hook) = self.call_hook.take() {
1491            let result = self.invoke_call_hook(&mut call_hook, s);
1492            self.call_hook = Some(call_hook);
1493            return result;
1494        }
1495
1496        Ok(())
1497    }
1498
1499    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
1500        match call_hook {
1501            #[cfg(feature = "call-hook")]
1502            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),
1503
1504            #[cfg(all(feature = "async", feature = "call-hook"))]
1505            CallHookInner::Async(handler) => {
1506                if !self.can_block() {
1507                    bail!("couldn't grab async_cx for call hook")
1508                }
1509                return (&mut *self)
1510                    .as_context_mut()
1511                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
1512            }
1513
1514            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
1515                let _ = s;
1516                match *uninhabited {}
1517            }
1518        }
1519    }
1520
1521    #[cfg(not(feature = "async"))]
1522    fn flush_fiber_stack(&mut self) {
1523        // noop shim so code can assume this always exists.
1524    }
1525}
1526
/// Computes the total fuel remaining given the VM's (negated) active fuel
/// counter plus the store's reserve, saturating at zero if wasm consumed more
/// fuel than was injected.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // The VM counts fuel upward toward zero from a negative starting point,
    // so the fuel still active inside the VM is the counter's negation.
    let active_remaining = -injected_fuel;
    fuel_reserve.saturating_add_signed(active_remaining)
}
1530
1531// Add remaining fuel from the reserve into the active fuel if there is any left.
1532fn refuel(
1533    injected_fuel: &mut i64,
1534    fuel_reserve: &mut u64,
1535    yield_interval: Option<NonZeroU64>,
1536) -> bool {
1537    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1538    if fuel > 0 {
1539        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1540        true
1541    } else {
1542        false
1543    }
1544}
1545
/// Distributes `new_fuel_amount` between the VM's active fuel counter
/// (`injected_fuel`) and the store-side reserve (`fuel_reserve`).
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // When periodic yielding is configured only inject up to one interval's
    // worth of fuel at a time; otherwise treat the interval as unbounded.
    let interval = match yield_interval {
        Some(n) => n.get(),
        None => u64::MAX,
    };
    // The VM's fuel counter is an `i64`, so additionally clamp the injected
    // amount to the `i64` range.
    let injected = new_fuel_amount.min(interval).min(i64::MAX as u64);
    // Whatever wasn't injected is banked in the reserve for later refueling.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM increments this counter as fuel is consumed and halts once it
    // becomes positive, so the injected amount is stored negated.
    *injected_fuel = -(injected as i64);
}
1565
1566#[doc(hidden)]
1567impl StoreOpaque {
    /// Returns the unique `StoreId` of this store, as derived from its
    /// `StoreData`.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1571
1572    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1573        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1574            let new = slot.saturating_add(amt);
1575            if new > max {
1576                bail!("resource limit exceeded: {desc} count too high at {new}");
1577            }
1578            *slot = new;
1579            Ok(())
1580        }
1581
1582        let module = module.env_module();
1583        let memories = module.num_defined_memories();
1584        let tables = module.num_defined_tables();
1585
1586        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1587        bump(
1588            &mut self.memory_count,
1589            self.memory_limit,
1590            memories,
1591            "memory",
1592        )?;
1593        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1594
1595        Ok(())
1596    }
1597
    /// Returns whether this store supports async operation, which requires
    /// both the `async` compile-time feature and the engine's `async_support`
    /// configuration option.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }
1602
    /// Returns the engine this store is associated with.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1607
    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1612
    /// Mutable access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1617
    /// Shared access to the registry of modules loaded into this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1622
    /// Mutable access to the registry of modules loaded into this store.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }
1627
    /// Simultaneously borrows the store's `FuncRefs` mutably and its module
    /// registry immutably, for callers that need both at once.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }
1631
    /// Shared access to the globals created via the host (as opposed to
    /// globals defined by instantiated modules).
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }
1637
    /// Mutable access to the globals created via the host (as opposed to
    /// globals defined by instantiated modules).
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1643
1644    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1645        instance.store_id().assert_belongs_to(self.id());
1646        match self.instances[instance.instance()].kind {
1647            StoreInstanceKind::Dummy => None,
1648            StoreInstanceKind::Real { module_id } => {
1649                let module = self
1650                    .modules()
1651                    .lookup_module_by_id(module_id)
1652                    .expect("should always have a registered module for real instances");
1653                Some(module)
1654            }
1655        }
1656    }
1657
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not present in this store's instance map.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }
1667
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics if `id` is not present in this store's instance map.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1677
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` yields `None` when any ids alias, which the
        // `unwrap` below turns into the documented panic.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1703
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    ///
    /// Returned together so callers can hold both mutable borrows at once.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1711
1712    /// Get all instances (ignoring dummy instances) within this store.
1713    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1714        let instances = self
1715            .instances
1716            .iter()
1717            .filter_map(|(id, inst)| {
1718                if let StoreInstanceKind::Dummy = inst.kind {
1719                    None
1720                } else {
1721                    Some(id)
1722                }
1723            })
1724            .collect::<Vec<_>>();
1725        instances
1726            .into_iter()
1727            .map(|i| Instance::from_wasmtime(i, self))
1728    }
1729
    /// Get all memories (host- or Wasm-defined) within this store.
    #[cfg_attr(any(), doc = "")] // (no-op; see comments below)
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = Memory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1740
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1754
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    ///
    /// Host-created globals are visited first, then each instance's defined
    /// globals.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1772
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1777
    /// Shared access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1782
    /// Mutable access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1787
1788    /// Performs a lazy allocation of the `GcStore` within this store, returning
1789    /// the previous allocation if it's already present.
1790    ///
1791    /// This method will, if necessary, allocate a new `GcStore` -- linear
1792    /// memory and all. This is a blocking operation due to
1793    /// `ResourceLimiterAsync` which means that this should only be executed
1794    /// in a fiber context at this time.
1795    #[inline]
1796    pub(crate) async fn ensure_gc_store(
1797        &mut self,
1798        limiter: Option<&mut StoreResourceLimiter<'_>>,
1799    ) -> Result<&mut GcStore> {
1800        if self.gc_store.is_some() {
1801            return Ok(self.gc_store.as_mut().unwrap());
1802        }
1803        self.allocate_gc_store(limiter).await
1804    }
1805
    /// Slow path of `ensure_gc_store`: actually allocates the GC heap.
    ///
    /// Kept out-of-line (`inline(never)`) so the fast path above stays small.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Callers must only reach here when no GC store exists yet, and the
        // VM store context must still hold its initial (empty) heap values.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        // Record the new heap's memory definition in the VM store context so
        // the runtime observes the freshly allocated heap.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // Inner helper selected by the `gc` feature; the real implementation:
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::{StaticModuleIndex, packed_option::ReservedValue};

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(wasmtime_environ::Module::new(
                    StaticModuleIndex::from_u32(0),
                ))),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // ... and the always-failing shim used when `gc` is compiled out.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1875
1876    /// Helper method to require that a `GcStore` was previously allocated for
1877    /// this store, failing if it has not yet been allocated.
1878    ///
1879    /// Note that this should only be used in a context where allocation of a
1880    /// `GcStore` is sure to have already happened prior, otherwise this may
1881    /// return a confusing error to embedders which is a bug in Wasmtime.
1882    ///
1883    /// Some situations where it's safe to call this method:
1884    ///
1885    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
1886    ///   this shows proof that the `GcStore` was previously allocated.
1887    /// * During instantiation and instance's `needs_gc_heap` flag will be
1888    ///   handled and instantiation will automatically create a GC store.
1889    #[inline]
1890    #[cfg(feature = "gc")]
1891    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
1892        match &self.gc_store {
1893            Some(gc_store) => Ok(gc_store),
1894            None => bail!("GC heap not initialized yet"),
1895        }
1896    }
1897
1898    /// Same as [`Self::require_gc_store`], but mutable.
1899    #[inline]
1900    #[cfg(feature = "gc")]
1901    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
1902        match &mut self.gc_store {
1903            Some(gc_store) => Ok(gc_store),
1904            None => bail!("GC heap not initialized yet"),
1905        }
1906    }
1907
1908    /// Attempts to access the GC store that has been previously allocated.
1909    ///
1910    /// This method will return `Some` if the GC store was previously allocated.
1911    /// A `None` return value means either that the GC heap hasn't yet been
1912    /// allocated or that it does not need to be allocated for this store. Note
1913    /// that to require a GC store in a particular situation it's recommended to
1914    /// use [`Self::require_gc_store_mut`] instead.
1915    #[inline]
1916    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
1917        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1918            debug_assert!(self.gc_store.is_none());
1919            None
1920        } else {
1921            self.gc_store.as_mut()
1922        }
1923    }
1924
1925    /// Helper to assert that a GC store was previously allocated and is
1926    /// present.
1927    ///
1928    /// # Panics
1929    ///
1930    /// This method will panic if the GC store has not yet been allocated. This
1931    /// should only be used in a context where there's an existing GC reference,
1932    /// for example, or if `ensure_gc_store` has already been called.
1933    #[inline]
1934    #[track_caller]
1935    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
1936        self.gc_store
1937            .as_ref()
1938            .expect("attempted to access the store's GC heap before it has been allocated")
1939    }
1940
1941    /// Same as [`Self::unwrap_gc_store`], but mutable.
1942    #[inline]
1943    #[track_caller]
1944    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
1945        self.gc_store
1946            .as_mut()
1947            .expect("attempted to access the store's GC heap before it has been allocated")
1948    }
1949
    /// Shared access to this store's set of GC roots.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1954
    /// Mutable access to this store's set of GC roots.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
1960
    /// Exits the LIFO rooting scope identified by `scope`, unrooting any GC
    /// references rooted after it was entered.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1965
    /// Performs a garbage collection of this store's GC heap, if one has been
    /// allocated.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots).await;
        let async_yield = self.async_support();
        // SAFETY: `roots.iter()` is unsafe since the list holds raw pointers;
        // the roots were just collected from live frames/slots of this store
        // which outlive this `gc` call -- NOTE(review): relies on
        // `GcRootsList`'s contract, confirm there.
        self.unwrap_gc_store_mut()
            .gc(async_yield, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
1991
    /// Enumerates every GC root (Wasm stacks, continuations, vmctx items,
    /// user roots, and any pending exception) into `gc_roots_list`,
    /// cooperatively yielding between phases when async support is enabled.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            #[cfg(feature = "async")]
            if self.async_support() {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2022
    /// Traces the GC roots of a single Wasm stack `frame`, adding any live GC
    /// references found via the frame's stack map (and, with the `debug`
    /// feature, its frame table) to `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        let module_info = self
            .modules()
            .lookup_module_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_info.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            // SAFETY: `fp` is the frame's frame pointer, from which the stack
            // map derives the stack pointer and each live GC-ref slot.
            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // Debug builds may additionally carry frame tables describing GC refs
        // not covered by the stack map.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_info.frame_table() {
            let relpc = module_info.text_offset(pc);
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2067
    /// Reads a potential GC reference out of `stack_slot` and, when it holds a
    /// non-null GC ref, registers the slot as a Wasm-stack root.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be a valid, readable pointer to a `u32` GC-ref slot
    /// that remains live for as long as `gc_roots_list` retains it.
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2084
    /// Walks the currently active Wasm stack, tracing each frame's GC roots
    /// into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2097
    /// Traces GC roots held on the stacks of suspended continuations.
    ///
    /// Running continuations are covered by `trace_wasm_stack_roots`; other
    /// states have no live GC values on their stacks (see the match below).
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2136
    /// Traces GC roots reachable from vmctx-resident items: every global and
    /// every table in this store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
2144
    /// Traces GC roots held by the embedder via this store's `RootSet`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
2151
    /// Traces the store's pending exception, if any, as a GC root so it
    /// survives collection while waiting to be taken.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2163
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        self.gc_host_alloc_types.insert(ty);
    }
2174
    /// Helper function to execute an init barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating `GcStore` where possible.
    pub(crate) fn init_gc_ref(
        &mut self,
        dest: &mut MaybeUninit<Option<VMGcRef>>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if GcStore::needs_init_barrier(gc_ref) {
            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
        } else {
            // No barrier needed (e.g. null or i31 refs); copy the value
            // directly into place without touching the GC store.
            dest.write(gc_ref.map(|r| r.copy_i31()));
        }
    }
2189
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating `GcStore` where possible.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2196
    /// Helper function to clone `gc_ref` notably avoiding allocating a
    /// `GcStore` where possible.
    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
        if gc_ref.is_i31() {
            // i31 refs aren't heap-managed so they can be copied without
            // consulting the GC store.
            gc_ref.copy_i31()
        } else {
            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
        }
    }
2206
    /// Returns the total fuel remaining (active in the VM plus the reserve),
    /// erroring if fuel consumption is not configured for this store.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // The fuel counter is shared with executing wasm via an unsafe cell;
        // NOTE(review): reading here presumes wasm in this store isn't
        // concurrently mutating it while `&self` is held -- confirm.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
2215
    /// Re-injects fuel from the store's reserve into the VM's active counter,
    /// returning whether any fuel remained to inject.
    pub(crate) fn refuel(&mut self) -> bool {
        // See the note in `get_fuel` about this shared counter access.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
2224
    /// Sets the total amount of fuel available, splitting it between the VM's
    /// active counter and the store-side reserve; errors if fuel consumption
    /// is not configured.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // See the note in `get_fuel` about this shared counter access.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2239
2240    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2241        anyhow::ensure!(
2242            self.engine().tunables().consume_fuel,
2243            "fuel is not configured in this store"
2244        );
2245        anyhow::ensure!(
2246            self.engine().config().async_support,
2247            "async support is not configured in this store"
2248        );
2249        anyhow::ensure!(
2250            interval != Some(0),
2251            "fuel_async_yield_interval must not be 0"
2252        );
2253        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2254        // Reset the fuel active + reserve states by resetting the amount.
2255        self.set_fuel(self.get_fuel()?)
2256    }
2257
2258    #[inline]
2259    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
2260        let handler = self.signal_handler.as_ref()?;
2261        Some(handler)
2262    }
2263
    /// Returns a non-null pointer to this store's `VMStoreContext`, the
    /// structure shared with executing wasm code.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
2268
    /// Returns the vmctx pointer of this store's default caller instance,
    /// used when the host invokes wasm without a wasm caller frame.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
2273
    /// Returns this store's self-referential `dyn VMStore` trait-object
    /// pointer.
    ///
    /// # Panics
    ///
    /// Panics if the pointer has not been initialized yet — presumably it is
    /// filled in during store construction (see the module docs about
    /// `Store<T>` being "just a pointer large").
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2278
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    ///
    /// `mem::take` leaves an empty (non-allocating) vector in its place; the
    /// caller is expected to hand the vector back via
    /// `save_hostcall_val_storage`.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }
2285
2286    /// Restores the vector previously taken by `take_hostcall_val_storage`
2287    /// above back into the store, allowing it to be used in the future for the
2288    /// next wasm->host call.
2289    #[inline]
2290    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
2291        if storage.capacity() > self.hostcall_val_storage.capacity() {
2292            self.hostcall_val_storage = storage;
2293        }
2294    }
2295
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    ///
    /// Leaves an empty vector in place via `mem::take`.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
2302
    /// Same as `save_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    ///
    /// Only the larger-capacity vector is retained; otherwise `storage` is
    /// dropped here.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
2311
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            // The assert below double-checks that at most one instance claims
            // the faulting address.
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No instance claimed the faulting address: this is treated as a
        // potential Wasmtime bug and the process is aborted (by one of the
        // three strategies below, depending on the compilation environment).
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2420
    /// Retrieve the store's protection key.
    ///
    /// Returns `None` when no protection key was assigned to this store.
    /// Only available with the pooling allocator, which is where protection
    /// keys are configured.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2427
    /// Returns simultaneous mutable borrows of the three pieces of
    /// component-model resource state held in this store: the call contexts,
    /// the host resource handle table, and the host resource data.
    ///
    /// Borrowing all three at once lets callers use them together without
    /// tripping the borrow checker on repeated `&mut self` accesses.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
2443
    /// Records that a component instance has been added to this store by
    /// bumping `num_component_instances` (used during drop to decrement the
    /// allocator's component-instance count).
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
2453
    /// Like `component_resource_state`, but additionally returns a pinned
    /// mutable reference to the `ComponentInstance` identified by `instance`,
    /// looked up in this store's `store_data`.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }
2472
    /// Like `component_resource_state_with_instance`, but additionally
    /// returns a mutable borrow of this store's `ConcurrentState`.
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance_and_concurrent_state(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
        &mut concurrent::ConcurrentState,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
            &mut self.concurrent_state,
        )
    }
2492
    /// Returns a mutable borrow of the async/fiber state stored in this
    /// store.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2497
    /// Returns a mutable borrow of the component-model-async concurrent
    /// state stored in this store.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState {
        &mut self.concurrent_state
    }
2502
    /// Returns whether this store was assigned a protection key.
    ///
    /// NOTE(review): gated on `async` while `get_pkey` above is gated on
    /// `pooling-allocator` — presumably `pkey` is available under both cfgs;
    /// confirm against the field's own cfg.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2507
    /// Returns a borrowed view of this store's executor: either an
    /// interpreter reference or the native-code marker, depending on how the
    /// store was configured.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2515
    /// Swaps this store's executor with `executor`, leaving the previous one
    /// in the caller-provided slot (the caller is responsible for swapping it
    /// back if needed).
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2520
    /// Returns the unwinder matching this store's executor: the interpreter's
    /// own unwinder, or the host unwinder for natively-compiled code.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2528
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    ///
    /// The returned raw pointer stays valid because the boxed `VMContRef` is
    /// kept alive in `self.continuations` for the store's lifetime and a
    /// `Box`'s heap allocation does not move.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        // The continuation's stack is sized by the engine's async stack size
        // configuration; stack creation is the only fallible step here.
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
2543
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be use to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id of the instance before allocating it; the
        // `assert_eq!` at the bottom double-checks the prediction.
        let id = self.instances.next_key();

        // Real modules use the engine's configured allocator; dummy instances
        // carry their own (typically on-demand) allocator.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        // Record the freshly allocated instance in the store, tagging it with
        // the matching `StoreInstanceKind` for later deallocation.
        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2619
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    ///
    /// Any previously pending exception is overwritten (and dropped).
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }
2628
    /// Take a pending exception, if any.
    ///
    /// Leaves `pending_exception` as `None` afterwards.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2634
    /// Tests whether there is a pending exception.
    ///
    /// Does not consume the exception; see `take_pending_exception`.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2640
    /// Takes the pending exception, if any, and re-wraps it as a `Rooted`
    /// exception reference.
    ///
    /// Rooting happens under `AutoAssertNoGc` so no collection can occur
    /// between taking the raw ref and rooting it.
    #[cfg(feature = "gc")]
    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
        let vmexnref = self.take_pending_exception()?;
        let mut nogc = AutoAssertNoGc::new(self);
        Some(Rooted::new(&mut nogc, vmexnref.into()))
    }
2647
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    ///
    /// The stored `pending_exception` is momentarily taken, a clone of its GC
    /// ref is put back in its place, and the original ref is handed to the
    /// caller as an `OwnedRooted` — all under `AutoAssertNoGc` so no GC can
    /// intervene.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(&mut self) -> Option<OwnedRooted<ExnRef>> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception.take().map(|vmexnref| {
            let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
            nogc.pending_exception = Some(cloned.into_exnref_unchecked());
            OwnedRooted::new(&mut nogc, vmexnref.into())
        })
    }
2659
    /// Converts a rooted exception reference to its raw VM representation and
    /// installs it as this store's pending exception.
    ///
    /// # Panics
    ///
    /// Panics (via `expect`) if `exception` converts to a null raw reference;
    /// a thrown exception is expected to always be non-null.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        let mut nogc = AutoAssertNoGc::new(self);
        let exnref = exception._to_raw(&mut nogc).unwrap();
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2669
    /// Sets this store's epoch deadline to the engine's current epoch plus
    /// `delta`.
    #[cfg(target_has_atomic = "64")]
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let current_epoch = self.engine().current_epoch();
        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
        *epoch_deadline = current_epoch + delta;
    }
2681
    /// Returns the currently configured epoch deadline.
    ///
    /// Takes `&mut self` because the deadline lives in a cell accessed via
    /// `get_mut`, which requires exclusive access.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2685}
2686
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2705
// Bridges `StoreInner<T>` to the type-erased `VMStore` trait used by the
// runtime internals. NOTE(review): the `unsafe impl` contract is declared on
// the `VMStore` trait itself (not visible in this chunk) — consult it before
// modifying these methods.
unsafe impl<T> VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        // `StoreInner<T>` itself implements the async-store trait, so this is
        // just an unsizing coercion.
        self
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        // Split borrows so the limiter closure can be applied to the store's
        // `T` data while the opaque store is borrowed separately.
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            // No callback configured: default to interrupting execution.
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }

    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> anyhow::Result<()> {
        // NOTE(review): the handler is `take`n and not restored here —
        // confirm it is re-installed elsewhere (or intended to fire once).
        if let Some(handler) = self.debug_handler.take() {
            log::trace!("about to raise debug event {event:?}");
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            // No handler installed: the event is silently ignored.
            Ok(())
        }
    }
}
2768
impl<T> StoreInner<T> {
    /// Configures epoch-deadline expiry to trap: clearing the callback makes
    /// `new_epoch_updated_deadline` above return `UpdateDeadline::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to be invoked when the epoch deadline is reached,
    /// replacing any previously configured behavior.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2783
2784impl<T: Default> Default for Store<T> {
2785    fn default() -> Store<T> {
2786        Store::new(&Engine::default(), T::default())
2787    }
2788}
2789
impl<T: fmt::Debug> fmt::Debug for Store<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Print the inner allocation's address (useful for correlating logs)
        // alongside the user's `T` data.
        let inner = &**self.inner as *const StoreInner<T>;
        f.debug_struct("Store")
            .field("inner", &inner)
            .field("data", self.inner.data())
            .finish()
    }
}
2799
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        //
        // Note the ordering: the user's `T` data is dropped before the rest
        // of the store's innards (`Drop for StoreOpaque` below deliberately
        // never touches the data for this reason).
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2811
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        // NOTE(review): the `unsafe` here presumably covers the allocator
        // deallocation calls below, whose safety contracts live on
        // `InstanceAllocator` — confirm against that trait's documentation.
        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // First return the GC heap (if any) to the allocator, recovering
            // both the heap and its backing memory allocation.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Each instance is returned to the allocator that created it:
            // dummy instances came from an on-demand allocator, real modules
            // from the engine's allocator.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            // Undo the counts accumulated by `push_component_instance`.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2850
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
/// Internal conversion trait: anything store-like can yield a mutable
/// `StoreOpaque`, letting helpers be generic over `Store<T>`, `StoreInner<T>`,
/// `dyn VMStore`, and `StoreOpaque` itself (see the impls below).
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2860
// Identity conversion: a `StoreOpaque` is already itself.
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2866
// A type-erased store reaches its opaque store via `VMStore`'s deref-style
// coercion (`DerefMut` to `StoreOpaque`, per the `self` coercion here).
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2872
// An owned `Store<T>` reaches its opaque store through both layers of
// indirection: `Store<T>` -> `StoreInner<T>` -> `StoreOpaque`.
impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}
2878
// `StoreInner<T>` coerces to its `StoreOpaque` (deref-style) directly.
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2884
2885impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
2886    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
2887        T::as_store_opaque(self)
2888    }
2889}
2890
#[cfg(test)]
mod tests {
    use super::*;

    // A miniature stand-in for the store's fuel fields, used to exercise the
    // free functions `get_fuel`/`refuel`/`set_fuel` without building a store.
    struct FuelTank {
        // Fuel consumed so far; negative values represent injected fuel that
        // has not yet been consumed.
        pub consumed_fuel: i64,
        // Fuel held back from the injected counter.
        pub reserve_fuel: u64,
        // When set, caps how much fuel is injected at a time.
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        // Values near and above `i64::MAX` must round-trip exactly through
        // the signed injected counter plus the unsigned reserve.
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // An interval that doesn't fit in `i64` is clamped to `i64::MAX`.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit as some
        // instructions can consume multiple units of fuel at once. Refueling
        // should be strict in its consumption and not add more fuel than
        // there is.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // Not enough reserve to cover the overage: refueling fails and the
        // tank is left untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }

    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        // Mimics wasm by mutating the data through the raw pointer published
        // in the `VMStoreContext` rather than through `data_mut`.
        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}
3041}