wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(feature = "debug")]
80use crate::DebugHandler;
81#[cfg(all(feature = "gc", feature = "debug"))]
82use crate::OwnedRooted;
83use crate::RootSet;
84#[cfg(feature = "gc")]
85use crate::ThrownException;
86#[cfg(feature = "component-model-async")]
87use crate::component::ComponentStoreData;
88#[cfg(feature = "component-model")]
89use crate::component::concurrent;
90#[cfg(feature = "async")]
91use crate::fiber;
92use crate::module::RegisteredModuleId;
93use crate::prelude::*;
94#[cfg(feature = "gc")]
95use crate::runtime::vm::GcRootsList;
96#[cfg(feature = "stack-switching")]
97use crate::runtime::vm::VMContRef;
98use crate::runtime::vm::mpk::ProtectionKey;
99use crate::runtime::vm::{
100    self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
101    InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
102    SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
103    VMStoreContext,
104};
105use crate::trampoline::VMHostGlobalContext;
106use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
107#[cfg(feature = "gc")]
108use crate::{ExnRef, Rooted};
109use crate::{Global, Instance, Table};
110use alloc::sync::Arc;
111use core::convert::Infallible;
112use core::fmt;
113use core::marker;
114use core::mem::{self, ManuallyDrop, MaybeUninit};
115use core::num::NonZeroU64;
116use core::ops::{Deref, DerefMut};
117use core::pin::Pin;
118use core::ptr::NonNull;
119use wasmtime_environ::StaticModuleIndex;
120use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
121
122mod context;
123pub use self::context::*;
124mod data;
125pub use self::data::*;
126mod func_refs;
127use func_refs::FuncRefs;
128#[cfg(feature = "component-model-async")]
129mod token;
130#[cfg(feature = "component-model-async")]
131pub(crate) use token::StoreToken;
132#[cfg(feature = "async")]
133mod async_;
134#[cfg(all(feature = "async", feature = "call-hook"))]
135pub use self::async_::CallHookHandler;
136
137#[cfg(feature = "gc")]
138use super::vm::VMExnRef;
139#[cfg(feature = "gc")]
140mod gc;
141
142/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
143///
144/// All WebAssembly instances and items will be attached to and refer to a
145/// [`Store`]. For example instances, functions, globals, and tables are all
146/// attached to a [`Store`]. Instances are created by instantiating a
147/// [`Module`](crate::Module) within a [`Store`].
148///
149/// A [`Store`] is intended to be a short-lived object in a program. No form
150/// of GC is implemented at this time so once an instance is created within a
151/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
152/// This makes [`Store`] unsuitable for creating an unbounded number of
153/// instances in it because [`Store`] will never release this memory. It's
154/// recommended to have a [`Store`] correspond roughly to the lifetime of a
155/// "main instance" that an embedding is interested in executing.
156///
157/// ## Type parameter `T`
158///
159/// Each [`Store`] has a type parameter `T` associated with it. This `T`
160/// represents state defined by the host. This state will be accessible through
161/// the [`Caller`](crate::Caller) type that host-defined functions get access
162/// to. This `T` is suitable for storing `Store`-specific information which
163/// imported functions may want access to.
164///
165/// The data `T` can be accessed through methods like [`Store::data`] and
166/// [`Store::data_mut`].
167///
168/// ## Stores, contexts, oh my
169///
170/// Most methods in Wasmtime take something of the form
171/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
172/// the first argument. These two traits allow ergonomically passing in the
173/// context you currently have to any method. The primary two sources of
174/// contexts are:
175///
176/// * `Store<T>`
177/// * `Caller<'_, T>`
178///
179/// corresponding to what you create and what you have access to in a host
180/// function. You can also explicitly acquire a [`StoreContext`] or
181/// [`StoreContextMut`] and pass that around as well.
182///
183/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
184/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
185/// form of context you have you can call various methods, create objects, etc.
186///
187/// ## Stores and `Default`
188///
189/// You can create a store with default configuration settings using
190/// `Store::default()`. This will create a brand new [`Engine`] with default
191/// configuration (see [`Config`](crate::Config) for more information).
192///
193/// ## Cross-store usage of items
194///
195/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
196/// [`Store`]. The store they belong to is the one they were created with
197/// (passed in as a parameter) or instantiated with. This store is the only
198/// store that can be used to interact with wasm items after they're created.
199///
200/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
201/// operations is incorrect. In other words it's considered a programmer error
202/// rather than a recoverable error for the wrong [`Store`] to be used when
203/// calling APIs.
pub struct Store<T: 'static> {
    /// Heap-allocated contents of this store.
    ///
    /// Wrapped in `ManuallyDrop` so that the user's `T` can be taken out of
    /// the store without running destructors twice; for the full rationale
    /// and drop protocol, see `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
208
#[derive(Copy, Clone, Debug)]
/// Argument passed to the callback registered with [`Store::call_hook`] to
/// indicate a state transition in the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}
222
223impl CallHook {
224    /// Indicates the VM is entering host code (exiting WebAssembly code)
225    pub fn entering_host(&self) -> bool {
226        match self {
227            CallHook::ReturningFromWasm | CallHook::CallingHost => true,
228            _ => false,
229        }
230    }
231    /// Indicates the VM is exiting host code (entering WebAssembly code)
232    pub fn exiting_host(&self) -> bool {
233        match self {
234            CallHook::ReturningFromHost | CallHook::CallingWasm => true,
235            _ => false,
236        }
237    }
238}
239
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional resource limiter, stored as a closure projecting the limiter
    /// out of the user's `T`. See `ResourceLimiterInner`.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on host<->wasm call transitions; see `CallHook`
    /// and `CallHookInner`.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's
    /// deadline; its return value (`UpdateDeadline`) decides whether to
    /// trap, continue, or yield.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// We need this to be an `Arc` because the handler itself takes
    /// `&self` and also the whole Store mutably (via
    /// `StoreContextMut`); so we need to hold a separate reference to
    /// it while invoking it.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
279
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Handle `event`, consuming the boxed handler.
    ///
    /// Takes `self: Box<Self>` so the (non-object-safe) underlying handler
    /// can be cloned inside a monomorphized impl; see the impl below.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
290
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    /// Clone the handler, put the boxed original back on the store, and
    /// return a future that runs the user's `DebugHandler::handle`.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it.
        store.0.debug_handler = Some(self);
        Box::new(async move { handler.handle(store, event).await })
    }
}
313
/// Internal storage for a store's configured resource limiter, if any.
///
/// Each variant holds a user-supplied closure that projects the limiter
/// trait object out of the user's `T` data.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter, set via `Store::limiter`.
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter whose callbacks may await.
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
319
/// Representation of a configured resource limiter for a store.
///
/// This is acquired with `resource_limiter_and_store_opaque` for example and is
/// threaded through to growth operations on tables/memories. Note that this is
/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
/// efficient to pass around (nullable pointer) and it's also notably passed
/// around as an `Option` to represent how this is optionally specified within a
/// store.
pub enum StoreResourceLimiter<'a> {
    /// Borrowed synchronous limiter.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// Borrowed asynchronous limiter whose growth callbacks may await.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
333
impl StoreResourceLimiter<'_> {
    /// Asks the limiter whether a memory may grow from `current` to
    /// `desired` (with optional `maximum`), awaiting if the limiter is
    /// asynchronous. `Ok(false)` denies the growth without an error.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a memory growth operation failed with
    /// `error`. Synchronous in both variants.
    pub(crate) fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Asks the limiter whether a table may grow from `current` to
    /// `desired` elements (with optional `maximum`), awaiting if the
    /// limiter is asynchronous.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a table growth operation failed with
    /// `error`. Synchronous in both variants.
    pub(crate) fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
377
/// Internal storage for a store's configured call hook, if any.
enum CallHookInner<T: 'static> {
    /// Synchronous hook invoked on each host<->wasm transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook; see `CallHookHandler`.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited variant so `T` is referenced even when the `call-hook`
    /// feature (and thus the variants above) is disabled.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}
392
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks and continue
    /// execution.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Like [`UpdateDeadline::Yield`]: extend the deadline by the specified
    /// number of ticks after yielding to the async executor loop. This can
    /// only be used with an async [`Store`] configured via
    /// [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
420
421// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`
422impl<T> Deref for StoreInner<T> {
423    type Target = StoreOpaque;
424    fn deref(&self) -> &Self::Target {
425        &self.inner
426    }
427}
428
429impl<T> DerefMut for StoreInner<T> {
430    fn deref_mut(&mut self) -> &mut Self::Target {
431        &mut self.inner
432    }
433}
434
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store is associated with.
    engine: Engine,
    /// Store state shared with the VM runtime; see `VMStoreContext`.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All core-wasm instances created within this store, indexed by id.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    /// Count of component instances created within this store.
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Custom signal handler, if one was configured for this store.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules known to this store; instances refer back into
    /// this via `RegisteredModuleId`.
    modules: ModuleRegistry,
    /// Storage for `VMFuncRef`s owned by this store.
    func_refs: FuncRefs,
    /// Host-defined globals created within this store.
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// Fiber/async state used when async support is enabled; see
    /// `fiber::AsyncState`.
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-pointer to the enclosing `StoreInner<T>` as a `dyn VMStore`
    /// trait object; see `StorePtr` for why this exists.
    traitobj: StorePtr,
    /// The `VMContext` of this store's dummy "default callee" instance,
    /// used by `Func::call` when there is no wasm caller; see `Store::new`.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
    #[cfg(feature = "component-model")]
    concurrent_state: concurrent::ConcurrentState,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
554
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// `None` only between construction of the `StoreOpaque` and the point in
/// `Store::new` where the pointer is filled in.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);

// We can't make `VMStore: Send + Sync` because that requires making all of
// Wasmtime's internals generic over the `Store`'s `T`. So instead, we take care
// in the whole VM layer to only use the `VMStore` in ways that are `Send`- and
// `Sync`-safe and we have to have these unsafe impls.
//
// SAFETY: upheld by the VM layer's discipline described above.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
568
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm runs in the Pulley interpreter stored here.
    Interpreter(Interpreter),
    /// Wasm runs as natively-compiled code; only available when a host
    /// compiler backend exists for this platform.
    #[cfg(has_host_compiler_backend)]
    Native,
}
578
impl Executor {
    /// Creates the executor appropriate for `engine`: a Pulley interpreter
    /// when the engine targets Pulley, otherwise native execution.
    pub(crate) fn new(engine: &Engine) -> Self {
        // With a native backend available, pick the interpreter only when the
        // `pulley` feature is on and the engine's target is actually Pulley.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a native backend the interpreter is the only option, and
        // the engine's target must be Pulley.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
594
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; carries no state.
    #[cfg(has_host_compiler_backend)]
    Native,
}
601
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is held in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered. False when GC support is
    /// compiled out or the store has no GC heap; controls `Drop`.
    entered: bool,
}
608
impl<'a> AutoAssertNoGc<'a> {
    /// Enters a no-GC scope on `store`'s GC heap (if it has one) that lasts
    /// until this value is dropped.
    #[inline]
    pub fn new(store: &'a mut StoreOpaque) -> Self {
        // Only enter a scope when GC support is compiled in and this store
        // actually has a GC heap; otherwise there's nothing to assert
        // against and `entered: false` makes `Drop` a no-op.
        let entered = if !cfg!(feature = "gc") {
            false
        } else if let Some(gc_store) = store.gc_store.as_mut() {
            gc_store.gc_heap.enter_no_gc_scope();
            true
        } else {
            false
        };

        AutoAssertNoGc { store, entered }
    }

    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
    /// disables checks for no GC happening for the duration of this value.
    ///
    /// This is used when it is statically otherwise known that a GC doesn't
    /// happen for the various types involved.
    ///
    /// # Unsafety
    ///
    /// This method is `unsafe` as it does not provide the same safety
    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
    /// caller that a GC doesn't happen.
    #[inline]
    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
        // In debug builds keep the real checks to catch callers that
        // violate the contract; release builds skip them entirely.
        if cfg!(debug_assertions) {
            AutoAssertNoGc::new(store)
        } else {
            AutoAssertNoGc {
                store,
                entered: false,
            }
        }
    }
}
647
648impl core::ops::Deref for AutoAssertNoGc<'_> {
649    type Target = StoreOpaque;
650
651    #[inline]
652    fn deref(&self) -> &Self::Target {
653        &*self.store
654    }
655}
656
657impl core::ops::DerefMut for AutoAssertNoGc<'_> {
658    #[inline]
659    fn deref_mut(&mut self) -> &mut Self::Target {
660        &mut *self.store
661    }
662}
663
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the no-GC scope if `new` actually entered one; when
        // `entered` is true a GC store must exist, hence the unwrap.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
672
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The underlying runtime instance handle.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
681
/// Distinguishes real instances from internal dummy instances within a store.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
698
699impl<T> Store<T> {
700    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
701    /// `data` provided.
702    ///
703    /// The created [`Store`] will place no additional limits on the size of
704    /// linear memories or tables at runtime. Linear memories and tables will
705    /// be allowed to grow to any upper limit specified in their definitions.
706    /// The store will limit the number of instances, linear memories, and
707    /// tables created to 10,000. This can be overridden with the
708    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        // Reserve a protection key from the engine's allocator, if one is
        // available; it's later activated/deactivated around wasm calls in
        // `call_hook_slow_path`.
        let pkey = engine.allocator().next_available_pkey();

        // All internal state starts out empty/default. Fields behind `cfg`
        // attributes only exist when the corresponding Cargo feature (or
        // target capability) is enabled.
        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // These two are placeholders; they're filled in below once the
            // `Box` gives the store a stable address.
            traitobj: StorePtr(None),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model")]
            concurrent_state: Default::default(),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        });

        // Record a pointer to the user's `T` inside the `VMStoreContext`. All
        // subsequent accesses to `T` re-derive their provenance from this
        // pointer (see `StoreInner::data`), so it must be initialized before
        // any such access happens.
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Self-referential pointer back to this allocation; the `Box` above
        // (plus the `PhantomPinned` marker) keeps the address stable.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::new(StaticModuleIndex::from_u32(
            0,
        )));
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let id = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                &shim,
                Default::default(),
            ))
            .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
818
819    /// Access the underlying `T` data owned by this `Store`.
820    #[inline]
821    pub fn data(&self) -> &T {
822        self.inner.data()
823    }
824
825    /// Access the underlying `T` data owned by this `Store`.
826    #[inline]
827    pub fn data_mut(&mut self) -> &mut T {
828        self.inner.data_mut()
829    }
830
    /// Runs order-sensitive teardown that must happen before this store's
    /// contents are torn down (e.g. from `into_data`).
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        //
        // This will also drop any futures which might use a `&Accessor` fields
        // in their `Drop::drop` implementations, in which case they'll need to
        // be called from within the context of a `tls::set` closure.
        #[cfg(feature = "component-model-async")]
        ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
847
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            // SAFETY: `self.inner` is initialized and is never observed again:
            // `forget(self)` below prevents `Drop for Store<T>` from running
            // on the now-taken value.
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
882
883    /// Configures the [`ResourceLimiter`] used to limit resource creation
884    /// within this [`Store`].
885    ///
886    /// Whenever resources such as linear memory, tables, or instances are
887    /// allocated the `limiter` specified here is invoked with the store's data
888    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
889    /// being allocated. The returned [`ResourceLimiter`] is intended to live
890    /// within the `T` itself, for example by storing a
891    /// [`StoreLimits`](crate::StoreLimits).
892    ///
893    /// Note that this limiter is only used to limit the creation/growth of
894    /// resources in the future, this does not retroactively attempt to apply
895    /// limits to the [`Store`].
896    ///
897    /// # Examples
898    ///
899    /// ```
900    /// use wasmtime::*;
901    ///
902    /// struct MyApplicationState {
903    ///     my_state: u32,
904    ///     limits: StoreLimits,
905    /// }
906    ///
907    /// let engine = Engine::default();
908    /// let my_state = MyApplicationState {
909    ///     my_state: 42,
910    ///     limits: StoreLimitsBuilder::new()
911    ///         .memory_size(1 << 20 /* 1 MB */)
912    ///         .instances(2)
913    ///         .build(),
914    /// };
915    /// let mut store = Store::new(&engine, my_state);
916    /// store.limiter(|state| &mut state.limits);
917    ///
918    /// // Creation of smaller memories is allowed
919    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
920    ///
921    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
922    /// // configured
923    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
924    ///
925    /// // The number of instances in this store is limited to 2, so the third
926    /// // instance here should fail.
927    /// let module = Module::new(&engine, "(module)").unwrap();
928    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
929    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
930    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
931    /// ```
932    ///
933    /// [`ResourceLimiter`]: crate::ResourceLimiter
934    pub fn limiter(
935        &mut self,
936        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
937    ) {
938        // Apply the limits on instances, tables, and memory given by the limiter:
939        let inner = &mut self.inner;
940        let (instance_limit, table_limit, memory_limit) = {
941            let l = limiter(inner.data_mut());
942            (l.instances(), l.tables(), l.memories())
943        };
944        let innermost = &mut inner.inner;
945        innermost.instance_limit = instance_limit;
946        innermost.table_limit = table_limit;
947        innermost.memory_limit = memory_limit;
948
949        // Save the limiter accessor function:
950        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
951    }
952
953    /// Configure a function that runs on calls and returns between WebAssembly
954    /// and host code.
955    ///
956    /// The function is passed a [`CallHook`] argument, which indicates which
957    /// state transition the VM is making.
958    ///
959    /// This function may return a [`Trap`]. If a trap is returned when an
960    /// import was called, it is immediately raised as-if the host import had
961    /// returned the trap. If a trap is returned after wasm returns to the host
962    /// then the wasm function's result is ignored and this trap is returned
963    /// instead.
964    ///
965    /// After this function returns a trap, it may be called for subsequent returns
966    /// to host or wasm code as the trap propagates to the root call.
967    #[cfg(feature = "call-hook")]
968    pub fn call_hook(
969        &mut self,
970        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
971    ) {
972        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
973    }
974
975    /// Returns the [`Engine`] that this store is associated with.
976    pub fn engine(&self) -> &Engine {
977        self.inner.engine()
978    }
979
980    /// Perform garbage collection.
981    ///
982    /// Note that it is not required to actively call this function. GC will
983    /// automatically happen according to various internal heuristics. This is
984    /// provided if fine-grained control over the GC is desired.
985    ///
986    /// If you are calling this method after an attempted allocation failed, you
987    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
988    /// When you do so, this method will attempt to create enough space in the
989    /// GC heap for that allocation, so that it will succeed on the next
990    /// attempt.
991    ///
992    /// This method is only available when the `gc` Cargo feature is enabled.
993    #[cfg(feature = "gc")]
994    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
995        StoreContextMut(&mut self.inner).gc(why)
996    }
997
998    /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must
999    /// be configured via [`Store::set_fuel`].
1000    ///
1001    /// # Errors
1002    ///
1003    /// This function will return an error if fuel consumption is not enabled
1004    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
1005    pub fn get_fuel(&self) -> Result<u64> {
1006        self.inner.get_fuel()
1007    }
1008
1009    /// Set the fuel to this [`Store`] for wasm to consume while executing.
1010    ///
1011    /// For this method to work fuel consumption must be enabled via
1012    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
1013    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
1014    /// immediately trap). This function must be called for the store to have
1015    /// some fuel to allow WebAssembly to execute.
1016    ///
1017    /// Most WebAssembly instructions consume 1 unit of fuel. Some
1018    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
1019    /// units, as any execution cost associated with them involves other
1020    /// instructions which do consume fuel.
1021    ///
1022    /// Note that when fuel is entirely consumed it will cause wasm to trap.
1023    ///
1024    /// # Errors
1025    ///
1026    /// This function will return an error if fuel consumption is not enabled via
1027    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
1028    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1029        self.inner.set_fuel(fuel)
1030    }
1031
1032    /// Configures a [`Store`] to yield execution of async WebAssembly code
1033    /// periodically.
1034    ///
1035    /// When a [`Store`] is configured to consume fuel with
1036    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
1037    /// configure WebAssembly to be suspended and control will be yielded back to the
1038    /// caller every `interval` units of fuel consumed. This is only suitable with use of
1039    /// a store associated with an [async config](crate::Config::async_support) because
1040    /// only then are futures used and yields are possible.
1041    ///
1042    /// The purpose of this behavior is to ensure that futures which represent
1043    /// execution of WebAssembly do not execute too long inside their
1044    /// `Future::poll` method. This allows for some form of cooperative
1045    /// multitasking where WebAssembly will voluntarily yield control
1046    /// periodically (based on fuel consumption) back to the running thread.
1047    ///
1048    /// Note that futures returned by this crate will automatically flag
1049    /// themselves to get re-polled if a yield happens. This means that
1050    /// WebAssembly will continue to execute, just after giving the host an
1051    /// opportunity to do something else.
1052    ///
1053    /// The `interval` parameter indicates how much fuel should be
1054    /// consumed between yields of an async future. When fuel runs out wasm will trap.
1055    ///
1056    /// # Error
1057    ///
1058    /// This method will error if it is not called on a store associated with an [async
1059    /// config](crate::Config::async_support).
1060    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1061        self.inner.fuel_async_yield_interval(interval)
1062    }
1063
1064    /// Sets the epoch deadline to a certain number of ticks in the future.
1065    ///
1066    /// When the Wasm guest code is compiled with epoch-interruption
1067    /// instrumentation
1068    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
1069    /// and when the `Engine`'s epoch is incremented
1070    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
1071    /// past a deadline, execution can be configured to either trap or
1072    /// yield and then continue.
1073    ///
1074    /// This deadline is always set relative to the current epoch:
1075    /// `ticks_beyond_current` ticks in the future. The deadline can
1076    /// be set explicitly via this method, or refilled automatically
1077    /// on a yield if configured via
1078    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
1079    /// this method is invoked, the deadline is reached when
1080    /// [`Engine::increment_epoch()`] has been invoked at least
1081    /// `ticks_beyond_current` times.
1082    ///
1083    /// By default a store will trap immediately with an epoch deadline of 0
1084    /// (which has always "elapsed"). This method is required to be configured
1085    /// for stores with epochs enabled to some future epoch deadline.
1086    ///
1087    /// See documentation on
1088    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
1089    /// for an introduction to epoch-based interruption.
1090    #[cfg(target_has_atomic = "64")]
1091    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
1092        self.inner.set_epoch_deadline(ticks_beyond_current);
1093    }
1094
1095    /// Configures epoch-deadline expiration to trap.
1096    ///
1097    /// When epoch-interruption-instrumented code is executed on this
1098    /// store and the epoch deadline is reached before completion,
1099    /// with the store configured in this way, execution will
1100    /// terminate with a trap as soon as an epoch check in the
1101    /// instrumented code is reached.
1102    ///
1103    /// This behavior is the default if the store is not otherwise
1104    /// configured via
1105    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
1106    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
1107    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
1108    ///
1109    /// This setting is intended to allow for coarse-grained
1110    /// interruption, but not a deterministic deadline of a fixed,
1111    /// finite interval. For deterministic interruption, see the
1112    /// "fuel" mechanism instead.
1113    ///
1114    /// Note that when this is used it's required to call
1115    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
1116    /// trap.
1117    ///
1118    /// See documentation on
1119    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
1120    /// for an introduction to epoch-based interruption.
1121    #[cfg(target_has_atomic = "64")]
1122    pub fn epoch_deadline_trap(&mut self) {
1123        self.inner.epoch_deadline_trap();
1124    }
1125
1126    /// Configures epoch-deadline expiration to invoke a custom callback
1127    /// function.
1128    ///
1129    /// When epoch-interruption-instrumented code is executed on this
1130    /// store and the epoch deadline is reached before completion, the
1131    /// provided callback function is invoked.
1132    ///
1133    /// This callback should either return an [`UpdateDeadline`], or
1134    /// return an error, which will terminate execution with a trap.
1135    ///
1136    /// The [`UpdateDeadline`] is a positive number of ticks to
1137    /// add to the epoch deadline, as well as indicating what
1138    /// to do after the callback returns. If the [`Store`] is
1139    /// configured with async support, then the callback may return
1140    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
1141    /// to yield to the async executor before updating the epoch deadline.
1142    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
1143    /// update the epoch deadline immediately.
1144    ///
1145    /// This setting is intended to allow for coarse-grained
1146    /// interruption, but not a deterministic deadline of a fixed,
1147    /// finite interval. For deterministic interruption, see the
1148    /// "fuel" mechanism instead.
1149    ///
1150    /// See documentation on
1151    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
1152    /// for an introduction to epoch-based interruption.
1153    #[cfg(target_has_atomic = "64")]
1154    pub fn epoch_deadline_callback(
1155        &mut self,
1156        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
1157    ) {
1158        self.inner.epoch_deadline_callback(Box::new(callback));
1159    }
1160
1161    /// Set an exception as the currently pending exception, and
1162    /// return an error that propagates the throw.
1163    ///
1164    /// This method takes an exception object and stores it in the
1165    /// `Store` as the currently pending exception. This is a special
1166    /// rooted slot that holds the exception as long as it is
1167    /// propagating. This method then returns a `ThrownException`
1168    /// error, which is a special type that indicates a pending
1169    /// exception exists. When this type propagates as an error
1170    /// returned from a Wasm-to-host call, the pending exception is
1171    /// thrown within the Wasm context, and either caught or
1172    /// propagated further to the host-to-Wasm call boundary. If an
1173    /// exception is thrown out of Wasm (or across Wasm from a
1174    /// hostcall) back to the host-to-Wasm call boundary, *that*
1175    /// invocation returns a `ThrownException`, and the pending
1176    /// exception slot is again set. In other words, the
1177    /// `ThrownException` error type should propagate upward exactly
1178    /// and only when a pending exception is set.
1179    ///
1180    /// To inspect or take the pending exception, use
1181    /// [`peek_pending_exception`] and [`take_pending_exception`]. For
1182    /// a convenient wrapper that invokes a closure and provides any
1183    /// caught exception from the closure to a separate handler
1184    /// closure, see [`StoreContextMut::catch`].
1185    ///
1186    /// This method is parameterized over `R` for convenience, but
1187    /// will always return an `Err`.
1188    ///
1189    /// # Panics
1190    ///
1191    /// - Will panic if `exception` has been unrooted.
1192    /// - Will panic if `exception` is a null reference.
1193    /// - Will panic if a pending exception has already been set.
1194    #[cfg(feature = "gc")]
1195    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
1196        self.inner.throw_impl(exception);
1197        Err(ThrownException)
1198    }
1199
1200    /// Take the currently pending exception, if any, and return it,
1201    /// removing it from the "pending exception" slot.
1202    ///
1203    /// If there is no pending exception, returns `None`.
1204    ///
1205    /// Note: the returned exception is a LIFO root (see
1206    /// [`crate::Rooted`]), rooted in the current handle scope. Take
1207    /// care to ensure that it is re-rooted or otherwise does not
1208    /// escape this scope! It is usually best to allow an exception
1209    /// object to be rooted in the store's "pending exception" slot
1210    /// until the final consumer has taken it, rather than root it and
1211    /// pass it up the callstack in some other way.
1212    ///
1213    /// This method is useful to implement ad-hoc exception plumbing
1214    /// in various ways, but for the most idiomatic handling, see
1215    /// [`StoreContextMut::catch`].
1216    #[cfg(feature = "gc")]
1217    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
1218        self.inner.take_pending_exception_rooted()
1219    }
1220
1221    /// Tests whether there is a pending exception.
1222    ///
1223    /// Ordinarily, a pending exception will be set on a store if and
1224    /// only if a host-side callstack is propagating a
1225    /// [`crate::ThrownException`] error. The final consumer that
1226    /// catches the exception takes it; it may re-place it to re-throw
1227    /// (using [`throw`]) if it chooses not to actually handle the
1228    /// exception.
1229    ///
1230    /// This method is useful to tell whether a store is in this
1231    /// state, but should not be used as part of the ordinary
1232    /// exception-handling flow. For the most idiomatic handling, see
1233    /// [`StoreContextMut::catch`].
1234    #[cfg(feature = "gc")]
1235    pub fn has_pending_exception(&self) -> bool {
1236        self.inner.pending_exception.is_some()
1237    }
1238
    /// Provide an object that views Wasm stack state, including Wasm
    /// VM-level values (locals and operand stack), when debugging is
    /// enabled.
    ///
    /// This object views the frames from the most recent Wasm entry
    /// onward (up to the exit that allows this host code to run). Any
    /// Wasm stack frames upward from the most recent entry to Wasm
    /// are not visible to this cursor.
    ///
    /// Returns `None` if debug instrumentation is not enabled for
    /// the engine containing this store.
    #[cfg(feature = "debug")]
    pub fn debug_frames(&mut self) -> Option<crate::DebugFrameCursor<'_, T>> {
        // Delegate to the `StoreContextMut` implementation; the returned
        // cursor borrows this store for its lifetime.
        self.as_context_mut().debug_frames()
    }
1254
1255    /// Set the debug callback on this store.
1256    ///
1257    /// See [`crate::DebugHandler`] for more documentation.
1258    ///
1259    /// # Panics
1260    ///
1261    /// - Will panic if this store is not configured for async
1262    ///   support.
1263    /// - Will panic if guest-debug support was not enabled via
1264    ///   [`crate::Config::guest_debug`].
1265    #[cfg(feature = "debug")]
1266    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
1267    where
1268        // We require `Send` here because the debug handler becomes
1269        // referenced from a future: when `DebugHandler::handle` is
1270        // invoked, its `self` references the `handler` with the
1271        // user's state. Note that we are careful to keep this bound
1272        // constrained to debug-handler-related code only and not
1273        // propagate it outward to the store in general. The presence
1274        // of the trait implementation serves as a witness that `T:
1275        // Send`. This is required in particular because we will have
1276        // a `&mut dyn VMStore` on the stack when we pause a fiber
1277        // with `block_on` to run a debugger hook; that `VMStore` must
1278        // be a `Store<T> where T: Send`.
1279        T: Send,
1280    {
1281        assert!(
1282            self.inner.async_support(),
1283            "debug hooks rely on async support"
1284        );
1285        assert!(
1286            self.engine().tunables().debug_guest,
1287            "debug hooks require guest debugging to be enabled"
1288        );
1289        self.inner.debug_handler = Some(Box::new(handler));
1290    }
1291
1292    /// Clear the debug handler on this store. If any existed, it will
1293    /// be dropped.
1294    #[cfg(feature = "debug")]
1295    pub fn clear_debug_handler(&mut self) {
1296        self.inner.debug_handler = None;
1297    }
1298}
1299
1300impl<'a, T> StoreContext<'a, T> {
1301    pub(crate) fn async_support(&self) -> bool {
1302        self.0.async_support()
1303    }
1304
1305    /// Returns the underlying [`Engine`] this store is connected to.
1306    pub fn engine(&self) -> &Engine {
1307        self.0.engine()
1308    }
1309
1310    /// Access the underlying data owned by this `Store`.
1311    ///
1312    /// Same as [`Store::data`].
1313    pub fn data(&self) -> &'a T {
1314        self.0.data()
1315    }
1316
1317    /// Returns the remaining fuel in this store.
1318    ///
1319    /// For more information see [`Store::get_fuel`].
1320    pub fn get_fuel(&self) -> Result<u64> {
1321        self.0.get_fuel()
1322    }
1323}
1324
1325impl<'a, T> StoreContextMut<'a, T> {
1326    /// Access the underlying data owned by this `Store`.
1327    ///
1328    /// Same as [`Store::data`].
1329    pub fn data(&self) -> &T {
1330        self.0.data()
1331    }
1332
1333    /// Access the underlying data owned by this `Store`.
1334    ///
1335    /// Same as [`Store::data_mut`].
1336    pub fn data_mut(&mut self) -> &mut T {
1337        self.0.data_mut()
1338    }
1339
1340    /// Returns the underlying [`Engine`] this store is connected to.
1341    pub fn engine(&self) -> &Engine {
1342        self.0.engine()
1343    }
1344
1345    /// Perform garbage collection of `ExternRef`s.
1346    ///
1347    /// Same as [`Store::gc`].
1348    ///
1349    /// This method is only available when the `gc` Cargo feature is enabled.
1350    #[cfg(feature = "gc")]
1351    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
1352        assert!(!self.0.async_support());
1353        let (mut limiter, store) = self.0.resource_limiter_and_store_opaque();
1354        vm::assert_ready(store.gc(limiter.as_mut(), None, why.map(|e| e.bytes_needed())));
1355    }
1356
1357    /// Returns remaining fuel in this store.
1358    ///
1359    /// For more information see [`Store::get_fuel`]
1360    pub fn get_fuel(&self) -> Result<u64> {
1361        self.0.get_fuel()
1362    }
1363
1364    /// Set the amount of fuel in this store.
1365    ///
1366    /// For more information see [`Store::set_fuel`]
1367    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1368        self.0.set_fuel(fuel)
1369    }
1370
1371    /// Configures this `Store` to periodically yield while executing futures.
1372    ///
1373    /// For more information see [`Store::fuel_async_yield_interval`]
1374    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1375        self.0.fuel_async_yield_interval(interval)
1376    }
1377
1378    /// Sets the epoch deadline to a certain number of ticks in the future.
1379    ///
1380    /// For more information see [`Store::set_epoch_deadline`].
1381    #[cfg(target_has_atomic = "64")]
1382    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
1383        self.0.set_epoch_deadline(ticks_beyond_current);
1384    }
1385
1386    /// Configures epoch-deadline expiration to trap.
1387    ///
1388    /// For more information see [`Store::epoch_deadline_trap`].
1389    #[cfg(target_has_atomic = "64")]
1390    pub fn epoch_deadline_trap(&mut self) {
1391        self.0.epoch_deadline_trap();
1392    }
1393
1394    /// Set an exception as the currently pending exception, and
1395    /// return an error that propagates the throw.
1396    ///
1397    /// See [`Store::throw`] for more details.
1398    #[cfg(feature = "gc")]
1399    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
1400        self.0.inner.throw_impl(exception);
1401        Err(ThrownException)
1402    }
1403
1404    /// Take the currently pending exception, if any, and return it,
1405    /// removing it from the "pending exception" slot.
1406    ///
1407    /// See [`Store::take_pending_exception`] for more details.
1408    #[cfg(feature = "gc")]
1409    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
1410        self.0.inner.take_pending_exception_rooted()
1411    }
1412
1413    /// Tests whether there is a pending exception.
1414    ///
1415    /// See [`Store::has_pending_exception`] for more details.
1416    #[cfg(feature = "gc")]
1417    pub fn has_pending_exception(&self) -> bool {
1418        self.0.inner.pending_exception.is_some()
1419    }
1420}
1421
1422impl<T> StoreInner<T> {
    /// Shared access to the user's `T`, re-derived through the
    /// `VMStoreContext::store_data` pointer so that pointer's provenance is
    /// not invalidated.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }
1440
    /// Splits `&mut self` into disjoint borrows of the user's `T`, the
    /// optional resource limiter, and the `StoreOpaque`, so that all three
    /// can be used simultaneously. The `T` borrow goes through the
    /// `VMStoreContext::store_data` pointer's provenance, same as
    /// `StoreInner::data`.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }
1464
1465    #[inline]
1466    fn data_mut(&mut self) -> &mut T {
1467        self.data_limiter_and_opaque().0
1468    }
1469
1470    #[inline]
1471    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
1472        if self.inner.pkey.is_none() && self.call_hook.is_none() {
1473            Ok(())
1474        } else {
1475            self.call_hook_slow_path(s)
1476        }
1477    }
1478
1479    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
1480        if let Some(pkey) = &self.inner.pkey {
1481            let allocator = self.engine().allocator();
1482            match s {
1483                CallHook::CallingWasm | CallHook::ReturningFromHost => {
1484                    allocator.restrict_to_pkey(*pkey)
1485                }
1486                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
1487            }
1488        }
1489
1490        // Temporarily take the configured behavior to avoid mutably borrowing
1491        // multiple times.
1492        if let Some(mut call_hook) = self.call_hook.take() {
1493            let result = self.invoke_call_hook(&mut call_hook, s);
1494            self.call_hook = Some(call_hook);
1495            return result;
1496        }
1497
1498        Ok(())
1499    }
1500
1501    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
1502        match call_hook {
1503            #[cfg(feature = "call-hook")]
1504            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),
1505
1506            #[cfg(all(feature = "async", feature = "call-hook"))]
1507            CallHookInner::Async(handler) => {
1508                if !self.can_block() {
1509                    bail!("couldn't grab async_cx for call hook")
1510                }
1511                return (&mut *self)
1512                    .as_context_mut()
1513                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
1514            }
1515
1516            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
1517                let _ = s;
1518                match *uninhabited {}
1519            }
1520        }
1521    }
1522
1523    #[cfg(not(feature = "async"))]
1524    fn flush_fiber_stack(&mut self) {
1525        // noop shim so code can assume this always exists.
1526    }
1527}
1528
/// Computes the total fuel remaining given the VM's injected fuel counter and
/// the store's reserve.
///
/// `injected_fuel` is the VM-side counter: it is seeded negative (see
/// `set_fuel` below) and incremented towards zero as fuel is consumed, so a
/// negative value means `-injected_fuel` units of active fuel remain, while a
/// positive value means the VM overshot and the overshoot is debited from the
/// reserve. The result saturates at the bounds of `u64`.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // Equivalent to `fuel_reserve.saturating_add_signed(-injected_fuel)`
    // except that this formulation cannot overflow (panicking in debug
    // builds) when `injected_fuel` is `i64::MIN`, whose negation is not
    // representable as an `i64`.
    if injected_fuel >= 0 {
        fuel_reserve.saturating_sub(injected_fuel as u64)
    } else {
        fuel_reserve.saturating_add(injected_fuel.unsigned_abs())
    }
}
1532
1533// Add remaining fuel from the reserve into the active fuel if there is any left.
1534fn refuel(
1535    injected_fuel: &mut i64,
1536    fuel_reserve: &mut u64,
1537    yield_interval: Option<NonZeroU64>,
1538) -> bool {
1539    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1540    if fuel > 0 {
1541        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1542        true
1543    } else {
1544        false
1545    }
1546}
1547
/// Splits `new_fuel_amount` between the VM's active fuel counter and the
/// store-side reserve.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // When yielding periodically only hand the VM one interval's worth of
    // fuel at a time; additionally the VM's counter is an `i64`, so never
    // inject more than `i64::MAX` at once.
    let cap = yield_interval
        .map_or(u64::MAX, |interval| interval.get())
        .min(i64::MAX as u64);
    let injected = new_fuel_amount.min(cap);
    // Whatever wasn't injected is banked in the reserve for later refueling.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM counts fuel by incrementing this value and halts once it goes
    // positive, so seed it with the negated injection amount.
    *injected_fuel = -(injected as i64);
}
1567
1568#[doc(hidden)]
1569impl StoreOpaque {
    /// Returns this store's unique identifier.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1573
1574    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1575        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1576            let new = slot.saturating_add(amt);
1577            if new > max {
1578                bail!("resource limit exceeded: {desc} count too high at {new}");
1579            }
1580            *slot = new;
1581            Ok(())
1582        }
1583
1584        let module = module.env_module();
1585        let memories = module.num_defined_memories();
1586        let tables = module.num_defined_tables();
1587
1588        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1589        bump(
1590            &mut self.memory_count,
1591            self.memory_limit,
1592            memories,
1593            "memory",
1594        )?;
1595        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1596
1597        Ok(())
1598    }
1599
    /// Returns whether this store is configured for async operation.
    #[inline]
    pub fn async_support(&self) -> bool {
        // Statically `false` when the `async` feature is compiled out.
        cfg!(feature = "async") && self.engine().config().async_support
    }
1604
    /// Returns the engine this store is associated with.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1609
    /// Shared access to the items owned by this store.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1614
    /// Mutable access to the items owned by this store.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1619
    /// Simultaneously borrows the store's data mutably and its module
    /// registry immutably.
    pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
        (&mut self.store_data, &self.modules)
    }
1623
    /// Returns the registry of modules registered within this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1628
    /// Registers `module` with this store, returning its registration id.
    pub(crate) fn register_module(&mut self, module: &Module) -> Result<RegisteredModuleId> {
        self.modules.register_module(module, &self.engine)
    }
1632
    /// Registers `component` with this store's module registry.
    #[cfg(feature = "component-model")]
    pub(crate) fn register_component(
        &mut self,
        component: &crate::component::Component,
    ) -> Result<()> {
        self.modules.register_component(component, &self.engine)
    }
1640
    /// Simultaneously borrows the store's function references mutably and its
    /// module registry immutably.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }
1644
    /// Shared access to the globals created by the host (as opposed to those
    /// defined by instantiated modules).
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }
1650
    /// Mutable access to the globals created by the host.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1656
1657    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1658        instance.store_id().assert_belongs_to(self.id());
1659        match self.instances[instance.instance()].kind {
1660            StoreInstanceKind::Dummy => None,
1661            StoreInstanceKind::Real { module_id } => {
1662                let module = self
1663                    .modules()
1664                    .module_by_id(module_id)
1665                    .expect("should always have a registered module for real instances");
1666                Some(module)
1667            }
1668        }
1669    }
1670
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        // Direct index into this store's instance table.
        self.instances[id].handle.get()
    }
1680
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        // Direct index into this store's instance table.
        self.instances[id].handle.get_mut()
    }
1690
    /// Accessor from `InstanceId` to both `Pin<&mut vm::Instance>`
    /// and `&ModuleRegistry`.
    ///
    /// Borrowing both at once lets callers consult the registry while holding
    /// the instance mutably.
    #[inline]
    pub fn instance_and_module_registry_mut(
        &mut self,
        id: InstanceId,
    ) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
        (self.instances[id].handle.get_mut(), &self.modules)
    }
1700
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` returns `None` on overlapping/invalid ids, which
        // the `unwrap` below turns into the documented panic.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1726
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    ///
    /// Borrows both pieces simultaneously from disjoint fields of `self`.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1734
    /// Tuple of `Self::optional_gc_store_mut`, `Self::modules`, and
    /// `Self::instance_mut`.
    ///
    /// Borrows all three pieces simultaneously from disjoint fields of
    /// `self`.
    pub fn optional_gc_store_and_registry_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (
        Option<&mut GcStore>,
        &ModuleRegistry,
        Pin<&mut vm::Instance>,
    ) {
        (
            self.gc_store.as_mut(),
            &self.modules,
            self.instances[id].handle.get_mut(),
        )
    }
1751
    /// Get all instances (ignoring dummy instances) within this store.
    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
        // Collect the ids into a `Vec` first so the borrow of
        // `self.instances` ends before `self` is captured by the closure
        // handed to `map` below.
        let instances = self
            .instances
            .iter()
            .filter_map(|(id, inst)| {
                // Dummy instances (e.g. those backing host-created objects)
                // are not surfaced here.
                if let StoreInstanceKind::Dummy = inst.kind {
                    None
                } else {
                    Some(id)
                }
            })
            .collect::<Vec<_>>();
        instances
            .into_iter()
            .map(|i| Instance::from_wasmtime(i, self))
    }
1769
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1780
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                // `f` receives `&mut Self` so it may freely mutate the store.
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1794
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                // `f` receives `&mut Self` so it may freely mutate the store.
                f(self, global);
            }
        }
    }
1812
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1817
    /// Shared access to the `VMStoreContext` shared with compiled Wasm code.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1822
    /// Mutable access to the `VMStoreContext` shared with compiled Wasm code.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1827
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // Fast path: already allocated. Written as a test-then-unwrap rather
        // than `if let` — presumably to keep the early-returned borrow from
        // conflicting with the `allocate_gc_store` call below.
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1845
    /// Slow path of `ensure_gc_store`: allocates this store's GC heap and
    /// publishes it into the `VMStoreContext`.
    ///
    /// Marked `#[inline(never)]` to keep this cold path out of the inlined
    /// `ensure_gc_store` fast path.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // A GC heap must not already exist, and the VM-visible heap fields
        // must still be in their "unallocated" state.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        // Publish the heap's memory definition so compiled code can see it.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // Real implementation, compiled only when GC support is enabled.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::{StaticModuleIndex, packed_option::ReservedValue};

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(wasmtime_environ::Module::new(
                    StaticModuleIndex::from_u32(0),
                ))),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Stub used when the `gc` feature is compiled out: always errors.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1915
    /// Helper method to require that a `GcStore` was previously allocated for
    /// this store, failing if it has not yet been allocated.
    ///
    /// Note that this should only be used in a context where allocation of a
    /// `GcStore` is sure to have already happened prior, otherwise this may
    /// return a confusing error to embedders which is a bug in Wasmtime.
    ///
    /// Some situations where it's safe to call this method:
    ///
    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
    ///   this shows proof that the `GcStore` was previously allocated.
    /// * During instantiation and instance's `needs_gc_heap` flag will be
    ///   handled and instantiation will automatically create a GC store.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            // See note above: reaching this arm generally indicates a bug.
            None => bail!("GC heap not initialized yet"),
        }
    }
1937
    /// Same as [`Self::require_gc_store`], but mutable.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
        match &mut self.gc_store {
            Some(gc_store) => Ok(gc_store),
            // See `require_gc_store`: reaching this arm generally indicates a
            // bug in the caller.
            None => bail!("GC heap not initialized yet"),
        }
    }
1947
    /// Attempts to access the GC store that has been previously allocated.
    ///
    /// This method will return `Some` if the GC store was previously allocated.
    /// A `None` return value means either that the GC heap hasn't yet been
    /// allocated or that it does not need to be allocated for this store. Note
    /// that to require a GC store in a particular situation it's recommended to
    /// use [`Self::require_gc_store_mut`] instead.
    #[inline]
    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
            // With GC compiled out or disabled no store should ever have been
            // allocated; assert that invariant in debug builds.
            debug_assert!(self.gc_store.is_none());
            None
        } else {
            self.gc_store.as_mut()
        }
    }
1964
    /// Helper to assert that a GC store was previously allocated and is
    /// present.
    ///
    /// # Panics
    ///
    /// This method will panic if the GC store has not yet been allocated. This
    /// should only be used in a context where there's an existing GC reference,
    /// for example, or if `ensure_gc_store` has already been called.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1980
    /// Same as [`Self::unwrap_gc_store`], but mutable.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
1989
    /// Shared access to this store's set of embedder-rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1994
    /// Mutable access to this store's set of embedder-rooted GC references.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
2000
    /// Exits the LIFO rooting scope identified by `scope`, dropping the roots
    /// registered after it began.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
2005
    /// Performs a garbage collection of this store's GC heap, if one has been
    /// allocated.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots).await;
        let async_yield = self.async_support();
        // NOTE(review): `roots.iter()` is an unsafe call; the roots list is
        // kept alive across the collection by this function's local `roots`.
        self.unwrap_gc_store_mut()
            .gc(async_yield, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
2031
    /// Gathers every GC root — Wasm stacks, suspended continuations,
    /// vmctx-reachable globals/tables, user roots, and any pending exception —
    /// into `gc_roots_list`, yielding to the async executor between phases
    /// when async support is enabled.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            #[cfg(feature = "async")]
            if self.async_support() {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2062
    /// Records all live GC references found in a single Wasm stack `frame`
    /// into `gc_roots_list`, using the owning module's stack maps (and, with
    /// the `debug` feature, its frame tables).
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        // The frame's PC identifies which module (and thus which stack maps)
        // covers this frame.
        let (module_with_code, _offset) = self
            .modules()
            .module_and_code_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // With debug info available, frame tables may identify additional
        // GC-reference slots in this frame.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_with_code.module().frame_table() {
            let relpc = module_with_code
                .text_offset(pc)
                .expect("PC should be within module");
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2109
    /// Reads one Wasm stack slot and, if it holds a non-null GC reference,
    /// adds that slot to `gc_roots_list` as a Wasm stack root.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be a valid, readable pointer to a GC-reference slot
    /// in a live Wasm frame.
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        // SAFETY: caller guarantees `stack_slot` is valid to read.
        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2126
    /// Walks the current Wasm stack via a backtrace, tracing each frame's GC
    /// roots into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            // Visit every frame; never cut the backtrace short.
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2139
    /// Traces GC roots held on the stacks of this store's suspended
    /// stack-switching continuations into `gc_roots_list`.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    // Only suspended continuations have a stack that isn't
                    // otherwise reachable; walk it frame by frame.
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2178
    /// Traces GC roots reachable through instance state — every global and
    /// every table in the store — into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
2186
    /// Traces the embedder-created (user) roots into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
2193
    /// Traces the store's pending exception, if any, as a GC root so it
    /// survives collection while waiting to be taken.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2205
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // Holding the `RegisteredType` here keeps its registration alive for
        // this store's lifetime.
        self.gc_host_alloc_types.insert(ty);
    }
2216
    /// Helper function to execute an `init_gc_ref` barrier when placing
    /// `gc_ref` in `dest`.
    ///
    /// This avoids allocating a `GcStore` where possible.
    pub(crate) fn init_gc_ref(
        &mut self,
        dest: &mut MaybeUninit<Option<VMGcRef>>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if GcStore::needs_init_barrier(gc_ref) {
            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
        } else {
            // No barrier required; write the (i31-copied or `None`) value
            // directly without touching the GC store.
            dest.write(gc_ref.map(|r| r.copy_i31()));
        }
    }
2231
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating a `GcStore` where possible.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2238
    /// Helper function to clone `gc_ref` notably avoiding allocating a
    /// `GcStore` where possible.
    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
        if gc_ref.is_i31() {
            // i31 references are unmanaged values; a plain copy suffices.
            gc_ref.copy_i31()
        } else {
            // Managed references must be cloned through the GC store.
            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
        }
    }
2248
    /// Returns the amount of fuel remaining in this store.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in this store's
    /// engine configuration.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // Read the raw consumed-fuel counter shared with executing wasm and
        // combine it with the store's reserve to get the user-visible amount.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
2257
    /// Attempts to transfer fuel from this store's reserve into the active
    /// fuel counter, returning `true` if any fuel was transferred (i.e.
    /// execution may continue).
    pub(crate) fn refuel(&mut self) -> bool {
        // Mutably access the raw fuel counter shared with executing wasm.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
2266
    /// Sets the total amount of fuel available to wasm in this store to
    /// `fuel`, splitting it between the active counter and the reserve
    /// according to the configured yield interval.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in this store's
    /// engine configuration.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2281
2282    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2283        anyhow::ensure!(
2284            self.engine().tunables().consume_fuel,
2285            "fuel is not configured in this store"
2286        );
2287        anyhow::ensure!(
2288            self.engine().config().async_support,
2289            "async support is not configured in this store"
2290        );
2291        anyhow::ensure!(
2292            interval != Some(0),
2293            "fuel_async_yield_interval must not be 0"
2294        );
2295        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2296        // Reset the fuel active + reserve states by resetting the amount.
2297        self.set_fuel(self.get_fuel()?)
2298    }
2299
2300    #[inline]
2301    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
2302        let handler = self.signal_handler.as_ref()?;
2303        Some(handler)
2304    }
2305
    /// Returns a non-null pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
2310
    /// Returns the `VMContext` of this store's default caller instance.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
2315
    /// Returns this store's self-referential `dyn VMStore` trait-object
    /// pointer.
    ///
    /// Panics if the pointer has not yet been filled in (presumably done
    /// during store construction — confirm against `Store::new`).
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2320
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        // `mem::take` leaves an empty (non-allocating) vector in the cache.
        mem::take(&mut self.hostcall_val_storage)
    }
2327
    /// Restores the vector previously taken by `take_hostcall_val_storage`
    /// above back into the store, allowing it to be used in the future for the
    /// next wasm->host call.
    #[inline]
    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
        // Only keep `storage` if it has more capacity than the currently
        // cached vector; otherwise it's simply dropped here.
        if storage.capacity() > self.hostcall_val_storage.capacity() {
            self.hostcall_val_storage = storage;
        }
    }
2337
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        // `mem::take` leaves an empty (non-allocating) vector in the cache.
        mem::take(&mut self.wasm_val_raw_storage)
    }
2344
    /// Same as `save_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
        // Only keep `storage` if it has more capacity than the currently
        // cached vector; otherwise it's simply dropped here.
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
2353
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // At most one instance should claim this address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        // The fault was within a known linear memory: report it upwards.
        if fault.is_some() {
            return fault;
        }

        // The address wasn't in any linear memory: abort the process, in a
        // platform-appropriate way, as this may be a security issue.
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2462
    /// Retrieve the store's protection key.
    ///
    /// Returns `None` when no protection key is associated with this store.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2469
    /// Returns simultaneous mutable access to this store's component
    /// call-contexts, host resource handle table, and host resource data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
2485
    /// Records that a component instance has been added to this store,
    /// currently only bumping the instance count.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
2495
    /// Same as `component_resource_state`, but additionally returns mutable
    /// access to the given component `instance`'s state.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }
2514
    /// Same as `component_resource_state_with_instance`, but additionally
    /// returns mutable access to this store's concurrent state.
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance_and_concurrent_state(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
        &mut concurrent::ConcurrentState,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
            &mut self.concurrent_state,
        )
    }
2534
    /// Returns mutable access to this store's async fiber state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2539
    /// Returns mutable access to this store's component-model concurrent
    /// state.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState {
        &mut self.concurrent_state
    }
2544
    /// Returns whether this store has a protection key associated with it.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2549
    /// Returns a borrowed view of how wasm executes in this store: either in
    /// an interpreter or as native code.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2557
    /// Swaps this store's executor with `executor`, leaving the previous one
    /// in the provided slot.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2562
    /// Returns the unwinding implementation corresponding to this store's
    /// executor (interpreter vs. native host code).
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2570
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        // Each continuation gets its own stack, sized according to the
        // engine's configured async stack size.
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        // The raw pointer remains valid because the box is stored in
        // `self.continuations` and (per the doc above) is never deallocated
        // before the store itself is.
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
2585
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id the new instance will receive; this is validated
        // against the actually-pushed key at the end of this function.
        let id = self.instances.next_key();

        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2661
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    ///
    /// Note: any previously pending exception is silently replaced.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }
2670
    /// Take a pending exception, if any, leaving `None` in its place.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2676
    /// Tests whether there is a pending exception.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2682
    /// Takes the pending exception, if any, and returns it as a rooted
    /// reference usable by the host.
    #[cfg(feature = "gc")]
    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
        let vmexnref = self.take_pending_exception()?;
        // `AutoAssertNoGc` asserts no collection can occur while we hold the
        // raw `vmexnref` before it's rooted.
        let mut nogc = AutoAssertNoGc::new(self);
        Some(Rooted::new(&mut nogc, vmexnref.into()))
    }
2689
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(&mut self) -> Option<OwnedRooted<ExnRef>> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception.take().map(|vmexnref| {
            // Clone the underlying GC reference and reinstall the clone as
            // the pending exception, so the store keeps holding it while the
            // original is handed to the caller as an owned root.
            let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
            nogc.pending_exception = Some(cloned.into_exnref_unchecked());
            OwnedRooted::new(&mut nogc, vmexnref.into())
        })
    }
2701
    /// Records `exception` as this store's pending exception.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        let mut nogc = AutoAssertNoGc::new(self);
        // Convert the rooted exception back into a raw GC reference; the
        // `expect` enforces that a null reference is never thrown.
        let exnref = exception._to_raw(&mut nogc).unwrap();
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2711
    /// Sets this store's epoch deadline to `delta` ticks beyond the engine's
    /// current epoch.
    #[cfg(target_has_atomic = "64")]
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let current_epoch = self.engine().current_epoch();
        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
        *epoch_deadline = current_epoch + delta;
    }
2723
    /// Returns the currently-configured epoch deadline.
    ///
    /// Takes `&mut self` so the shared cell can be read safely via `get_mut`.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2727}
2728
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2747
unsafe impl<T> VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        // Split the store into its user data, the configured limiter
        // accessor, and the opaque internals so all three may be borrowed
        // simultaneously.
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        // Resolve the limiter accessor against the store's data to get the
        // actual resource limiter, if one was configured.
        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        // With no callback configured, an epoch deadline simply interrupts.
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }

    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> anyhow::Result<()> {
        // NOTE(review): the handler is `take`n here and not visibly
        // re-installed in this method — presumably re-armed elsewhere;
        // confirm against the handler registration path.
        if let Some(handler) = self.debug_handler.take() {
            log::trace!("about to raise debug event {event:?}");
            // Block synchronously on the (possibly async) handler future.
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
2810
impl<T> StoreInner<T> {
    /// Clears any configured epoch-deadline callback; with no callback
    /// installed, deadline expiry is handled by `new_epoch_updated_deadline`'s
    /// default path.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to be invoked when the epoch deadline is reached,
    /// determining how execution proceeds.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2825
2826impl<T: Default> Default for Store<T> {
2827    fn default() -> Store<T> {
2828        Store::new(&Engine::default(), T::default())
2829    }
2830}
2831
impl<T: fmt::Debug> fmt::Debug for Store<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Show the inner store by address (its contents aren't `Debug`),
        // alongside the user-provided `T` data.
        let inner = &**self.inner as *const StoreInner<T>;
        f.debug_struct("Store")
            .field("inner", &inner)
            .field("data", self.inner.data())
            .finish()
    }
}
2841
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        unsafe {
            // Drop the user's `T` data first, then the store internals
            // (whose own destructor deliberately never touches the data).
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2853
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap, if one was ever allocated, along with its
            // backing linear memory.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Dummy instances are deallocated with an on-demand allocator
            // (matching `AllocateInstanceKind::Dummy`); everything else uses
            // the engine's configured allocator.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            #[cfg(feature = "component-model")]
            {
                // Keep the allocator's component-instance count in sync with
                // the instances this store accounted for.
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2892
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
/// Internal helper trait to uniformly view various store-like types as a
/// `&mut StoreOpaque`.
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2902
impl AsStoreOpaque for StoreOpaque {
    // Identity conversion.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2908
impl AsStoreOpaque for dyn VMStore {
    // Uses the `DerefMut`-style coercion from `dyn VMStore` to `StoreOpaque`.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2914
impl<T: 'static> AsStoreOpaque for Store<T> {
    // Peel both wrapper layers: `Store<T>` -> `StoreInner<T>` -> `StoreOpaque`.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}
2920
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    // Relies on `StoreInner<T>` coercing to its embedded `StoreOpaque`.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2926
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    // Blanket impl so `&mut` references to any store-like type also qualify.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
2932
#[cfg(test)]
mod tests {
    use super::*;

    // Minimal stand-in for the store's fuel-related fields, used to exercise
    // the free functions `get_fuel`, `refuel`, and `set_fuel` in isolation.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit as some
        // instructions can consume multiple units of fuel at once. Refueling
        // should be strict in its consumption and not add more fuel than
        // there is.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }

    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}