Skip to main content

wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
//! reexported to the outside world to be usable from outside this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//!   Stored inline in the outer type the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(all(feature = "gc", feature = "debug"))]
80use crate::OwnedRooted;
81use crate::RootSet;
82#[cfg(feature = "gc")]
83use crate::ThrownException;
84use crate::error::OutOfMemory;
85#[cfg(feature = "async")]
86use crate::fiber;
87use crate::module::{RegisterBreakpointState, RegisteredModuleId};
88use crate::prelude::*;
89#[cfg(feature = "gc")]
90use crate::runtime::vm::GcRootsList;
91#[cfg(feature = "stack-switching")]
92use crate::runtime::vm::VMContRef;
93use crate::runtime::vm::mpk::ProtectionKey;
94use crate::runtime::vm::{
95    self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
96    InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
97    SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
98    VMStoreContext,
99};
100use crate::trampoline::VMHostGlobalContext;
101#[cfg(feature = "debug")]
102use crate::{BreakpointState, DebugHandler, FrameDataCache};
103use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
104#[cfg(feature = "gc")]
105use crate::{ExnRef, Rooted};
106use crate::{Global, Instance, Table};
107use core::convert::Infallible;
108use core::fmt;
109use core::marker;
110use core::mem::{self, ManuallyDrop, MaybeUninit};
111use core::num::NonZeroU64;
112use core::ops::{Deref, DerefMut};
113use core::pin::Pin;
114use core::ptr::NonNull;
115use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
116
117mod context;
118pub use self::context::*;
119mod data;
120pub use self::data::*;
121mod func_refs;
122use func_refs::FuncRefs;
123#[cfg(feature = "component-model-async")]
124mod token;
125#[cfg(feature = "component-model-async")]
126pub(crate) use token::StoreToken;
127#[cfg(feature = "async")]
128mod async_;
129#[cfg(all(feature = "async", feature = "call-hook"))]
130pub use self::async_::CallHookHandler;
131
132#[cfg(feature = "gc")]
133use super::vm::VMExnRef;
134#[cfg(feature = "gc")]
135mod gc;
136
137/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
138///
139/// All WebAssembly instances and items will be attached to and refer to a
140/// [`Store`]. For example instances, functions, globals, and tables are all
141/// attached to a [`Store`]. Instances are created by instantiating a
142/// [`Module`](crate::Module) within a [`Store`].
143///
144/// A [`Store`] is intended to be a short-lived object in a program. No form
145/// of GC is implemented at this time so once an instance is created within a
146/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
147/// This makes [`Store`] unsuitable for creating an unbounded number of
148/// instances in it because [`Store`] will never release this memory. It's
149/// recommended to have a [`Store`] correspond roughly to the lifetime of a
150/// "main instance" that an embedding is interested in executing.
151///
152/// ## Type parameter `T`
153///
154/// Each [`Store`] has a type parameter `T` associated with it. This `T`
155/// represents state defined by the host. This state will be accessible through
156/// the [`Caller`](crate::Caller) type that host-defined functions get access
157/// to. This `T` is suitable for storing `Store`-specific information which
158/// imported functions may want access to.
159///
160/// The data `T` can be accessed through methods like [`Store::data`] and
161/// [`Store::data_mut`].
162///
163/// ## Stores, contexts, oh my
164///
165/// Most methods in Wasmtime take something of the form
166/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
167/// the first argument. These two traits allow ergonomically passing in the
168/// context you currently have to any method. The primary two sources of
169/// contexts are:
170///
171/// * `Store<T>`
172/// * `Caller<'_, T>`
173///
174/// corresponding to what you create and what you have access to in a host
175/// function. You can also explicitly acquire a [`StoreContext`] or
176/// [`StoreContextMut`] and pass that around as well.
177///
178/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
179/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
180/// form of context you have you can call various methods, create objects, etc.
181///
182/// ## Stores and `Default`
183///
184/// You can create a store with default configuration settings using
185/// `Store::default()`. This will create a brand new [`Engine`] with default
186/// configuration (see [`Config`](crate::Config) for more information).
187///
188/// ## Cross-store usage of items
189///
190/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
191/// [`Store`]. The store they belong to is the one they were created with
192/// (passed in as a parameter) or instantiated with. This store is the only
193/// store that can be used to interact with wasm items after they're created.
194///
195/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
196/// operations is incorrect. In other words it's considered a programmer error
197/// rather than a recoverable error for the wrong [`Store`] to be used when
198/// calling APIs.
199///
200/// [`Memory`]: crate::Memory
pub struct Store<T: 'static> {
    // The heap allocation holding all store state. `ManuallyDrop` supports
    // taking ownership of the user's `T` out of this allocation in
    // `Store::into_data`; see that method's comments for details.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
205
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // `matches!` expresses this boolean pattern test more directly than
        // a manual `match` returning `true`/`false`.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
236
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Embedder-configured resource limiter, projected out of `T` via a
    /// closure (see `ResourceLimiterInner`). `None` when not configured.
    limiter: Option<ResourceLimiterInner<T>>,

    /// Embedder-configured call hook, invoked on host<->wasm call transitions
    /// (see `CallHook`). `None` when not configured.
    call_hook: Option<CallHookInner<T>>,

    /// Callback invoked when the engine's epoch reaches this store's
    /// deadline; its return value (`UpdateDeadline`) dictates how execution
    /// proceeds.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// The handler itself takes `&self` and also the whole Store
    /// mutably (via `StoreContextMut`), so when invoking it the boxed
    /// handler is temporarily taken out of this field, cloned, and put
    /// back (see `StoreDebugHandler::handle` below).
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
276
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Handles `event`, consuming the boxed handler that the caller took off
    /// the store; the implementation is responsible for re-installing the
    /// handler into `store` (see the impl below for why).
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
287
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    /// Dispatches `event` to the user's `DebugHandler`, re-installing the
    /// boxed handler on the store before running the user's code.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it.
        store.0.debug_handler = Some(self);
        Box::new(async move { handler.handle(store, event).await })
    }
}
310
/// Store-owned closure projecting the embedder's resource limiter out of the
/// store's `T` data.
///
/// The closure indirection lets the limiter live inside the user's `T` while
/// the store only stores how to reach it.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter (see `crate::ResourceLimiter`).
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter (see `crate::ResourceLimiterAsync`).
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
316
317/// Representation of a configured resource limiter for a store.
318///
319/// This is acquired with `resource_limiter_and_store_opaque` for example and is
320/// threaded through to growth operations on tables/memories. Note that this is
321/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
322/// efficient to pass around (nullable pointer) and it's also notably passed
323/// around as an `Option` to represent how this is optionally specified within a
324/// store.
pub enum StoreResourceLimiter<'a> {
    /// Borrowed synchronous limiter.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// Borrowed asynchronous limiter.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
330
impl StoreResourceLimiter<'_> {
    /// Forwards a memory-growth request (`current` -> `desired`, with optional
    /// `maximum`) to the underlying limiter, awaiting in the async case.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Forwards `error` to the underlying limiter's `memory_grow_failed`
    /// callback.
    pub(crate) fn memory_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Forwards a table-growth request (`current` -> `desired`, with optional
    /// `maximum`) to the underlying limiter, awaiting in the async case.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Forwards `error` to the underlying limiter's `table_grow_failed`
    /// callback.
    pub(crate) fn table_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
374
/// Store-owned state for the configured call hook, if any.
enum CallHookInner<T: 'static> {
    /// Synchronous hook invoked on each `CallHook` transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook (see `CallHookHandler`).
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited variant whose only purpose is to use the `T` type
    /// parameter no matter which feature-gated variants above are compiled in.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}
389
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the value returned from the store's epoch-deadline callback (see
/// `StoreInner::epoch_deadline_behavior`).
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks and continue
    /// execution.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
423
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;

    /// Borrow the monomorphic `StoreOpaque` stored inline in this store.
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
431
impl<T> DerefMut for StoreInner<T> {
    /// Mutably borrow the monomorphic `StoreOpaque` stored inline in this
    /// store.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
437
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store was created against (see `Store::new`).
    engine: Engine,

    /// Per-store state shared with the VM layer. Note that its `store_data`
    /// pointer is initialized after-the-fact in `Store::try_new`.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// Instances allocated within this store, both real instances and internal
    /// "dummy" ones (see `StoreInstanceKind`).
    instances: wasmtime_environ::collections::PrimaryMap<InstanceId, StoreInstance>,

    /// Custom signal handler for this store, if any was configured.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules associated with this store (see `ModuleRegistry`).
    modules: ModuleRegistry,
    /// Store-owned `VMFuncRef` storage (see `FuncRefs`).
    func_refs: FuncRefs,
    /// Contexts for globals created by the host rather than defined by wasm.
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,

    // GC-related fields.
    /// The GC heap for this store; `None` until one is needed (it starts out
    /// unset in `Store::try_new`).
    gc_store: Option<GcStore>,
    /// Host-side roots of GC objects (see `RootSet`).
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,

    /// State supporting async execution of wasm within this store.
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-pointer back to this store as a `dyn VMStore` trait object (see
    /// `StorePtr`); starts out `None` in `Store::try_new`.
    traitobj: StorePtr,
    /// Pointer to the `VMContext` of this store's default caller instance;
    /// initialized to a dangling pointer in `Store::try_new`.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,

    /// The debug breakpoint state for this store.
    ///
    /// When guest debugging is enabled, a given store may have a set
    /// of breakpoints defined, denoted by module and Wasm PC within
    /// that module. Or alternately, it may be in "single-step" mode,
    /// where every possible breakpoint is logically enabled.
    ///
    /// When execution of any instance in this store hits any defined
    /// breakpoint, a `Breakpoint` debug event is emitted and the
    /// handler defined above, if any, has a chance to perform some
    /// logic before returning to allow execution to resume.
    #[cfg(feature = "debug")]
    breakpoints: BreakpointState,

    /// The debug PC-to-FrameData cache for this store.
    ///
    /// When guest debugging is enabled, we parse compiler metadata
    /// and pass out `FrameHandle`s that represent Wasm guest
    /// frames. These handles represent a specific frame within a
    /// frozen stack and are invalidated upon further execution. In
    /// order to keep these handles lightweight, and to avoid
    /// redundant work when passing out *new* handles after further
    /// execution, we cache the mapping from store-specific PCs to
    /// parsed frame data. (This cache needs to be store-specific
    /// rather than e.g. engine-specific because each store has its
    /// own privately mapped copy of guest code when debugging is
    /// enabled, so the key-space is unique for each store.)
    #[cfg(feature = "debug")]
    frame_data_cache: FrameDataCache,
}
573
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// Holds `None` until the store's self-pointer has been installed (it starts
/// out `None` in `Store::try_new`).
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);

// We can't make `VMStore: Send + Sync` because that requires making all of
// Wasmtime's internals generic over the `Store`'s `T`. So instead, we take care
// in the whole VM layer to only use the `VMStore` in ways that are `Send`- and
// `Sync`-safe and we have to have these unsafe impls.
//
// SAFETY: upheld by auditing all uses of `VMStore` throughout the VM layer as
// described above.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
587
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm in this store is executed by the Pulley interpreter, whose state
    /// lives here.
    Interpreter(Interpreter),
    /// Wasm in this store is executed as natively-compiled code.
    #[cfg(has_host_compiler_backend)]
    Native,
}
597
impl Executor {
    /// Creates the executor for a new store belonging to `engine`.
    ///
    /// The Pulley interpreter is selected when the engine's target is Pulley;
    /// otherwise, when a host compiler backend is available, native execution
    /// is used. Returns `Err` if interpreter allocation fails.
    pub(crate) fn new(engine: &Engine) -> Result<Self, OutOfMemory> {
        // With a host compiler backend available the interpreter is only used
        // when this engine explicitly targets Pulley.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Ok(Executor::Interpreter(Interpreter::new(engine)?))
        } else {
            Ok(Executor::Native)
        }
        // Without a host compiler backend the target must be Pulley, so the
        // interpreter is the only option.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Ok(Executor::Interpreter(Interpreter::new(engine)?))
        }
    }
}
613
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed interpreter state (see `Executor::Interpreter`).
    Interpreter(InterpreterRef<'a>),
    /// Native execution; there is no state to borrow (see `Executor::Native`).
    #[cfg(has_host_compiler_backend)]
    Native,
}
620
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is marked as in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered and therefore must be
    /// exited when this value is dropped (see the `Drop` impl below).
    entered: bool,
}
627
628impl<'a> AutoAssertNoGc<'a> {
629    #[inline]
630    pub fn new(store: &'a mut StoreOpaque) -> Self {
631        let entered = if !cfg!(feature = "gc") {
632            false
633        } else if let Some(gc_store) = store.gc_store.as_mut() {
634            gc_store.gc_heap.enter_no_gc_scope();
635            true
636        } else {
637            false
638        };
639
640        AutoAssertNoGc { store, entered }
641    }
642
643    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
644    /// disables checks for no GC happening for the duration of this value.
645    ///
646    /// This is used when it is statically otherwise known that a GC doesn't
647    /// happen for the various types involved.
648    ///
649    /// # Unsafety
650    ///
651    /// This method is `unsafe` as it does not provide the same safety
652    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
653    /// caller that a GC doesn't happen.
654    #[inline]
655    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
656        if cfg!(debug_assertions) {
657            AutoAssertNoGc::new(store)
658        } else {
659            AutoAssertNoGc {
660                store,
661                entered: false,
662            }
663        }
664    }
665}
666
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    /// Transparently borrow the underlying store.
    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}
675
impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    /// Transparently mutably borrow the underlying store.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
682
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Exit the no-GC scope entered in `AutoAssertNoGc::new`. `entered` is
        // only set to `true` when a GC store was present at construction, so
        // unwrapping the GC store here is expected to succeed.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
691
692/// Used to associate instances with the store.
693///
694/// This is needed to track if the instance was allocated explicitly with the on-demand
695/// instance allocator.
struct StoreInstance {
    /// The underlying allocated instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy one (see
    /// `StoreInstanceKind`).
    kind: StoreInstanceKind,
}
700
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    ///
    /// Unlike `Dummy` below, deallocation of real instances goes through the
    /// engine's configured instance allocator.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
717
718impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    ///
    /// # Panics
    ///
    /// Panics if internal allocations fail while creating the store; use
    /// [`Store::try_new`] to handle such errors gracefully.
    pub fn new(engine: &Engine, data: T) -> Self {
        Self::try_new(engine, data).expect(
            "allocation failure during `Store::new` (use `Store::try_new` to handle such errors)",
        )
    }
733
    /// Like `Store::new` but returns an error on allocation failure.
    ///
    /// # Errors
    ///
    /// Returns an error if allocating the store's internal state fails, for
    /// example when boxing the store's data, creating the executor, or
    /// running out of memory while allocating the store's internal "default
    /// callee" instance.
    pub fn try_new(engine: &Engine, data: T) -> Result<Self> {
        let store_data = StoreData::new(engine);
        log::trace!("creating new store {:?}", store_data.id());

        // Acquire a memory-protection key for this store, if the engine's
        // allocator has one available (this may be `None`).
        let pkey = engine.allocator().next_available_pkey();

        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: wasmtime_environ::collections::PrimaryMap::new(),
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // These self-referential pointers can only be filled in once the
            // `StoreInner` has a stable heap address, so they start out empty
            // (or dangling) and are patched up after boxing below.
            traitobj: StorePtr(None),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            executor: Executor::new(engine)?,
            #[cfg(feature = "debug")]
            breakpoints: Default::default(),
            #[cfg(feature = "debug")]
            frame_data_cache: FrameDataCache::new(),
        };
        let mut inner = try_new::<Box<_>>(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        })?;

        // Now that the store is boxed and has a stable address, record the
        // pointer to the user's `T` data; `StoreInner::data` reconstructs its
        // accesses from this pointer's provenance.
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let allocator = OnDemandInstanceAllocator::default();
        let info = engine.empty_module_runtime_info();
        allocator
            .validate_module(info.env_module(), info.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let result = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                info,
                Default::default(),
            ));
            let id = match result {
                Ok(id) => id,
                Err(e) => {
                    // Out-of-memory is a legitimate, reportable error here;
                    // any other failure allocating the default callee is a
                    // bug, hence the panic below.
                    if e.is::<OutOfMemory>() {
                        return Err(e);
                    }
                    panic!("instance allocator failed to allocate default callee")
                }
            };
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Ok(Self {
            inner: ManuallyDrop::new(inner),
        })
    }
842
    /// Access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }

    /// Mutably access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
854
    /// Run `StoreData`'s manual drop routines and flush fiber stacks while the
    /// store is still fully intact; invoked before teardown (see `into_data`).
    fn run_manual_drop_routines(&mut self) {
        StoreData::run_manual_drop_routines(StoreContextMut(&mut self.inner));

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
862
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        //
        // SAFETY: per the commentary above, `self.inner` is still initialized
        // when it is taken here, and `mem::forget(self)` immediately afterwards
        // ensures neither `T` nor the inner box is dropped a second time.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
897
898    /// Configures the [`ResourceLimiter`] used to limit resource creation
899    /// within this [`Store`].
900    ///
901    /// Whenever resources such as linear memory, tables, or instances are
902    /// allocated the `limiter` specified here is invoked with the store's data
903    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
904    /// being allocated. The returned [`ResourceLimiter`] is intended to live
905    /// within the `T` itself, for example by storing a
906    /// [`StoreLimits`](crate::StoreLimits).
907    ///
908    /// Note that this limiter is only used to limit the creation/growth of
909    /// resources in the future, this does not retroactively attempt to apply
910    /// limits to the [`Store`].
911    ///
912    /// # Examples
913    ///
914    /// ```
915    /// use wasmtime::*;
916    ///
917    /// struct MyApplicationState {
918    ///     my_state: u32,
919    ///     limits: StoreLimits,
920    /// }
921    ///
922    /// let engine = Engine::default();
923    /// let my_state = MyApplicationState {
924    ///     my_state: 42,
925    ///     limits: StoreLimitsBuilder::new()
926    ///         .memory_size(1 << 20 /* 1 MB */)
927    ///         .instances(2)
928    ///         .build(),
929    /// };
930    /// let mut store = Store::new(&engine, my_state);
931    /// store.limiter(|state| &mut state.limits);
932    ///
933    /// // Creation of smaller memories is allowed
934    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
935    ///
936    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
937    /// // configured
938    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
939    ///
940    /// // The number of instances in this store is limited to 2, so the third
941    /// // instance here should fail.
942    /// let module = Module::new(&engine, "(module)").unwrap();
943    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
944    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
945    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
946    /// ```
947    ///
948    /// [`ResourceLimiter`]: crate::ResourceLimiter
949    pub fn limiter(
950        &mut self,
951        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
952    ) {
953        // Apply the limits on instances, tables, and memory given by the limiter:
954        let inner = &mut self.inner;
955        let (instance_limit, table_limit, memory_limit) = {
956            let l = limiter(inner.data_mut());
957            (l.instances(), l.tables(), l.memories())
958        };
959        let innermost = &mut inner.inner;
960        innermost.instance_limit = instance_limit;
961        innermost.table_limit = table_limit;
962        innermost.memory_limit = memory_limit;
963
964        // Save the limiter accessor function:
965        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
966    }
967
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    ///
    /// This method is only available when the `call-hook` crate feature is
    /// enabled.
    ///
    /// [`Trap`]: crate::Trap
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
991
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }

    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// # Errors
    ///
    /// This method will fail if an [async limiter is
    /// configured](Store::limiter_async) in which case [`Store::gc_async`] must
    /// be used instead.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        // Delegate to `StoreContextMut::gc`, which performs the synchronous
        // collection.
        StoreContextMut(&mut self.inner).gc(why)
    }

    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }

    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }
1052
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back
    /// to the caller every `interval` units of fuel consumed. When using this
    /// method it requires further invocations of WebAssembly to use `*_async`
    /// entrypoints.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Errors
    ///
    /// This method will error if fuel is not enabled or `interval` is
    /// `Some(0)`.
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.inner.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }

    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1182
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To take the pending exception, use [`Self::take_pending_exception`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the exception in the store's pending-exception slot, then
        // surface the sentinel error type that signals its presence.
        self.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// Ordinarily, a pending exception will be set on a store if and
    /// only if a host-side callstack is propagating a
    /// [`crate::ThrownException`] error. The final consumer that
    /// catches the exception takes it; it may re-place it to re-throw
    /// (using [`Self::throw`]) if it chooses not to actually handle the
    /// exception.
    ///
    /// This method is useful to tell whether a store is in this
    /// state, but should not be used as part of the ordinary
    /// exception-handling flow. For the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.inner.pending_exception.is_some()
    }
1256
    /// Return all breakpoints.
    ///
    /// Returns `None` or an iterator by delegating to the shared-context
    /// implementation (see `StoreContext`).
    #[cfg(feature = "debug")]
    pub fn breakpoints(&self) -> Option<impl Iterator<Item = crate::Breakpoint> + '_> {
        self.as_context().breakpoints()
    }

    /// Indicate whether single-step mode is enabled.
    #[cfg(feature = "debug")]
    pub fn is_single_step(&self) -> bool {
        self.as_context().is_single_step()
    }
1268
1269    /// Set the debug callback on this store.
1270    ///
1271    /// See [`crate::DebugHandler`] for more documentation.
1272    ///
1273    /// # Panics
1274    ///
1275    /// - Will panic if guest-debug support was not enabled via
1276    ///   [`crate::Config::guest_debug`].
1277    #[cfg(feature = "debug")]
1278    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
1279    where
1280        // We require `Send` here because the debug handler becomes
1281        // referenced from a future: when `DebugHandler::handle` is
1282        // invoked, its `self` references the `handler` with the
1283        // user's state. Note that we are careful to keep this bound
1284        // constrained to debug-handler-related code only and not
1285        // propagate it outward to the store in general. The presence
1286        // of the trait implementation serves as a witness that `T:
1287        // Send`. This is required in particular because we will have
1288        // a `&mut dyn VMStore` on the stack when we pause a fiber
1289        // with `block_on` to run a debugger hook; that `VMStore` must
1290        // be a `Store<T> where T: Send`.
1291        T: Send,
1292    {
1293        // Debug hooks rely on async support, so async entrypoints are required.
1294        self.inner.set_async_required(Asyncness::Yes);
1295
1296        assert!(
1297            self.engine().tunables().debug_guest,
1298            "debug hooks require guest debugging to be enabled"
1299        );
1300        self.inner.debug_handler = Some(Box::new(handler));
1301    }
1302
    /// Clear the debug handler on this store. If any existed, it will
    /// be dropped. Clearing when no handler is set is a no-op.
    #[cfg(feature = "debug")]
    pub fn clear_debug_handler(&mut self) {
        self.inner.debug_handler = None;
    }
1309}
1310
impl<'a, T> StoreContext<'a, T> {
    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned reference lives for the full `'a` lifetime of
    /// this context, not just for the duration of the `&self` borrow.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1331
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that unlike [`StoreContext::data`] the returned borrow lasts only
    /// as long as the `&self` borrow, not the full `'a` lifetime.
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        // A synchronous resource limiter is required here; the validation call
        // below returns an error otherwise (see `Store::gc`'s `# Errors`
        // docs). With `Asyncness::No` the GC future is asserted to complete
        // immediately.
        let (mut limiter, store) = self.0.validate_sync_resource_limiter_and_store_opaque()?;
        vm::assert_ready(store.gc(
            limiter.as_mut(),
            None,
            why.map(|e| e.bytes_needed()),
            Asyncness::No,
        ));
        Ok(())
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
1432
1433impl<T> StoreInner<T> {
    /// Borrow the user-provided `T` stored within this store.
    ///
    /// The access goes through the pointer recorded in
    /// `VMStoreContext::store_data` for provenance reasons; see the comments
    /// below.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }
1451
    /// Split-borrow helper returning the user's `T`, the configured resource
    /// limiter (if any), and the store's `StoreOpaque`, all borrowed
    /// simultaneously from `&mut self`.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }

    /// Mutably borrow the user-provided `T` stored within this store.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }
1480
1481    #[inline]
1482    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
1483        if self.inner.pkey.is_none() && self.call_hook.is_none() {
1484            Ok(())
1485        } else {
1486            self.call_hook_slow_path(s)
1487        }
1488    }
1489
1490    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
1491        if let Some(pkey) = &self.inner.pkey {
1492            let allocator = self.engine().allocator();
1493            match s {
1494                CallHook::CallingWasm | CallHook::ReturningFromHost => {
1495                    allocator.restrict_to_pkey(*pkey)
1496                }
1497                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
1498            }
1499        }
1500
1501        // Temporarily take the configured behavior to avoid mutably borrowing
1502        // multiple times.
1503        if let Some(mut call_hook) = self.call_hook.take() {
1504            let result = self.invoke_call_hook(&mut call_hook, s);
1505            self.call_hook = Some(call_hook);
1506            return result;
1507        }
1508
1509        Ok(())
1510    }
1511
1512    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
1513        match call_hook {
1514            #[cfg(feature = "call-hook")]
1515            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),
1516
1517            #[cfg(all(feature = "async", feature = "call-hook"))]
1518            CallHookInner::Async(handler) => {
1519                if !self.can_block() {
1520                    bail!("couldn't grab async_cx for call hook")
1521                }
1522                return (&mut *self)
1523                    .as_context_mut()
1524                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
1525            }
1526
1527            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
1528                let _ = s;
1529                match *uninhabited {}
1530            }
1531        }
1532    }
1533
1534    #[cfg(not(feature = "async"))]
1535    fn flush_fiber_stack(&mut self) {
1536        // noop shim so code can assume this always exists.
1537    }
1538
1539    /// Splits this `StoreInner<T>` into a `limiter`/`StoerOpaque` borrow while
1540    /// validating that an async limiter is not configured.
1541    ///
1542    /// This is used for sync entrypoints which need to fail if an async limiter
1543    /// is configured as otherwise the async entrypoint must be used instead.
1544    pub(crate) fn validate_sync_resource_limiter_and_store_opaque(
1545        &mut self,
1546    ) -> Result<(Option<StoreResourceLimiter<'_>>, &mut StoreOpaque)> {
1547        let (limiter, store) = self.resource_limiter_and_store_opaque();
1548        if !matches!(limiter, None | Some(StoreResourceLimiter::Sync(_))) {
1549            bail!(
1550                "when using an async resource limiter `*_async` functions must \
1551             be used instead"
1552            );
1553        }
1554        Ok((limiter, store))
1555    }
1556}
1557
/// Computes the total fuel remaining given the VM's injected fuel counter
/// (which counts upward as fuel is consumed, so a negative value means fuel
/// is still active) and the fuel held back in the store's reserve.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    if injected_fuel <= 0 {
        // Fuel still active in the VM is the negation of the counter; add it
        // to the reserve, saturating at `u64::MAX`.
        fuel_reserve.saturating_add((-injected_fuel) as u64)
    } else {
        // A positive counter means the active fuel was overdrawn; debit the
        // overage from the reserve, saturating at zero.
        fuel_reserve.saturating_sub(injected_fuel as u64)
    }
}
1561
1562// Add remaining fuel from the reserve into the active fuel if there is any left.
1563fn refuel(
1564    injected_fuel: &mut i64,
1565    fuel_reserve: &mut u64,
1566    yield_interval: Option<NonZeroU64>,
1567) -> bool {
1568    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1569    if fuel > 0 {
1570        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1571        true
1572    } else {
1573        false
1574    }
1575}
1576
/// Distributes `new_fuel_amount` between the VM's injected fuel counter and
/// the store's reserve.
///
/// The amount made "active" in the VM at once is capped by the configured
/// yield interval (if any) and by what an `i64` can represent; the remainder
/// is parked in `fuel_reserve` for later re-injection.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // Upper bound on fuel active in the VM at once: the yield interval when
    // periodic yielding is configured, and never more than `i64::MAX` since
    // the VM stores fuel as an `i64`.
    let cap = yield_interval
        .map(|i| i.get())
        .unwrap_or(u64::MAX)
        .min(i64::MAX as u64);
    let active = new_fuel_amount.min(cap);
    // Whatever isn't injected now is held back for later.
    *fuel_reserve = new_fuel_amount - active;
    // The VM increments toward zero to count consumption, so inject the
    // active amount negated; execution halts once the counter is positive.
    *injected_fuel = -(active as i64);
}
1596
1597#[doc(hidden)]
1598impl StoreOpaque {
    /// Returns this store's unique identifier, derived from its `StoreData`.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1602
1603    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1604        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1605            let new = slot.saturating_add(amt);
1606            if new > max {
1607                bail!("resource limit exceeded: {desc} count too high at {new}");
1608            }
1609            *slot = new;
1610            Ok(())
1611        }
1612
1613        let module = module.env_module();
1614        let memories = module.num_defined_memories();
1615        let tables = module.num_defined_tables();
1616
1617        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1618        bump(
1619            &mut self.memory_count,
1620            self.memory_limit,
1621            memories,
1622            "memory",
1623        )?;
1624        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1625
1626        Ok(())
1627    }
1628
    /// Returns the `Engine` this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1633
    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1638
    /// Mutable access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1643
    /// Simultaneously borrows `StoreData` mutably and the module registry
    /// immutably, for callers that need both at once.
    pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
        (&mut self.store_data, &self.modules)
    }
1647
    /// Mutably borrows both the breakpoint state and the module registry at
    /// the same time (only available with the `debug` feature).
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry_mut(
        &mut self,
    ) -> (&mut BreakpointState, &mut ModuleRegistry) {
        (&mut self.breakpoints, &mut self.modules)
    }
1654
    /// Shared-borrow counterpart of [`Self::breakpoints_and_registry_mut`]
    /// (only available with the `debug` feature).
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry(&self) -> (&BreakpointState, &ModuleRegistry) {
        (&self.breakpoints, &self.modules)
    }
1659
    /// Mutably borrows the frame-data cache alongside a shared borrow of the
    /// module registry (only available with the `debug` feature).
    #[cfg(feature = "debug")]
    pub(crate) fn frame_data_cache_mut_and_registry(
        &mut self,
    ) -> (&mut FrameDataCache, &ModuleRegistry) {
        (&mut self.frame_data_cache, &self.modules)
    }
1666
    /// Returns the registry of modules associated with this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1671
    /// Splits borrows of the module registry (mutable), the engine (shared),
    /// and the breakpoint-registration state.
    ///
    /// Without the `debug` feature the breakpoint state is a `PhantomData`
    /// placeholder so callers can be written uniformly across configurations.
    #[inline]
    pub(crate) fn modules_and_engine_and_breakpoints_mut(
        &mut self,
    ) -> (&mut ModuleRegistry, &Engine, RegisterBreakpointState<'_>) {
        #[cfg(feature = "debug")]
        let breakpoints = RegisterBreakpointState(&self.breakpoints);
        #[cfg(not(feature = "debug"))]
        let breakpoints = RegisterBreakpointState(core::marker::PhantomData);

        (&mut self.modules, &self.engine, breakpoints)
    }
1683
    /// Simultaneously borrows this store's `FuncRefs` mutably and the module
    /// registry immutably.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }
1687
    /// Returns the table of host-created globals stored in this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }
1693
    /// Mutable counterpart of [`Self::host_globals`].
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1699
1700    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1701        instance.store_id().assert_belongs_to(self.id());
1702        match self.instances[instance.instance()].kind {
1703            StoreInstanceKind::Dummy => None,
1704            StoreInstanceKind::Real { module_id } => {
1705                let module = self
1706                    .modules()
1707                    .module_by_id(module_id)
1708                    .expect("should always have a registered module for real instances");
1709                Some(module)
1710            }
1711        }
1712    }
1713
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics if `id` does not index an instance within this store.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }
1723
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics if `id` does not index an instance within this store.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1733
    /// Accessor from `InstanceId` to both `Pin<&mut vm::Instance>`
    /// and `&ModuleRegistry`.
    ///
    /// # Panics
    ///
    /// Panics if `id` does not index an instance within this store.
    #[inline]
    pub fn instance_and_module_registry_mut(
        &mut self,
        id: InstanceId,
    ) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
        (self.instances[id].handle.get_mut(), &self.modules)
    }
1743
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap() // panics on overlapping ids, per the contract above
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1769
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    /// borrowed simultaneously.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1777
    /// Tuple of `Self::optional_gc_store_mut`, `Self::modules`, and
    /// `Self::instance_mut` borrowed simultaneously.
    pub fn optional_gc_store_and_registry_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (
        Option<&mut GcStore>,
        &ModuleRegistry,
        Pin<&mut vm::Instance>,
    ) {
        (
            self.gc_store.as_mut(),
            &self.modules,
            self.instances[id].handle.get_mut(),
        )
    }
1794
1795    /// Get all instances (ignoring dummy instances) within this store.
1796    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1797        let instances = self
1798            .instances
1799            .iter()
1800            .filter_map(|(id, inst)| {
1801                if let StoreInstanceKind::Dummy = inst.kind {
1802                    None
1803                } else {
1804                    Some(id)
1805                }
1806            })
1807            .collect::<Vec<_>>();
1808        instances
1809            .into_iter()
1810            .map(|i| Instance::from_wasmtime(i, self))
1811    }
1812
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        //
        // Capture the store id once up front so the closure below doesn't
        // need to borrow `self`.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1823
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1837
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    ///
    /// Host-created globals are visited first, followed by each instance's
    /// defined globals.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1855
    /// Sets this store's custom signal handler, replacing any previously
    /// configured one (`None` clears it).
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1860
    /// Shared access to the `VMStoreContext` shared with compiled Wasm code.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1865
    /// Mutable counterpart of [`Self::vm_store_context`].
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1870
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // NOTE(review): written as `is_some` + `unwrap` rather than `if let`,
        // presumably to keep the early-return borrow from extending over the
        // allocation path below — confirm before refactoring.
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1888
    /// Slow path of `ensure_gc_store`: allocates the GC heap and publishes it
    /// into this store's `VMStoreContext`.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Preconditions: no GC store exists yet and the heap fields in the
        // store context are still their "empty" sentinel values.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        // Publish the heap's memory definition so compiled code can see it.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // Helper performing the real allocation when GC support is compiled
        // in.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::packed_option::ReservedValue;

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: engine.empty_module_runtime_info(),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Stub used when GC support is compiled out: always an error.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1956
1957    /// Helper method to require that a `GcStore` was previously allocated for
1958    /// this store, failing if it has not yet been allocated.
1959    ///
1960    /// Note that this should only be used in a context where allocation of a
1961    /// `GcStore` is sure to have already happened prior, otherwise this may
1962    /// return a confusing error to embedders which is a bug in Wasmtime.
1963    ///
1964    /// Some situations where it's safe to call this method:
1965    ///
1966    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
1967    ///   this shows proof that the `GcStore` was previously allocated.
1968    /// * During instantiation and instance's `needs_gc_heap` flag will be
1969    ///   handled and instantiation will automatically create a GC store.
1970    #[inline]
1971    #[cfg(feature = "gc")]
1972    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
1973        match &self.gc_store {
1974            Some(gc_store) => Ok(gc_store),
1975            None => bail!("GC heap not initialized yet"),
1976        }
1977    }
1978
1979    /// Same as [`Self::require_gc_store`], but mutable.
1980    #[inline]
1981    #[cfg(feature = "gc")]
1982    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
1983        match &mut self.gc_store {
1984            Some(gc_store) => Ok(gc_store),
1985            None => bail!("GC heap not initialized yet"),
1986        }
1987    }
1988
1989    /// Attempts to access the GC store that has been previously allocated.
1990    ///
1991    /// This method will return `Some` if the GC store was previously allocated.
1992    /// A `None` return value means either that the GC heap hasn't yet been
1993    /// allocated or that it does not need to be allocated for this store. Note
1994    /// that to require a GC store in a particular situation it's recommended to
1995    /// use [`Self::require_gc_store_mut`] instead.
1996    #[inline]
1997    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
1998        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1999            debug_assert!(self.gc_store.is_none());
2000            None
2001        } else {
2002            self.gc_store.as_mut()
2003        }
2004    }
2005
    /// Helper to assert that a GC store was previously allocated and is
    /// present.
    ///
    /// # Panics
    ///
    /// This method will panic if the GC store has not yet been allocated. This
    /// should only be used in a context where there's an existing GC reference,
    /// for example, or if `ensure_gc_store` has already been called.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
2021
    /// Same as [`Self::unwrap_gc_store`], but mutable.
    ///
    /// # Panics
    ///
    /// Panics if the GC store has not yet been allocated.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
2030
    /// Shared access to the set of GC roots held by the embedder.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
2035
    /// Mutable counterpart of [`Self::gc_roots`].
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
2041
    /// Pops LIFO-rooted GC references down to the given `scope` marker,
    /// unrooting everything rooted since that scope was entered.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
2046
    /// Runs a garbage collection over this store's GC heap: traces all roots
    /// and then collects. A no-op when no GC heap has been allocated.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self, asyncness: Asyncness) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots, asyncness).await;
        // SAFETY(review): `roots` lives across the whole `gc` call and its
        // entries were just collected by `trace_roots` above — confirm against
        // the requirements documented on `GcRootsList::iter`.
        self.unwrap_gc_store_mut()
            .gc(asyncness, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
2071
    /// Collects every category of GC root into `gc_roots_list`: Wasm stacks,
    /// suspended continuations (when enabled), vmctx-reachable slots, user
    /// roots, and any pending exception.
    ///
    /// When running async this yields between categories to bound pause
    /// times.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList, asyncness: Asyncness) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            if asyncness != Asyncness::No {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2099
    /// Records the live GC references of a single Wasm stack `frame` into
    /// `gc_roots_list`, using the owning module's stack maps and — with the
    /// `debug` feature — its frame table.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        let (module_with_code, _offset) = self
            .modules()
            .module_and_code_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            // SAFETY(review): `fp`/`sp` come from a backtrace of live Wasm
            // frames in this store, which the stack-map APIs require — confirm
            // against their documented contracts.
            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // With debugging support, frames may additionally describe GC slots
        // via the module's frame table.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_with_code.module().frame_table() {
            let relpc = module_with_code
                .text_offset(pc)
                .expect("PC should be within module");
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2146
    /// Reads the `u32` at `stack_slot` and, when it holds a non-null GC
    /// reference, registers the slot's address as a Wasm stack root.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be valid to read as a `u32`, and the slot must stay
    /// live while the collected roots are in use (NOTE(review): confirm the
    /// exact lifetime requirement with `add_wasm_stack_root`).
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        // SAFETY: the caller promises `stack_slot` is readable.
        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        // Only non-null references become roots; null encodings are skipped.
        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2163
    /// Walks the current Wasm stack via a backtrace, tracing each frame's GC
    /// roots into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2176
    /// Traces GC roots on the stacks of suspended stack-switching
    /// continuations; other continuation states either hold no roots or are
    /// covered by the main-stack tracing.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2215
    /// Traces GC references reachable from vmctx state: every global and
    /// every table in the store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
2223
    /// Traces GC roots held by the embedder via this store's `RootSet`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
2230
    /// Keeps any pending exception alive across a collection by registering
    /// it as a GC root.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            // SAFETY(review): the pending-exception slot is owned by this
            // store and outlives tracing, so the root pointer stays valid —
            // confirm against `add_root`'s requirements.
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2242
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // Retaining the `RegisteredType` here keeps the registration alive for
        // this store's entire lifetime.
        self.gc_host_alloc_types.insert(ty);
    }
2253
2254    /// Helper function execute a `init_gc_ref` when placing `gc_ref` in `dest`.
2255    ///
2256    /// This avoids allocating `GcStore` where possible.
2257    pub(crate) fn init_gc_ref(
2258        &mut self,
2259        dest: &mut MaybeUninit<Option<VMGcRef>>,
2260        gc_ref: Option<&VMGcRef>,
2261    ) {
2262        if GcStore::needs_init_barrier(gc_ref) {
2263            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
2264        } else {
2265            dest.write(gc_ref.map(|r| r.copy_i31()));
2266        }
2267    }
2268
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating `GcStore` where possible: the possibly-absent
    /// GC store is passed through so the callee can handle the no-store case.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2275
2276    /// Helper function to clone `gc_ref` notably avoiding allocating a
2277    /// `GcStore` where possible.
2278    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
2279        if gc_ref.is_i31() {
2280            gc_ref.copy_i31()
2281        } else {
2282            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
2283        }
2284    }
2285
2286    pub fn get_fuel(&self) -> Result<u64> {
2287        crate::ensure!(
2288            self.engine().tunables().consume_fuel,
2289            "fuel is not configured in this store"
2290        );
2291        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
2292        Ok(get_fuel(injected_fuel, self.fuel_reserve))
2293    }
2294
2295    pub(crate) fn refuel(&mut self) -> bool {
2296        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
2297        refuel(
2298            injected_fuel,
2299            &mut self.fuel_reserve,
2300            self.fuel_yield_interval,
2301        )
2302    }
2303
2304    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
2305        crate::ensure!(
2306            self.engine().tunables().consume_fuel,
2307            "fuel is not configured in this store"
2308        );
2309        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
2310        set_fuel(
2311            injected_fuel,
2312            &mut self.fuel_reserve,
2313            self.fuel_yield_interval,
2314            fuel,
2315        );
2316        Ok(())
2317    }
2318
2319    #[cfg(feature = "async")]
2320    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2321        crate::ensure!(
2322            self.engine().tunables().consume_fuel,
2323            "fuel is not configured in this store"
2324        );
2325        crate::ensure!(
2326            interval != Some(0),
2327            "fuel_async_yield_interval must not be 0"
2328        );
2329
2330        // All future entrypoints must be async to handle the case that fuel
2331        // runs out and an async yield is needed.
2332        self.set_async_required(Asyncness::Yes);
2333
2334        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2335        // Reset the fuel active + reserve states by resetting the amount.
2336        self.set_fuel(self.get_fuel()?)
2337    }
2338
2339    #[inline]
2340    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
2341        let handler = self.signal_handler.as_ref()?;
2342        Some(handler)
2343    }
2344
    /// Returns a non-null pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
2349
    /// Returns a pointer to this store's default caller `VMContext`.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
2354
    /// Returns this store's pointer to its own `dyn VMStore` trait object.
    ///
    /// # Panics
    ///
    /// Panics if the trait object pointer has not yet been initialized.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2359
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    ///
    /// Leaves an empty vector in its place; pair with
    /// `save_hostcall_val_storage` to return the allocation.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }
2366
2367    /// Restores the vector previously taken by `take_hostcall_val_storage`
2368    /// above back into the store, allowing it to be used in the future for the
2369    /// next wasm->host call.
2370    #[inline]
2371    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
2372        if storage.capacity() > self.hostcall_val_storage.capacity() {
2373            self.hostcall_val_storage = storage;
2374        }
2375    }
2376
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    ///
    /// Leaves an empty vector in its place.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
2383
2384    /// Same as `save_hostcall_val_storage`, but for the direction of the host
2385    /// calling wasm.
2386    #[inline]
2387    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
2388        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
2389            self.wasm_val_raw_storage = storage;
2390        }
2391    }
2392
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // An address should map to at most one instance; a second hit
                // would indicate overlapping memories and is asserted against.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No linear memory claimed this address: the segfault was caught in
        // error, which is treated as a fatal condition below.
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2501
    /// Retrieve the store's protection key.
    ///
    /// Returns `None` when no key was assigned to this store.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2508
    /// Returns mutable access to this store's async fiber state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2513
    /// Returns whether this store has a protection key assigned.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2518
    /// Returns a borrowed view of this store's executor: either the
    /// interpreter or the native backend.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2526
    /// Swaps this store's executor with `executor`.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2531
    /// Returns the stack-unwinding implementation matching this store's
    /// executor (interpreter vs. native host code).
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2539
2540    /// Allocates a new continuation. Note that we currently don't support
2541    /// deallocating them. Instead, all continuations remain allocated
2542    /// throughout the store's lifetime.
2543    #[cfg(feature = "stack-switching")]
2544    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
2545        // FIXME(frank-emrich) Do we need to pin this?
2546        let mut continuation = Box::new(VMContRef::empty());
2547        let stack_size = self.engine.config().async_stack_size;
2548        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
2549        continuation.stack = stack;
2550        let ptr = continuation.deref_mut() as *mut VMContRef;
2551        self.continuations.push(continuation);
2552        Ok(ptr)
2553    }
2554
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id this instance will receive; the prediction is
        // checked against the actually-pushed key at the end of this function.
        let id = self.instances.next_key();

        // Dummy instances use the allocator they carry; real modules use the
        // engine's configured allocator.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })?
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })?
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2630
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        // Overwrites (and drops) any previously-pending exception.
        self.pending_exception = Some(exnref);
    }
2639
    /// Take a pending exception, if any, leaving `None` in its place.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2645
    /// Tests whether there is a pending exception.
    ///
    /// Does not take or otherwise disturb the pending exception.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2651
2652    #[cfg(feature = "gc")]
2653    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
2654        let vmexnref = self.take_pending_exception()?;
2655        let mut nogc = AutoAssertNoGc::new(self);
2656        Some(Rooted::new(&mut nogc, vmexnref.into()))
2657    }
2658
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    ///
    /// Returns `Ok(None)` when no exception is pending. Internally the GC
    /// reference is cloned: the clone is stored back as the pending
    /// exception while the original is rooted and returned.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(
        &mut self,
    ) -> Result<Option<OwnedRooted<ExnRef>>, crate::error::OutOfMemory> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception
            .take()
            .map(|vmexnref| {
                // Clone the GC ref and put the clone back on the store; the
                // original becomes the rooted result.
                let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
                nogc.pending_exception = Some(cloned.into_exnref_unchecked());
                OwnedRooted::new(&mut nogc, vmexnref.into())
            })
            .transpose()
    }
2675
    /// Records `exception` as this store's pending exception.
    ///
    /// # Panics
    ///
    /// Panics if `exception` converts to a null (zero) raw GC reference,
    /// which would indicate a bug in the caller.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        let mut nogc = AutoAssertNoGc::new(self);
        // Convert the rooted exception down to its raw GC representation.
        let exnref = exception._to_raw(&mut nogc).unwrap();
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2685
2686    #[cfg(target_has_atomic = "64")]
2687    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2688        // Set a new deadline based on the "epoch deadline delta".
2689        //
2690        // Also, note that when this update is performed while Wasm is
2691        // on the stack, the Wasm will reload the new value once we
2692        // return into it.
2693        let current_epoch = self.engine().current_epoch();
2694        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
2695        *epoch_deadline = current_epoch + delta;
2696    }
2697
    /// Returns this store's current epoch deadline.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2701
    /// Validates that a synchronous (non-`async`) API entrypoint may be used
    /// with this store.
    ///
    /// # Errors
    ///
    /// Errors if this store has been flagged as requiring async entrypoints.
    #[inline]
    pub(crate) fn validate_sync_call(&self) -> Result<()> {
        #[cfg(feature = "async")]
        if self.async_state.async_required {
            bail!("store configuration requires that `*_async` functions are used instead");
        }
        Ok(())
    }
2710
    /// Returns whether this store is presently on a fiber and is allowed to
    /// block via `block_on` with fibers.
    pub(crate) fn can_block(&mut self) -> bool {
        // The `if true` lets the `#[cfg]`'d statement `return` while keeping
        // the `false` fallback below for non-async builds.
        #[cfg(feature = "async")]
        if true {
            return self.fiber_async_state_mut().can_block();
        }

        false
    }
2721
    /// No-op stand-in for the async-enabled `set_async_required`: without the
    /// `async` feature `Asyncness` has no `Yes` variant, so there is nothing
    /// to record.
    #[cfg(not(feature = "async"))]
    pub(crate) fn set_async_required(&mut self, asyncness: Asyncness) {
        // This match is exhaustive: `Asyncness::Yes` only exists with the
        // `async` feature enabled.
        match asyncness {
            Asyncness::No => {}
        }
    }
2728}
2729
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2748
// `StoreInner<T>` is the concrete implementation of the `dyn`-erased
// `VMStore` interface used by the runtime internals.
unsafe impl<T> VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        // `StoreInner<T>` itself implements `VMComponentAsyncStore`.
        self
    }

    #[cfg(feature = "component-model")]
    fn component_task_state_mut(&mut self) -> &mut crate::component::store::ComponentTaskState {
        StoreOpaque::component_task_state_mut(self)
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        // Split the store into its `T` data, the configured limiter closure,
        // and the opaque internals so that the limiter (which borrows `data`)
        // can be returned alongside the opaque store.
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            // No callback configured: request an interrupt by default.
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> crate::Result<()> {
        // NOTE(review): the handler is `take`n here and not visibly restored
        // within this function -- confirm whether one-shot delivery (or
        // restoration elsewhere) is the intended semantics.
        if let Some(handler) = self.debug_handler.take() {
            if !self.can_block() {
                bail!("could not invoke debug handler without async context");
            }
            log::trace!("about to raise debug event {event:?}");
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
2814
impl<T> StoreInner<T> {
    /// Clears any configured epoch-deadline callback so that reaching the
    /// deadline falls back to the default behavior (see
    /// `new_epoch_updated_deadline`).
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Configures `callback` to be invoked whenever the epoch deadline is
    /// reached, determining how execution proceeds.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2829
impl<T: Default> Default for Store<T> {
    /// Creates a store against a default `Engine` with default `T` data.
    fn default() -> Store<T> {
        Store::new(&Engine::default(), T::default())
    }
}
2835
impl<T: fmt::Debug> fmt::Debug for Store<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Show the inner store by address (identity) rather than contents,
        // alongside the user-provided `T` data.
        let inner = &**self.inner as *const StoreInner<T>;
        f.debug_struct("Store")
            .field("inner", &inner)
            .field("data", self.inner.data())
            .finish()
    }
}
2845
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        //
        // Note the ordering here: the user's `T` data is dropped before the
        // rest of the store internals.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2857
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Deallocate the GC heap, if one was ever allocated, returning
            // its backing memory to the engine's allocator.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                // Dummy instances were allocated with an on-demand-flavored
                // allocator (see `AllocateInstanceKind::Dummy`), so deallocate
                // them with a matching one rather than the engine's allocator.
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            self.store_data.decrement_allocator_resources(allocator);
        }
    }
}
2891
/// Internal conversion trait: any store-like type from which a
/// `&mut StoreOpaque` can be obtained.
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2901
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Identity conversion: already an opaque store.
        self
    }
}
2907
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Coerces via `dyn VMStore`'s deref to `StoreOpaque` (presumably
        // defined elsewhere in this module).
        self
    }
}
2913
impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Peel both layers: `Store<T>` -> `StoreInner<T>` -> `StoreOpaque`.
        &mut self.inner.inner
    }
}
2919
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Coerces via `StoreInner<T>`'s deref to `StoreOpaque`.
        self
    }
}
2925
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        // Forward through the reference to the underlying implementation.
        T::as_store_opaque(self)
    }
}
2931
/// Helper enum to indicate, in some function contexts, whether `async` should
/// be taken advantage of or not.
///
/// This is used throughout Wasmtime where internal functions are all `async`
/// but external functions might be either sync or `async`. If the external
/// function is sync, then internally Wasmtime shouldn't yield as it won't do
/// anything. If the external function is `async`, however, yields are fine.
///
/// An example of this is GC. Right now GC will cooperatively yield after phases
/// of GC have passed, but this cooperative yielding is only enabled with
/// `Asyncness::Yes`.
///
/// This enum is additionally conditionally defined such that `Yes` is only
/// present in `async`-enabled builds. That ensures that this compiles down to a
/// zero-sized type in `async`-disabled builds in case that interests embedders.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum Asyncness {
    /// Don't do async things, don't yield, etc. It's ok to execute an `async`
    /// function, but it should be validated ahead of time that when doing so a
    /// yield isn't possible (e.g. the `validate_sync_*` methods on `Store`).
    No,

    /// Async things is OK. This should only be used when the API entrypoint is
    /// itself `async`.
    #[cfg(feature = "async")]
    Yes,
}
2959
impl core::ops::BitOr for Asyncness {
    type Output = Self;

    /// Combines two `Asyncness` values: the result is `Yes` if either operand
    /// is `Yes`, otherwise `No`.
    fn bitor(self, rhs: Self) -> Self::Output {
        match (self, rhs) {
            (Asyncness::No, Asyncness::No) => Asyncness::No,
            // In non-async builds `Yes` doesn't exist, so this arm (and the
            // non-exhaustiveness it covers) is compiled out.
            #[cfg(feature = "async")]
            (Asyncness::Yes, _) | (_, Asyncness::Yes) => Asyncness::Yes,
        }
    }
}
2971
2972#[cfg(test)]
2973mod tests {
2974    use super::*;
2975
    /// Miniature stand-in for the store's fuel bookkeeping, exercising the
    /// free `get_fuel`/`refuel`/`set_fuel` helpers in isolation.
    struct FuelTank {
        // Fuel-consumed counter; negative values represent fuel still
        // available in the active counter (see `smoke` below).
        pub consumed_fuel: i64,
        // Fuel held back, moved into the active counter by `refuel`.
        pub reserve_fuel: u64,
        // When set, bounds how much fuel is active at once.
        pub yield_interval: Option<NonZeroU64>,
    }
2981
    impl FuelTank {
        /// An empty tank: no fuel, no reserve, yielding disabled.
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        /// Total fuel remaining across the active counter and the reserve.
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        /// Moves fuel from the reserve into the active counter, returning
        /// whether any fuel was available.
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        /// Sets the total fuel, split between active counter and reserve.
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }
3009
    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        // Without a yield interval all fuel goes to the active (negative
        // consumed) counter and none is held in reserve.
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With a yield interval only one interval's worth of fuel is active;
        // the remainder is kept in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }
3022
    #[test]
    fn does_not_lose_precision() {
        // Setting then getting fuel round-trips exactly, including values at
        // and just beyond `i64::MAX`.
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }
3035
    #[test]
    fn yielding_does_not_lose_precision() {
        // Even with yield intervals configured (including extreme ones) the
        // total reported fuel must round-trip exactly.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // Intervals at or above `i64::MAX` leave the active counter clamped
        // at `-i64::MAX` with the rest in reserve.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }
3058
3059    #[test]
3060    fn refueling() {
3061        // It's possible to fuel to have consumed over the limit as some instructions can consume
3062        // multiple units of fuel at once. Refueling should be strict in it's consumption and not
3063        // add more fuel than there is.
3064        let mut tank = FuelTank::new();
3065
3066        tank.yield_interval = NonZeroU64::new(10);
3067        tank.reserve_fuel = 42;
3068        tank.consumed_fuel = 4;
3069        assert!(tank.refuel());
3070        assert_eq!(tank.reserve_fuel, 28);
3071        assert_eq!(tank.consumed_fuel, -10);
3072
3073        tank.yield_interval = NonZeroU64::new(1);
3074        tank.reserve_fuel = 8;
3075        tank.consumed_fuel = 4;
3076        assert_eq!(tank.get_fuel(), 4);
3077        assert!(tank.refuel());
3078        assert_eq!(tank.reserve_fuel, 3);
3079        assert_eq!(tank.consumed_fuel, -1);
3080        assert_eq!(tank.get_fuel(), 4);
3081
3082        tank.yield_interval = NonZeroU64::new(10);
3083        tank.reserve_fuel = 3;
3084        tank.consumed_fuel = 4;
3085        assert_eq!(tank.get_fuel(), 0);
3086        assert!(!tank.refuel());
3087        assert_eq!(tank.reserve_fuel, 3);
3088        assert_eq!(tank.consumed_fuel, 4);
3089        assert_eq!(tank.get_fuel(), 0);
3090    }
3091
    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        // Stand-in for compiled Wasm: reach the store's `T` through the raw
        // `vm_store_context.store_data` pointer rather than a Rust borrow,
        // exercising the provenance path that real guest code would use.
        // NOTE(review): soundness here presumably relies on the store-data
        // pointer carrying provenance over the whole `T` — miri is the
        // arbiter of that; this test exists to be run under it.
        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        // Interleave safe Rust access (`data`/`data_mut`) with raw-pointer
        // mutation; every increment must be visible to the other side.
        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
3122}