wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(all(feature = "gc", feature = "debug"))]
80use crate::OwnedRooted;
81use crate::RootSet;
82#[cfg(feature = "gc")]
83use crate::ThrownException;
84#[cfg(feature = "component-model-async")]
85use crate::component::ComponentStoreData;
86#[cfg(feature = "component-model")]
87use crate::component::concurrent;
88#[cfg(feature = "async")]
89use crate::fiber;
90use crate::module::RegisteredModuleId;
91use crate::prelude::*;
92#[cfg(feature = "gc")]
93use crate::runtime::vm::GcRootsList;
94#[cfg(feature = "stack-switching")]
95use crate::runtime::vm::VMContRef;
96use crate::runtime::vm::mpk::ProtectionKey;
97use crate::runtime::vm::{
98    self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
99    InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
100    SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
101    VMStoreContext,
102};
103use crate::trampoline::VMHostGlobalContext;
104#[cfg(feature = "debug")]
105use crate::{BreakpointState, DebugHandler};
106use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
107#[cfg(feature = "gc")]
108use crate::{ExnRef, Rooted};
109use crate::{Global, Instance, Table};
110use alloc::sync::Arc;
111use core::convert::Infallible;
112use core::fmt;
113use core::marker;
114use core::mem::{self, ManuallyDrop, MaybeUninit};
115use core::num::NonZeroU64;
116use core::ops::{Deref, DerefMut};
117use core::pin::Pin;
118use core::ptr::NonNull;
119use wasmtime_environ::StaticModuleIndex;
120use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
121
122mod context;
123pub use self::context::*;
124mod data;
125pub use self::data::*;
126mod func_refs;
127use func_refs::FuncRefs;
128#[cfg(feature = "component-model-async")]
129mod token;
130#[cfg(feature = "component-model-async")]
131pub(crate) use token::StoreToken;
132#[cfg(feature = "async")]
133mod async_;
134#[cfg(all(feature = "async", feature = "call-hook"))]
135pub use self::async_::CallHookHandler;
136
137#[cfg(feature = "gc")]
138use super::vm::VMExnRef;
139#[cfg(feature = "gc")]
140mod gc;
141
142/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
143///
144/// All WebAssembly instances and items will be attached to and refer to a
145/// [`Store`]. For example instances, functions, globals, and tables are all
146/// attached to a [`Store`]. Instances are created by instantiating a
147/// [`Module`](crate::Module) within a [`Store`].
148///
149/// A [`Store`] is intended to be a short-lived object in a program. No form
150/// of GC is implemented at this time so once an instance is created within a
151/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
152/// This makes [`Store`] unsuitable for creating an unbounded number of
153/// instances in it because [`Store`] will never release this memory. It's
154/// recommended to have a [`Store`] correspond roughly to the lifetime of a
155/// "main instance" that an embedding is interested in executing.
156///
157/// ## Type parameter `T`
158///
159/// Each [`Store`] has a type parameter `T` associated with it. This `T`
160/// represents state defined by the host. This state will be accessible through
161/// the [`Caller`](crate::Caller) type that host-defined functions get access
162/// to. This `T` is suitable for storing `Store`-specific information which
163/// imported functions may want access to.
164///
165/// The data `T` can be accessed through methods like [`Store::data`] and
166/// [`Store::data_mut`].
167///
168/// ## Stores, contexts, oh my
169///
170/// Most methods in Wasmtime take something of the form
171/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
172/// the first argument. These two traits allow ergonomically passing in the
173/// context you currently have to any method. The primary two sources of
174/// contexts are:
175///
176/// * `Store<T>`
177/// * `Caller<'_, T>`
178///
179/// corresponding to what you create and what you have access to in a host
180/// function. You can also explicitly acquire a [`StoreContext`] or
181/// [`StoreContextMut`] and pass that around as well.
182///
183/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
184/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
185/// form of context you have you can call various methods, create objects, etc.
186///
187/// ## Stores and `Default`
188///
189/// You can create a store with default configuration settings using
190/// `Store::default()`. This will create a brand new [`Engine`] with default
191/// configuration (see [`Config`](crate::Config) for more information).
192///
193/// ## Cross-store usage of items
194///
195/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
196/// [`Store`]. The store they belong to is the one they were created with
197/// (passed in as a parameter) or instantiated with. This store is the only
198/// store that can be used to interact with wasm items after they're created.
199///
200/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
201/// operations is incorrect. In other words it's considered a programmer error
202/// rather than a recoverable error for the wrong [`Store`] to be used when
203/// calling APIs.
pub struct Store<T: 'static> {
    /// The sole field of a `Store<T>`, keeping this type exactly one pointer
    /// in size — a load-bearing property per the module docs above.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
208
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
#[derive(Copy, Clone, Debug)]
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code).
    pub fn entering_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code).
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
239
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional host-configured resource limiter (sync or async), stored as a
    /// closure which projects the limiter out of the user's `T` data.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional host-configured hook invoked on host<->wasm transitions (see
    /// `CallHook`).
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine epoch reaches this store's deadline;
    /// its returned `UpdateDeadline` dictates how execution proceeds.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// The handler needs the whole store mutably (via `StoreContextMut`)
    /// while being invoked, so it cannot stay borrowed from this field
    /// during dispatch: the box is temporarily taken out, a clone of the
    /// underlying handler is made (`DebugHandler` requires `Clone`), and the
    /// box is put back before the event runs. See the `StoreDebugHandler`
    /// blanket impl in this file.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
279
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Dispatch `event` to the underlying handler.
    ///
    /// Consumes the boxed adapter; the blanket impl re-installs it into
    /// `store` before running so the handler remains configured afterwards.
    /// The returned future is boxed so it can cross this object-safe trait.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
290
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it.
        store.0.debug_handler = Some(self);
        // Box the user handler's future so it can flow back through the
        // object-safe `StoreDebugHandler::handle` signature.
        Box::new(async move { handler.handle(store, event).await })
    }
}
313
/// Storage for a store's configured resource limiter, held as a closure that
/// projects the limiter (sync or async flavor) out of the user's `T` data.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter (`crate::ResourceLimiter`).
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter (`crate::ResourceLimiterAsync`), only available
    /// with async support compiled in.
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
319
/// Representation of a configured resource limiter for a store.
///
/// This is acquired with `resource_limiter_and_store_opaque` for example and is
/// threaded through to growth operations on tables/memories. Note that this is
/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
/// efficient to pass around (nullable pointer) and it's also notably passed
/// around as an `Option` to represent how this is optionally specified within a
/// store.
pub enum StoreResourceLimiter<'a> {
    /// Borrowed synchronous limiter; its callbacks complete immediately.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// Borrowed asynchronous limiter; growth callbacks must be awaited.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
333
impl StoreResourceLimiter<'_> {
    /// Asks the limiter whether a linear memory may grow from `current` to
    /// `desired` (units per the `ResourceLimiter` trait), where `maximum` is
    /// the memory's declared upper bound, if any.
    ///
    /// Awaits the host's callback when the async flavor is configured; the
    /// sync flavor completes immediately.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a memory growth request failed with `error`.
    pub(crate) fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Asks the limiter whether a table may grow from `current` to `desired`
    /// elements; `maximum` is the table's declared upper bound, if any.
    ///
    /// Awaits the host's callback when the async flavor is configured.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a table growth request failed with `error`.
    pub(crate) fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
377
/// Storage for a store's configured call hook, if any (see `CallHook` for the
/// transitions it observes).
enum CallHookInner<T: 'static> {
    /// Synchronous hook invoked directly on each transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook, only available with async support compiled in.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited variant that only exists to reference `T`, so the type
    /// parameter counts as used no matter which features are enabled.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}
392
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the value returned from the store's epoch-deadline callback (see
/// `StoreInner::epoch_deadline_behavior`).
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
420
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`. This
// lets code holding a `&StoreInner<T>` call the monomorphic `StoreOpaque`
// methods without spelling out the `inner` field at each callsite.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
428
// Mutable counterpart of the `Deref` forwarding: `&mut StoreInner<T>`
// dereferences to the monomorphic `StoreOpaque`.
impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
434
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The `Engine` this store was created against (cloned in `Store::new`).
    engine: Engine,
    /// State shared with the VM layer; notably `Store::new` records a pointer
    /// to the user's `T` data in `vm_store_context.store_data`.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances associated with this store, keyed by `InstanceId`.
    /// See `StoreInstance` for the per-instance bookkeeping.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    /// Count of component instances in this store, tracked separately from
    /// `instances` above.
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Custom signal handler for this store, if one was configured.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules associated with this store (see `ModuleRegistry`).
    modules: ModuleRegistry,
    /// Storage for `VMFuncRef`s belonging to this store (see `FuncRefs`).
    func_refs: FuncRefs,
    /// Backing storage for host-created globals.
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    /// The GC heap/store, if one has been created (`None` at store creation).
    gc_store: Option<GcStore>,
    /// Set of rooted GC references for this store (see `RootSet`).
    gc_roots: RootSet,
    /// Scratch list of GC roots — NOTE(review): presumably populated when
    /// tracing roots for a collection; confirm against the `gc` submodule.
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// Async execution state when async support is compiled in.
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-pointer to this store as a `dyn VMStore` trait object, installed
    /// by `Store::new` (see `StorePtr`).
    traitobj: StorePtr,
    /// Pointer to a "default caller" `VMContext`; initialized to a dangling
    /// pointer in `Store::new` — NOTE(review): presumably fixed up later in
    /// store initialization, beyond this view.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
    #[cfg(feature = "component-model")]
    concurrent_state: concurrent::ConcurrentState,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,

    /// The debug breakpoint state for this store.
    ///
    /// When guest debugging is enabled, a given store may have a set
    /// of breakpoints defined, denoted by module and Wasm PC within
    /// that module. Or alternately, it may be in "single-step" mode,
    /// where every possible breakpoint is logically enabled.
    ///
    /// When execution of any instance in this store hits any defined
    /// breakpoint, a `Breakpoint` debug event is emitted and the
    /// handler defined above, if any, has a chance to perform some
    /// logic before returning to allow execution to resume.
    #[cfg(feature = "debug")]
    breakpoints: BreakpointState,
}
568
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// Starts out as `None` during `Store::new` and is filled in with a pointer to
/// the freshly-boxed `StoreInner` shortly thereafter.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);
575
// We can't make `VMStore: Send + Sync` because that requires making all of
// Wasmtime's internals generic over the `Store`'s `T`. So instead, we take care
// in the whole VM layer to only use the `VMStore` in ways that are `Send`- and
// `Sync`-safe and we have to have these unsafe impls.
//
// SAFETY: upheld by convention across the VM layer as described above rather
// than enforced by the type system.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
582
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm runs in the Pulley interpreter, whose state is carried here.
    Interpreter(Interpreter),
    /// Wasm runs as natively-compiled code on the host; only available when a
    /// host compiler backend is compiled in.
    #[cfg(has_host_compiler_backend)]
    Native,
}
592
impl Executor {
    /// Creates the appropriate executor for `engine`: the Pulley interpreter
    /// when the engine targets Pulley, otherwise native execution.
    pub(crate) fn new(engine: &Engine) -> Self {
        // With a native backend available, still use the interpreter when
        // this engine was explicitly configured to target Pulley. Note that
        // exactly one of the two `cfg` arms below is compiled in, and each is
        // the function's tail expression in its configuration.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a native backend the interpreter is the only option, so the
        // target must be Pulley.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
608
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed form of `Executor::Interpreter`.
    Interpreter(InterpreterRef<'a>),
    /// Borrowed form of `Executor::Native`.
    #[cfg(has_host_compiler_backend)]
    Native,
}
615
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) was placed in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered, and thus must be exited
    /// when this value is dropped.
    entered: bool,
}
622
623impl<'a> AutoAssertNoGc<'a> {
624    #[inline]
625    pub fn new(store: &'a mut StoreOpaque) -> Self {
626        let entered = if !cfg!(feature = "gc") {
627            false
628        } else if let Some(gc_store) = store.gc_store.as_mut() {
629            gc_store.gc_heap.enter_no_gc_scope();
630            true
631        } else {
632            false
633        };
634
635        AutoAssertNoGc { store, entered }
636    }
637
638    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
639    /// disables checks for no GC happening for the duration of this value.
640    ///
641    /// This is used when it is statically otherwise known that a GC doesn't
642    /// happen for the various types involved.
643    ///
644    /// # Unsafety
645    ///
646    /// This method is `unsafe` as it does not provide the same safety
647    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
648    /// caller that a GC doesn't happen.
649    #[inline]
650    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
651        if cfg!(debug_assertions) {
652            AutoAssertNoGc::new(store)
653        } else {
654            AutoAssertNoGc {
655                store,
656                entered: false,
657            }
658        }
659    }
660}
661
// Shared access to the wrapped store while the no-GC scope is held.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}
670
// Mutable access to the wrapped store while the no-GC scope is held.
impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
677
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the no-GC scope if `new` actually entered one; `disabled`
        // (in release builds) and stores without a GC heap never enter.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
686
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The allocated instance itself.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
695
/// Distinguishes real instances in a store from internal "dummy" instances.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
712
713impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        // Reserve a protection key from the engine's allocator, if one is
        // available. (Presumably used for MPK-style memory striping — the
        // consumer of `pkey` is not visible here.)
        let pkey = engine.allocator().next_available_pkey();

        let inner = StoreOpaque {
            // `PhantomPinned` opts `StoreOpaque` out of `Unpin` since
            // self-referential pointers into this allocation are created
            // below.
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // Filled in below once the `Box<StoreInner<T>>` exists, since
            // that's the allocation this pointer must reference.
            traitobj: StorePtr(None),
            // Placeholder; replaced with the dummy instance's vmctx below.
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model")]
            concurrent_state: Default::default(),
            #[cfg(feature = "debug")]
            breakpoints: Default::default(),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        });

        // Record a pointer to the `T` data inside the `VMStoreContext`. All
        // subsequent accesses to `T` must go through this pointer's
        // provenance (see `StoreInner::data` for the details of why).
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Self-referential pointer back into this very allocation; this is
        // what makes the `PhantomPinned` marker above load-bearing.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::new(StaticModuleIndex::from_u32(
            0,
        )));
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let id = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                &shim,
                Default::default(),
            ))
            .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
834
    /// Access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data(&self) -> &T {
        // Delegates to `StoreInner::data`, which performs a
        // provenance-preserving read of the store's `T`.
        self.inner.data()
    }
840
    /// Access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        // Delegates to the internal `StoreInner` accessor (which, like
        // `data`, must preserve the `store_data` pointer's provenance).
        self.inner.data_mut()
    }
846
    /// Runs cleanup that must happen before this store's fields are torn
    /// down: component-model fibers/futures (when enabled) and any cached
    /// fiber stacks. Called from both `into_data` and the drop path.
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        //
        // This will also drop any futures which might use a `&Accessor` fields
        // in their `Drop::drop` implementations, in which case they'll need to
        // be called from with in the context of a `tls::set` closure.
        #[cfg(feature = "component-model-async")]
        ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
863
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        // Flush fibers/futures and cached stacks up front; after the
        // `mem::forget(self)` below the `Store` destructor will never run,
        // so this is the only chance to do that cleanup.
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
898
899    /// Configures the [`ResourceLimiter`] used to limit resource creation
900    /// within this [`Store`].
901    ///
902    /// Whenever resources such as linear memory, tables, or instances are
903    /// allocated the `limiter` specified here is invoked with the store's data
904    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
905    /// being allocated. The returned [`ResourceLimiter`] is intended to live
906    /// within the `T` itself, for example by storing a
907    /// [`StoreLimits`](crate::StoreLimits).
908    ///
909    /// Note that this limiter is only used to limit the creation/growth of
910    /// resources in the future, this does not retroactively attempt to apply
911    /// limits to the [`Store`].
912    ///
913    /// # Examples
914    ///
915    /// ```
916    /// use wasmtime::*;
917    ///
918    /// struct MyApplicationState {
919    ///     my_state: u32,
920    ///     limits: StoreLimits,
921    /// }
922    ///
923    /// let engine = Engine::default();
924    /// let my_state = MyApplicationState {
925    ///     my_state: 42,
926    ///     limits: StoreLimitsBuilder::new()
927    ///         .memory_size(1 << 20 /* 1 MB */)
928    ///         .instances(2)
929    ///         .build(),
930    /// };
931    /// let mut store = Store::new(&engine, my_state);
932    /// store.limiter(|state| &mut state.limits);
933    ///
934    /// // Creation of smaller memories is allowed
935    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
936    ///
937    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
938    /// // configured
939    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
940    ///
941    /// // The number of instances in this store is limited to 2, so the third
942    /// // instance here should fail.
943    /// let module = Module::new(&engine, "(module)").unwrap();
944    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
945    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
946    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
947    /// ```
948    ///
949    /// [`ResourceLimiter`]: crate::ResourceLimiter
950    pub fn limiter(
951        &mut self,
952        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
953    ) {
954        // Apply the limits on instances, tables, and memory given by the limiter:
955        let inner = &mut self.inner;
956        let (instance_limit, table_limit, memory_limit) = {
957            let l = limiter(inner.data_mut());
958            (l.instances(), l.tables(), l.memories())
959        };
960        let innermost = &mut inner.inner;
961        innermost.instance_limit = instance_limit;
962        innermost.table_limit = table_limit;
963        innermost.memory_limit = memory_limit;
964
965        // Save the limiter accessor function:
966        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
967    }
968
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Box the closure and install it as the synchronous hook variant,
        // replacing any previously-configured hook.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
990
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // The `Engine` handle itself lives in the internal `StoreOpaque`.
        self.inner.engine()
    }
995
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // Build a temporary `StoreContextMut` view and run the synchronous GC
        // entry point (which asserts that async support is disabled).
        StoreContextMut(&mut self.inner).gc(why)
    }
1013
    /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must
    /// be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        // Thin wrapper over the internal fuel accounting.
        self.inner.get_fuel()
    }
1024
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // Thin wrapper over the internal fuel accounting.
        self.inner.set_fuel(fuel)
    }
1047
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // Thin wrapper; validation of async support happens internally.
        self.inner.fuel_async_yield_interval(interval)
    }
1079
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // Thin wrapper; the deadline arithmetic lives in the internals.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
1110
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This trapping behavior is the store's default: it applies unless the
    /// store is otherwise configured via
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        // Thin wrapper; resets any previously-configured deadline behavior.
        self.inner.epoch_deadline_trap();
    }
1141
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Box the callback and hand it to the internals, replacing any
        // previously-configured deadline behavior.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1176
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To inspect or take the pending exception, use
    /// [`peek_pending_exception`] and [`take_pending_exception`]. For
    /// a convenient wrapper that invokes a closure and provides any
    /// caught exception from the closure to a separate handler
    /// closure, see [`StoreContextMut::catch`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the exception in the store's pending-exception slot, then
        // signal the in-flight throw to the caller via the error value.
        self.inner.throw_impl(exception);
        Err(ThrownException)
    }
1215
    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::catch`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        // The internal helper both clears the slot and re-roots the
        // exception in the current handle scope.
        self.inner.take_pending_exception_rooted()
    }
1236
1237    /// Tests whether there is a pending exception.
1238    ///
1239    /// Ordinarily, a pending exception will be set on a store if and
1240    /// only if a host-side callstack is propagating a
1241    /// [`crate::ThrownException`] error. The final consumer that
1242    /// catches the exception takes it; it may re-place it to re-throw
1243    /// (using [`throw`]) if it chooses not to actually handle the
1244    /// exception.
1245    ///
1246    /// This method is useful to tell whether a store is in this
1247    /// state, but should not be used as part of the ordinary
1248    /// exception-handling flow. For the most idiomatic handling, see
1249    /// [`StoreContextMut::catch`].
1250    #[cfg(feature = "gc")]
1251    pub fn has_pending_exception(&self) -> bool {
1252        self.inner.pending_exception.is_some()
1253    }
1254
    /// Provide an object that views Wasm stack state, including Wasm
    /// VM-level values (locals and operand stack), when debugging is
    /// enabled.
    ///
    /// This object views the frames from the most recent Wasm entry
    /// onward (up to the exit that allows this host code to run). Any
    /// Wasm stack frames upward from the most recent entry to Wasm
    /// are not visible to this cursor.
    ///
    /// Returns `None` if debug instrumentation is not enabled for
    /// the engine containing this store.
    #[cfg(feature = "debug")]
    pub fn debug_frames(&mut self) -> Option<crate::DebugFrameCursor<'_, T>> {
        // Delegates to the `StoreContextMut` implementation.
        self.as_context_mut().debug_frames()
    }
1270
    /// Start an edit session to update breakpoints.
    ///
    /// Delegates to the `StoreContextMut` implementation; presumably returns
    /// `None` when debugging is not enabled (mirroring [`Store::debug_frames`])
    /// — confirm against the context implementation.
    #[cfg(feature = "debug")]
    pub fn edit_breakpoints(&mut self) -> Option<crate::BreakpointEdit<'_>> {
        self.as_context_mut().edit_breakpoints()
    }
1276
    /// Return all breakpoints.
    ///
    /// Delegates to the shared-context implementation.
    #[cfg(feature = "debug")]
    pub fn breakpoints(&self) -> Option<impl Iterator<Item = crate::Breakpoint> + '_> {
        self.as_context().breakpoints()
    }
1282
    /// Indicate whether single-step mode is enabled.
    ///
    /// Delegates to the shared-context implementation.
    #[cfg(feature = "debug")]
    pub fn is_single_step(&self) -> bool {
        self.as_context().is_single_step()
    }
1288
    /// Set the debug callback on this store.
    ///
    /// See [`crate::DebugHandler`] for more documentation.
    ///
    /// # Panics
    ///
    /// - Will panic if this store is not configured for async
    ///   support.
    /// - Will panic if guest-debug support was not enabled via
    ///   [`crate::Config::guest_debug`].
    #[cfg(feature = "debug")]
    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
    where
        // We require `Send` here because the debug handler becomes
        // referenced from a future: when `DebugHandler::handle` is
        // invoked, its `self` references the `handler` with the
        // user's state. Note that we are careful to keep this bound
        // constrained to debug-handler-related code only and not
        // propagate it outward to the store in general. The presence
        // of the trait implementation serves as a witness that `T:
        // Send`. This is required in particular because we will have
        // a `&mut dyn VMStore` on the stack when we pause a fiber
        // with `block_on` to run a debugger hook; that `VMStore` must
        // be a `Store<T> where T: Send`.
        T: Send,
    {
        // Validate the two preconditions documented under "# Panics" above.
        assert!(
            self.inner.async_support(),
            "debug hooks rely on async support"
        );
        assert!(
            self.engine().tunables().debug_guest,
            "debug hooks require guest debugging to be enabled"
        );
        // Install the boxed handler, replacing any previous one.
        self.inner.debug_handler = Some(Box::new(handler));
    }
1325
    /// Clear the debug handler on this store. If any existed, it will
    /// be dropped.
    #[cfg(feature = "debug")]
    pub fn clear_debug_handler(&mut self) {
        // Overwriting the `Option` drops any previously-installed handler.
        self.inner.debug_handler = None;
    }
1332}
1333
1334impl<'a, T> StoreContext<'a, T> {
1335    pub(crate) fn async_support(&self) -> bool {
1336        self.0.async_support()
1337    }
1338
1339    /// Returns the underlying [`Engine`] this store is connected to.
1340    pub fn engine(&self) -> &Engine {
1341        self.0.engine()
1342    }
1343
1344    /// Access the underlying data owned by this `Store`.
1345    ///
1346    /// Same as [`Store::data`].
1347    pub fn data(&self) -> &'a T {
1348        self.0.data()
1349    }
1350
1351    /// Returns the remaining fuel in this store.
1352    ///
1353    /// For more information see [`Store::get_fuel`].
1354    pub fn get_fuel(&self) -> Result<u64> {
1355        self.0.get_fuel()
1356    }
1357}
1358
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // This synchronous entry point is only usable when async support is
        // disabled; async stores have a separate path.
        assert!(!self.0.async_support());
        // Split the borrow into the limiter and the opaque store internals,
        // then run the collection. With no async support the GC future is
        // asserted to complete immediately.
        let (mut limiter, store) = self.0.resource_limiter_and_store_opaque();
        vm::assert_ready(store.gc(limiter.as_mut(), None, why.map(|e| e.bytes_needed())));
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Same contract as `Store::throw`: stash the exception, then signal
        // the in-flight throw via the error value.
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
1455
impl<T> StoreInner<T> {
    /// Shared access to the embedder-provided `T` data stored in this store.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }

    /// Mutable access to the `T` data, the optional resource limiter, and the
    /// type-erased `StoreOpaque`, all at the same time (split borrows).
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }

    /// Mutable access to just the embedder-provided `T` data.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }

    /// Invoked on Wasm/host transitions; the common case where neither a
    /// protection key nor a call hook is configured is a cheap inline no-op.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // Switch memory-protection-key visibility depending on whether we're
        // entering or leaving Wasm.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            // This variant only exists to mention `T`; it holds an
            // uninhabited type so this arm can never actually execute.
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    /// No-op shim so non-async builds can unconditionally call this.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1562
/// Computes the total fuel remaining given the VM's `injected_fuel` counter
/// (which counts *upward* toward zero, so it is typically negative while fuel
/// remains) and the store-side `fuel_reserve` not yet handed to the VM.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // Equivalent to `fuel_reserve.saturating_add_signed(-injected_fuel)`,
    // but written without the negation so that `injected_fuel == i64::MIN`
    // cannot overflow (`-i64::MIN` panics in debug builds).
    if injected_fuel <= 0 {
        // Fuel still available in the VM, plus the reserve (saturating).
        fuel_reserve.saturating_add(injected_fuel.unsigned_abs())
    } else {
        // The VM has run past its injected allotment; deduct the overrun
        // from the reserve, saturating at zero.
        fuel_reserve.saturating_sub(injected_fuel as u64)
    }
}
1566
1567// Add remaining fuel from the reserve into the active fuel if there is any left.
1568fn refuel(
1569    injected_fuel: &mut i64,
1570    fuel_reserve: &mut u64,
1571    yield_interval: Option<NonZeroU64>,
1572) -> bool {
1573    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1574    if fuel > 0 {
1575        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1576        true
1577    } else {
1578        false
1579    }
1580}
1581
/// Splits `new_fuel_amount` between the VM's active fuel counter
/// (`injected_fuel`) and the store-side `fuel_reserve`.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // When yielding periodically only one interval's worth of fuel is handed
    // to the VM at a time; with no interval the cap is effectively unlimited.
    let interval = yield_interval.map_or(u64::MAX, |i| i.get());
    // Fuel in the VM is stored as an `i64`, so additionally cap the injected
    // slice to the `i64` range.
    let to_inject = new_fuel_amount.min(interval).min(i64::MAX as u64);
    // Bank whatever wasn't injected for later refueling.
    *fuel_reserve = new_fuel_amount - to_inject;
    // The VM *increments* to count fuel and halts once the counter goes
    // positive, so the injected amount is stored negated.
    *injected_fuel = -(to_inject as i64);
}
1601
1602#[doc(hidden)]
1603impl StoreOpaque {
    /// Returns the unique identifier of this store, used to tag items
    /// (instances, functions, etc.) as belonging to this store.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1607
1608    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1609        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1610            let new = slot.saturating_add(amt);
1611            if new > max {
1612                bail!("resource limit exceeded: {desc} count too high at {new}");
1613            }
1614            *slot = new;
1615            Ok(())
1616        }
1617
1618        let module = module.env_module();
1619        let memories = module.num_defined_memories();
1620        let tables = module.num_defined_tables();
1621
1622        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1623        bump(
1624            &mut self.memory_count,
1625            self.memory_limit,
1626            memories,
1627            "memory",
1628        )?;
1629        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1630
1631        Ok(())
1632    }
1633
    /// Whether this store supports asynchronous execution (requires both the
    /// `async` compile-time feature and the engine's `async_support` config).
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }

    /// The engine this store is connected to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    /// Shared access to this store's item storage.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }

    /// Mutable access to this store's item storage.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }

    /// Split borrow of the item storage (mutable) and module registry
    /// (shared) at the same time.
    pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
        (&mut self.store_data, &self.modules)
    }

    /// Split mutable borrow of the breakpoint state and module registry.
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry_mut(
        &mut self,
    ) -> (&mut BreakpointState, &mut ModuleRegistry) {
        (&mut self.breakpoints, &mut self.modules)
    }

    /// Shared borrow of the breakpoint state and module registry.
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry(&self) -> (&BreakpointState, &ModuleRegistry) {
        (&self.breakpoints, &self.modules)
    }

    /// The registry of modules registered with this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1674
    /// Registers `module` with this store's registry, returning its id.
    pub(crate) fn register_module(&mut self, module: &Module) -> Result<RegisteredModuleId> {
        self.modules.register_module(module, &self.engine)
    }

    /// Registers a component (and its constituent modules) with this store.
    #[cfg(feature = "component-model")]
    pub(crate) fn register_component(
        &mut self,
        component: &crate::component::Component,
    ) -> Result<()> {
        self.modules.register_component(component, &self.engine)
    }

    /// Split borrow of the function-reference table (mutable) and the module
    /// registry (shared).
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }

    /// Shared access to host-created globals stored in this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }

    /// Mutable access to host-created globals stored in this store.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1702
1703    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1704        instance.store_id().assert_belongs_to(self.id());
1705        match self.instances[instance.instance()].kind {
1706            StoreInstanceKind::Dummy => None,
1707            StoreInstanceKind::Real { module_id } => {
1708                let module = self
1709                    .modules()
1710                    .module_by_id(module_id)
1711                    .expect("should always have a registered module for real instances");
1712                Some(module)
1713            }
1714        }
1715    }
1716
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }

    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }

    /// Accessor from `InstanceId` to both `Pin<&mut vm::Instance>`
    /// and `&ModuleRegistry`.
    ///
    /// Useful when the instance must be mutated while module metadata is
    /// consulted at the same time (split borrow of `self`).
    #[inline]
    pub fn instance_and_module_registry_mut(
        &mut self,
        id: InstanceId,
    ) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
        (self.instances[id].handle.get_mut(), &self.modules)
    }
1746
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` returns an error on overlapping ids; the
        // `unwrap` here is what produces the documented panic.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }

    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    /// acquired with a single split borrow of `self`.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }

    /// Tuple of `Self::optional_gc_store_mut`, `Self::modules`, and
    /// `Self::instance_mut`.
    pub fn optional_gc_store_and_registry_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (
        Option<&mut GcStore>,
        &ModuleRegistry,
        Pin<&mut vm::Instance>,
    ) {
        (
            self.gc_store.as_mut(),
            &self.modules,
            self.instances[id].handle.get_mut(),
        )
    }
1797
1798    /// Get all instances (ignoring dummy instances) within this store.
1799    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1800        let instances = self
1801            .instances
1802            .iter()
1803            .filter_map(|(id, inst)| {
1804                if let StoreInstanceKind::Dummy = inst.kind {
1805                    None
1806                } else {
1807                    Some(id)
1808                }
1809            })
1810            .collect::<Vec<_>>();
1811        instances
1812            .into_iter()
1813            .map(|i| Instance::from_wasmtime(i, self))
1814    }
1815
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1826
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1840
    /// Iterate over all globals (host- or Wasm-defined) within this store,
    /// invoking `f` once per global.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1858
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }

    /// Shared access to the raw `VMStoreContext` shared with compiled code.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }

    /// Mutable access to the raw `VMStoreContext` shared with compiled code.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1873
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // NOTE(review): written as `is_some` + `unwrap` rather than `if let
        // Some(..)` — presumably to satisfy the borrow checker when returning
        // a borrow from one branch; confirm before restructuring.
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1891
    /// Slow path of `ensure_gc_store`: allocates the GC heap's backing linear
    /// memory and the GC heap itself, then publishes them into
    /// `vm_store_context` for compiled code to use.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Preconditions: no GC store yet, and the published heap description
        // is still the empty/dangling placeholder.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // Inner helper; the real implementation when GC support is compiled in.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::{StaticModuleIndex, packed_option::ReservedValue};

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            // The request uses a bare/empty module since the heap memory
            // belongs to no real instance.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(wasmtime_environ::Module::new(
                    StaticModuleIndex::from_u32(0),
                ))),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Fallback when the `gc` feature is compiled out: always an error.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1961
    /// Helper method to require that a `GcStore` was previously allocated for
    /// this store, failing if it has not yet been allocated.
    ///
    /// Note that this should only be used in a context where allocation of a
    /// `GcStore` is sure to have already happened prior, otherwise this may
    /// return a confusing error to embedders which is a bug in Wasmtime.
    ///
    /// Some situations where it's safe to call this method:
    ///
    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
    ///   this shows proof that the `GcStore` was previously allocated.
    /// * During instantiation and instance's `needs_gc_heap` flag will be
    ///   handled and instantiation will automatically create a GC store.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }

    /// Same as [`Self::require_gc_store`], but mutable.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
        match &mut self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }
1993
    /// Attempts to access the GC store that has been previously allocated.
    ///
    /// This method will return `Some` if the GC store was previously allocated.
    /// A `None` return value means either that the GC heap hasn't yet been
    /// allocated or that it does not need to be allocated for this store. Note
    /// that to require a GC store in a particular situation it's recommended to
    /// use [`Self::require_gc_store_mut`] instead.
    #[inline]
    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
            // GC is disabled at compile time or configuration time, so no GC
            // store should ever have been allocated for this store.
            debug_assert!(self.gc_store.is_none());
            None
        } else {
            self.gc_store.as_mut()
        }
    }
2010
    /// Helper to assert that a GC store was previously allocated and is
    /// present.
    ///
    /// # Panics
    ///
    /// This method will panic if the GC store has not yet been allocated. This
    /// should only be used in a context where there's an existing GC reference,
    /// for example, or if `ensure_gc_store` has already been called.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Same as [`Self::unwrap_gc_store`], but mutable.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Shared access to this store's set of embedder-rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }

    /// Mutable access to this store's set of embedder-rooted GC references.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }

    /// Pops LIFO-scoped GC roots back down to the depth recorded in `scope`.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
2051
    /// Runs a garbage collection: traces all roots and then invokes the GC
    /// store's collector over them.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots).await;
        let async_yield = self.async_support();
        // NOTE(review): `roots.iter()` is `unsafe`; presumably its contract is
        // that `roots` stays alive and unmodified for the duration of the
        // `gc` call — confirm against `GcRootsList`'s documentation.
        self.unwrap_gc_store_mut()
            .gc(async_yield, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
2077
    /// Populates `gc_roots_list` with every kind of root this store knows
    /// about: Wasm stacks, suspended continuations, vmctx globals/tables,
    /// embedder roots, and any pending exception.
    ///
    /// When async support is enabled this cooperatively yields between the
    /// tracing phases.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            #[cfg(feature = "async")]
            if self.async_support() {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2108
    /// Traces the GC references live in a single Wasm stack `frame`, adding
    /// each live slot to `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        // Resolve which module's code this PC falls in; needed to look up the
        // stack map describing the frame's GC slots.
        let (module_with_code, _offset) = self
            .modules()
            .module_and_code_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // With debug support, additionally consult the frame table for GC
        // refs tracked there.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_with_code.module().frame_table() {
            let relpc = module_with_code
                .text_offset(pc)
                .expect("PC should be within module");
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2155
2156    #[cfg(feature = "gc")]
2157    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
2158        use crate::runtime::vm::SendSyncPtr;
2159        use core::ptr::NonNull;
2160
2161        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
2162        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");
2163
2164        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
2165        if gc_ref.is_some() {
2166            unsafe {
2167                gc_roots_list
2168                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
2169            }
2170        }
2171    }
2172
    /// Walks the current Wasm backtrace and traces every frame's GC roots.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2185
    /// Traces GC roots held on the stacks of suspended continuations
    /// (stack-switching support); running/parent/fresh/returned continuations
    /// are covered elsewhere or hold no roots.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2224
    /// Traces GC roots stored in instance state: all globals and tables.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }

    /// Traces roots created by the embedder via the rooting APIs.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }

    /// Traces the pending exception, if any, so it survives collection.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2251
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // Holding the `RegisteredType` in this set keeps its registration
        // alive for the remainder of the store's lifetime.
        self.gc_host_alloc_types.insert(ty);
    }
2262
    /// Helper function to execute an `init_gc_ref` barrier when placing
    /// `gc_ref` in `dest`.
    ///
    /// This avoids allocating `GcStore` where possible.
    pub(crate) fn init_gc_ref(
        &mut self,
        dest: &mut MaybeUninit<Option<VMGcRef>>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if GcStore::needs_init_barrier(gc_ref) {
            // This reference requires the full init barrier, which needs the
            // store's `GcStore`.
            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
        } else {
            // No barrier required: copy the (i31 or absent) value directly.
            dest.write(gc_ref.map(|r| r.copy_i31()));
        }
    }
2277
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating `GcStore` where possible.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        // Delegate to the barrier variant that accepts an optional `GcStore`
        // so stores which never created a GC heap don't have to make one.
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2284
2285    /// Helper function to clone `gc_ref` notably avoiding allocating a
2286    /// `GcStore` where possible.
2287    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
2288        if gc_ref.is_i31() {
2289            gc_ref.copy_i31()
2290        } else {
2291            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
2292        }
2293    }
2294
    /// Returns the total amount of fuel remaining in this store.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in the engine's
    /// tunables.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // NOTE(review): reads the fuel counter cell that executing wasm also
        // writes; presumably sound because wasm is not running on this store
        // while we hold `&self` — confirm.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
2303
    /// Moves fuel from this store's reserve into the active fuel counter,
    /// returning whether there was enough fuel for execution to continue.
    pub(crate) fn refuel(&mut self) -> bool {
        // NOTE(review): mutably aliases the fuel counter cell; assumed sound
        // because `&mut self` excludes concurrently-running wasm — confirm.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
2312
    /// Sets the total amount of fuel available to this store to `fuel`.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in the engine's
    /// tunables.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        // Split `fuel` between the active counter and the reserve according
        // to the configured yield interval.
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2327
2328    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2329        anyhow::ensure!(
2330            self.engine().tunables().consume_fuel,
2331            "fuel is not configured in this store"
2332        );
2333        anyhow::ensure!(
2334            self.engine().config().async_support,
2335            "async support is not configured in this store"
2336        );
2337        anyhow::ensure!(
2338            interval != Some(0),
2339            "fuel_async_yield_interval must not be 0"
2340        );
2341        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2342        // Reset the fuel active + reserve states by resetting the amount.
2343        self.set_fuel(self.get_fuel()?)
2344    }
2345
2346    #[inline]
2347    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
2348        let handler = self.signal_handler.as_ref()?;
2349        Some(handler)
2350    }
2351
    /// Returns a non-null pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
2356
    /// Returns the `VMContext` of this store's default caller instance.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
2361
    /// Returns this store's self-referential `dyn VMStore` pointer.
    ///
    /// # Panics
    ///
    /// Panics (via `unwrap`) if the pointer has not been initialized yet.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2366
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    ///
    /// The store's cached vector is left empty; callers are expected to hand
    /// the buffer back via `save_hostcall_val_storage` when done with it.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }
2373
2374    /// Restores the vector previously taken by `take_hostcall_val_storage`
2375    /// above back into the store, allowing it to be used in the future for the
2376    /// next wasm->host call.
2377    #[inline]
2378    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
2379        if storage.capacity() > self.hostcall_val_storage.capacity() {
2380            self.hostcall_val_storage = storage;
2381        }
2382    }
2383
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    ///
    /// Leaves an empty vector in the store in its place.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
2390
2391    /// Same as `save_hostcall_val_storage`, but for the direction of the host
2392    /// calling wasm.
2393    #[inline]
2394    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
2395        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
2396            self.wasm_val_raw_storage = storage;
2397        }
2398    }
2399
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // A fault address should belong to at most one instance;
                // finding two would itself indicate a bug.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No linear memory in this store claims `addr`: this is potentially a
        // miscompilation or security issue, so report it and abort the
        // process rather than continuing.
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2508
    /// Retrieve the store's protection key.
    ///
    /// Returns `None` if no protection key is associated with this store.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2515
    /// Returns simultaneous mutable borrows of this store's component call
    /// contexts, host resource handle table, and host resource data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
2531
    /// Record that a component `instance` now lives in this store, bumping
    /// the count that is balanced again when the store is dropped.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
2541
    /// Like `component_resource_state`, but additionally projects out a
    /// pinned mutable reference to the `ComponentInstance` identified by
    /// `instance` from this store's data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }
2560
    /// Like `component_resource_state_with_instance`, but also hands out a
    /// mutable borrow of this store's concurrent state.
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance_and_concurrent_state(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
        &mut concurrent::ConcurrentState,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
            &mut self.concurrent_state,
        )
    }
2580
    /// Returns mutable access to this store's async fiber state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2585
    /// Returns mutable access to this store's component-model concurrent
    /// state.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState {
        &mut self.concurrent_state
    }
2590
    /// Returns whether a protection key is associated with this store.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2595
    /// Returns a borrowed view of this store's executor: either a reference
    /// into the interpreter or the native-code executor.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2603
    /// Swaps this store's executor with `executor` in place.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2608
    /// Returns the stack-unwinding implementation matching this store's
    /// executor (interpreter vs. native host frames).
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2616
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    ///
    /// # Errors
    ///
    /// Returns an error if allocating the continuation's stack fails.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        // Continuation stacks are sized by the engine's configured async
        // stack size.
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        // The box is kept alive in `self.continuations`, so the raw pointer
        // handed back remains valid for the rest of the store's lifetime.
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
2631
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be use to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id this instance will get; verified against the actual
        // id via the `assert_eq!` below.
        let id = self.instances.next_key();

        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2707
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    ///
    /// Any previously-pending exception is overwritten.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }
2716
    /// Take a pending exception, if any.
    ///
    /// Leaves the store with no pending exception afterwards.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2722
    /// Tests whether there is a pending exception.
    ///
    /// Does not consume or otherwise alter the pending exception.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2728
    /// Take the pending exception, if any, and root it for host-side use as
    /// a `Rooted<ExnRef>`.
    #[cfg(feature = "gc")]
    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
        let vmexnref = self.take_pending_exception()?;
        // No GC may run between taking the raw reference and rooting it.
        let mut nogc = AutoAssertNoGc::new(self);
        Some(Rooted::new(&mut nogc, vmexnref.into()))
    }
2735
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(&mut self) -> Option<OwnedRooted<ExnRef>> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception.take().map(|vmexnref| {
            // Clone the GC reference and stash the clone back as the pending
            // exception so the store keeps holding one reference while the
            // caller receives an owned-rooted one.
            let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
            nogc.pending_exception = Some(cloned.into_exnref_unchecked());
            OwnedRooted::new(&mut nogc, vmexnref.into())
        })
    }
2747
    /// Set `exception` as this store's pending exception, converting the
    /// rooted reference into its raw `VMExnRef` representation.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        let mut nogc = AutoAssertNoGc::new(self);
        let exnref = exception._to_raw(&mut nogc).unwrap();
        // A null exception reference is not representable here.
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2757
    /// Set this store's epoch deadline to `delta` ticks past the engine's
    /// current epoch.
    #[cfg(target_has_atomic = "64")]
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let current_epoch = self.engine().current_epoch();
        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
        *epoch_deadline = current_epoch + delta;
    }
2769
    /// Returns the currently-configured epoch deadline.
    ///
    /// Takes `&mut self` in order to use `get_mut` on the underlying cell.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2773}
2774
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        /// The allocator used to create (and later tear down) the dummy
        /// instance.
        allocator: &'a dyn InstanceAllocator,
    },
}
2793
unsafe impl<T> VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        // Resolve the configured limiter accessor against the store's data,
        // producing the sync or async flavor as appropriate.
        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }

    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> anyhow::Result<()> {
        // NOTE(review): unlike `new_epoch_updated_deadline` above, the
        // handler is `take`n here and never put back — confirm whether a
        // handler is intended to fire at most once per registration or
        // whether it is re-registered elsewhere.
        if let Some(handler) = self.debug_handler.take() {
            log::trace!("about to raise debug event {event:?}");
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
2856
impl<T> StoreInner<T> {
    /// Clears any configured epoch-deadline callback so that reaching the
    /// deadline falls back to the default behavior.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to be invoked whenever the epoch deadline is
    /// reached, determining how (or whether) execution resumes.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2871
2872impl<T: Default> Default for Store<T> {
2873    fn default() -> Store<T> {
2874        Store::new(&Engine::default(), T::default())
2875    }
2876}
2877
impl<T: fmt::Debug> fmt::Debug for Store<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Print the address of the inner allocation (useful as an identity)
        // rather than recursing into the store's internals.
        let inner = &**self.inner as *const StoreInner<T>;
        f.debug_struct("Store")
            .field("inner", &inner)
            .field("data", self.inner.data())
            .finish()
    }
}
2887
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run any store teardown routines that must execute before the data
        // and inner store are deallocated.
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        unsafe {
            // Note the ordering: the embedder's `T` data is dropped first,
            // then the rest of the store.
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2899
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap (and its backing memory) to the allocator
            // before instances are torn down.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                // Dummy instances are deallocated with an on-demand allocator
                // rather than the engine's configured allocator.
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            // Balance the increments performed by `push_component_instance`.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2938
/// Conversion to a mutable [`StoreOpaque`], implemented for every store-like
/// type so that internal helpers can be generic over all of them.
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2948
/// Identity conversion: a `StoreOpaque` is already the opaque store.
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2954
/// A `dyn VMStore` can be viewed as its underlying `StoreOpaque`.
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2960
/// A whole `Store<T>` exposes its innermost `StoreOpaque`.
impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}
2966
/// A `StoreInner<T>` can be viewed as its contained `StoreOpaque`.
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2972
/// Blanket impl: a mutable reference to any store-like type is itself
/// store-like.
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
2978
2979#[cfg(test)]
2980mod tests {
2981    use super::*;
2982
    /// Miniature model of the store's fuel bookkeeping, used to exercise the
    /// free functions `get_fuel`, `refuel`, and `set_fuel` in isolation.
    struct FuelTank {
        // Fuel consumed so far; negative values represent fuel still
        // available in the active counter (see `smoke` below).
        pub consumed_fuel: i64,
        // Fuel held back for injection on future `refuel` calls.
        pub reserve_fuel: u64,
        // Interval at which async execution would yield, if configured.
        pub yield_interval: Option<NonZeroU64>,
    }
2988
    impl FuelTank {
        /// Creates an empty tank: no fuel, no reserve, no yield interval.
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        /// Total fuel remaining (active plus reserve).
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        /// Moves fuel from the reserve into the active counter; returns
        /// whether execution can continue.
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        /// Sets the total fuel, splitting it between active and reserve
        /// according to the yield interval.
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }
3016
    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        // Without a yield interval all fuel goes into the active counter
        // (stored as a negative consumed amount).
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With an interval of 10, only one interval's worth becomes active
        // and the remainder is held in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }
3029
    #[test]
    fn does_not_lose_precision() {
        // Round-tripping through set/get must be exact, including values
        // that do not fit in `i64`.
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }
3042
    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        // One interval's worth becomes active; the rest sits in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // The active portion is clamped to `i64::MAX` when the interval
        // meets or exceeds it.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }
3065
3066    #[test]
3067    fn refueling() {
3068        // It's possible to fuel to have consumed over the limit as some instructions can consume
3069        // multiple units of fuel at once. Refueling should be strict in it's consumption and not
3070        // add more fuel than there is.
3071        let mut tank = FuelTank::new();
3072
3073        tank.yield_interval = NonZeroU64::new(10);
3074        tank.reserve_fuel = 42;
3075        tank.consumed_fuel = 4;
3076        assert!(tank.refuel());
3077        assert_eq!(tank.reserve_fuel, 28);
3078        assert_eq!(tank.consumed_fuel, -10);
3079
3080        tank.yield_interval = NonZeroU64::new(1);
3081        tank.reserve_fuel = 8;
3082        tank.consumed_fuel = 4;
3083        assert_eq!(tank.get_fuel(), 4);
3084        assert!(tank.refuel());
3085        assert_eq!(tank.reserve_fuel, 3);
3086        assert_eq!(tank.consumed_fuel, -1);
3087        assert_eq!(tank.get_fuel(), 4);
3088
3089        tank.yield_interval = NonZeroU64::new(10);
3090        tank.reserve_fuel = 3;
3091        tank.consumed_fuel = 4;
3092        assert_eq!(tank.get_fuel(), 0);
3093        assert!(!tank.refuel());
3094        assert_eq!(tank.reserve_fuel, 3);
3095        assert_eq!(tank.consumed_fuel, 4);
3096        assert_eq!(tank.get_fuel(), 0);
3097    }
3098
3099    #[test]
3100    fn store_data_provenance() {
3101        // Test that we juggle pointer provenance and all that correctly, and
3102        // miri is happy with everything, while allowing both Rust code and
3103        // "Wasm" to access and modify the store's `T` data. Note that this is
3104        // not actually Wasm mutating the store data here because compiling Wasm
3105        // under miri is way too slow.
3106
3107        unsafe fn run_wasm(store: &mut Store<u32>) {
3108            let ptr = store
3109                .inner
3110                .inner
3111                .vm_store_context
3112                .store_data
3113                .as_ptr()
3114                .cast::<u32>();
3115            unsafe { *ptr += 1 }
3116        }
3117
3118        let engine = Engine::default();
3119        let mut store = Store::new(&engine, 0_u32);
3120
3121        assert_eq!(*store.data(), 0);
3122        *store.data_mut() += 1;
3123        assert_eq!(*store.data(), 1);
3124        unsafe { run_wasm(&mut store) }
3125        assert_eq!(*store.data(), 2);
3126        *store.data_mut() += 1;
3127        assert_eq!(*store.data(), 3);
3128    }
3129}