wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
//!   is split out, although sometimes it's useful to have borrows into these
//!   tables that are separate from borrows of the rest of the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::RootSet;
80#[cfg(feature = "gc")]
81use crate::ThrownException;
82#[cfg(feature = "component-model-async")]
83use crate::component::ComponentStoreData;
84#[cfg(feature = "component-model-async")]
85use crate::component::concurrent;
86#[cfg(feature = "async")]
87use crate::fiber;
88use crate::module::RegisteredModuleId;
89use crate::prelude::*;
90#[cfg(feature = "gc")]
91use crate::runtime::vm::GcRootsList;
92#[cfg(feature = "stack-switching")]
93use crate::runtime::vm::VMContRef;
94use crate::runtime::vm::mpk::ProtectionKey;
95use crate::runtime::vm::{
96    self, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
97    Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SendSyncPtr,
98    SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore, VMStoreContext,
99};
100use crate::trampoline::VMHostGlobalContext;
101use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
102#[cfg(feature = "gc")]
103use crate::{ExnRef, Rooted};
104use crate::{Global, Instance, Memory, Table, Uninhabited};
105use alloc::sync::Arc;
106use core::fmt;
107use core::marker;
108use core::mem::{self, ManuallyDrop, MaybeUninit};
109use core::num::NonZeroU64;
110use core::ops::{Deref, DerefMut};
111use core::pin::Pin;
112use core::ptr::NonNull;
113use wasmtime_environ::StaticModuleIndex;
114use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
115
116mod context;
117pub use self::context::*;
118mod data;
119pub use self::data::*;
120mod func_refs;
121use func_refs::FuncRefs;
122#[cfg(feature = "component-model-async")]
123mod token;
124#[cfg(feature = "component-model-async")]
125pub(crate) use token::StoreToken;
126#[cfg(feature = "async")]
127mod async_;
128#[cfg(all(feature = "async", feature = "call-hook"))]
129pub use self::async_::CallHookHandler;
130
131#[cfg(feature = "gc")]
132use super::vm::VMExnRef;
133#[cfg(feature = "gc")]
134mod gc;
135
136/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
137///
138/// All WebAssembly instances and items will be attached to and refer to a
139/// [`Store`]. For example instances, functions, globals, and tables are all
140/// attached to a [`Store`]. Instances are created by instantiating a
141/// [`Module`](crate::Module) within a [`Store`].
142///
143/// A [`Store`] is intended to be a short-lived object in a program. No form
144/// of GC is implemented at this time so once an instance is created within a
145/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
146/// This makes [`Store`] unsuitable for creating an unbounded number of
147/// instances in it because [`Store`] will never release this memory. It's
148/// recommended to have a [`Store`] correspond roughly to the lifetime of a
149/// "main instance" that an embedding is interested in executing.
150///
151/// ## Type parameter `T`
152///
153/// Each [`Store`] has a type parameter `T` associated with it. This `T`
154/// represents state defined by the host. This state will be accessible through
155/// the [`Caller`](crate::Caller) type that host-defined functions get access
156/// to. This `T` is suitable for storing `Store`-specific information which
157/// imported functions may want access to.
158///
159/// The data `T` can be accessed through methods like [`Store::data`] and
160/// [`Store::data_mut`].
161///
162/// ## Stores, contexts, oh my
163///
164/// Most methods in Wasmtime take something of the form
165/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
166/// the first argument. These two traits allow ergonomically passing in the
167/// context you currently have to any method. The primary two sources of
168/// contexts are:
169///
170/// * `Store<T>`
171/// * `Caller<'_, T>`
172///
173/// corresponding to what you create and what you have access to in a host
174/// function. You can also explicitly acquire a [`StoreContext`] or
175/// [`StoreContextMut`] and pass that around as well.
176///
177/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
178/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
179/// form of context you have you can call various methods, create objects, etc.
180///
181/// ## Stores and `Default`
182///
183/// You can create a store with default configuration settings using
184/// `Store::default()`. This will create a brand new [`Engine`] with default
185/// configuration (see [`Config`](crate::Config) for more information).
186///
187/// ## Cross-store usage of items
188///
189/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
190/// [`Store`]. The store they belong to is the one they were created with
191/// (passed in as a parameter) or instantiated with. This store is the only
192/// store that can be used to interact with wasm items after they're created.
193///
194/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
195/// operations is incorrect. In other words it's considered a programmer error
196/// rather than a recoverable error for the wrong [`Store`] to be used when
197/// calling APIs.
pub struct Store<T: 'static> {
    // The entire store lives on the heap behind this single pointer; per the
    // module docs, `Store<T>` being "just a pointer large" is load-bearing.
    //
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
202
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // Entering the host happens either when wasm returns to the host or
        // when wasm invokes a host function.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        // Exiting the host happens either when a host function returns to
        // wasm or when the host invokes a wasm function.
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
233
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional host-configured resource limiter; the stored closure projects
    /// the limiter out of the host's `T` data.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on host<->wasm call transitions.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's deadline,
    /// returning what to do next (see `UpdateDeadline`).
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // for comments about `ManuallyDrop`, see `Store::into_data`
    data: ManuallyDrop<T>,
}
251
/// Host-registered resource limiter stored within `StoreInner<T>`.
///
/// The boxed closure projects a limiter out of the host's `T` data.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter.
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter (only available with the `async` feature).
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
257
258/// Representation of a configured resource limiter for a store.
259///
260/// This is acquired with `resource_limiter_and_store_opaque` for example and is
261/// threaded through to growth operations on tables/memories. Note that this is
262/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
263/// efficient to pass around (nullable pointer) and it's also notably passed
264/// around as an `Option` to represent how this is optionally specified within a
265/// store.
pub enum StoreResourceLimiter<'a> {
    /// Borrowed synchronous limiter.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// Borrowed asynchronous limiter (only available with the `async` feature).
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
271
impl StoreResourceLimiter<'_> {
    /// Forwards a linear-memory growth request to the configured limiter.
    ///
    /// Note that despite this being an `async fn` the sync-limiter arm
    /// completes immediately; only an async limiter may actually suspend.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the configured limiter that a memory growth operation failed.
    pub(crate) fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Forwards a table growth request to the configured limiter.
    ///
    /// As with `memory_growing`, only the async-limiter arm can suspend.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the configured limiter that a table growth operation failed.
    pub(crate) fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
315
/// Storage for a store's configured call hook, generic over the host's `T`.
enum CallHookInner<T: 'static> {
    /// Synchronous call hook.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous call hook handler.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabitable variant which guarantees `T` is "used" even when the
    /// feature-gated variants above are all compiled out.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
330
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the value returned from a store's configured epoch-deadline
/// callback (see `StoreInner::epoch_deadline_behavior`).
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
358
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>` via
// `Deref`/`DerefMut`; the opaque store holds the bulk of the store's state.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
372
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store was created from (see `Store::new`).
    engine: Engine,
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated within this store, keyed by `InstanceId`.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    signal_handler: Option<SignalHandler>,
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    //
    // `gc_store` starts out as `None` in `Store::new` and is only populated
    // when a GC heap is needed.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-pointer back to `StoreInner<T>` as a `dyn VMStore`; backpatched in
    /// `Store::new` once the heap allocation exists.
    traitobj: StorePtr,
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,

    #[cfg(feature = "component-model-async")]
    concurrent_async_state: concurrent::AsyncState,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
493
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// This is `None` only transiently during `Store::new` before the heap
/// allocation's address is known; it is backpatched to `Some` immediately
/// afterwards.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);
500
// SAFETY: We can't make `VMStore: Send + Sync` because that requires making
// all of Wasmtime's internals generic over the `Store`'s `T`. So instead, we
// take care in the whole VM layer to only use the `VMStore` in ways that are
// `Send`- and `Sync`-safe and we have to have these unsafe impls.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
507
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm runs through the Pulley interpreter, whose state lives here.
    Interpreter(Interpreter),
    /// Wasm runs as natively-compiled code; no per-store executor state.
    #[cfg(has_host_compiler_backend)]
    Native,
}
517
impl Executor {
    /// Creates the executor matching `engine`'s configured target: a Pulley
    /// interpreter when targeting Pulley, otherwise native execution.
    pub(crate) fn new(engine: &Engine) -> Self {
        // With a native host backend available, Pulley is used only when the
        // feature is enabled and the engine explicitly targets Pulley.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a native host backend the interpreter is the only option,
        // so the engine's target must be Pulley.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
533
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; nothing to borrow.
    #[cfg(has_host_compiler_backend)]
    Native,
}
540
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    store: &'a mut StoreOpaque,
    // Whether a no-GC scope was actually entered on the GC heap; false when
    // the `gc` feature is disabled or no GC store has been created yet.
    entered: bool,
}
547
548impl<'a> AutoAssertNoGc<'a> {
549    #[inline]
550    pub fn new(store: &'a mut StoreOpaque) -> Self {
551        let entered = if !cfg!(feature = "gc") {
552            false
553        } else if let Some(gc_store) = store.gc_store.as_mut() {
554            gc_store.gc_heap.enter_no_gc_scope();
555            true
556        } else {
557            false
558        };
559
560        AutoAssertNoGc { store, entered }
561    }
562
563    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
564    /// disables checks for no GC happening for the duration of this value.
565    ///
566    /// This is used when it is statically otherwise known that a GC doesn't
567    /// happen for the various types involved.
568    ///
569    /// # Unsafety
570    ///
571    /// This method is `unsafe` as it does not provide the same safety
572    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
573    /// caller that a GC doesn't happen.
574    #[inline]
575    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
576        if cfg!(debug_assertions) {
577            AutoAssertNoGc::new(store)
578        } else {
579            AutoAssertNoGc {
580                store,
581                entered: false,
582            }
583        }
584    }
585}
586
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    /// Transparently expose shared access to the underlying store.
    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}
595
impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    /// Transparently expose mutable access to the underlying store.
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
602
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the no-GC scope if `new` actually entered one; `entered`
        // is only set to `true` when a GC store was present at construction,
        // which is why `unwrap_gc_store_mut` is used here.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
611
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// Handle to the underlying runtime instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
620
/// Distinguishes real instances from internal "dummy" instances in a store.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
637
638impl<T> Store<T> {
639    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
640    /// `data` provided.
641    ///
642    /// The created [`Store`] will place no additional limits on the size of
643    /// linear memories or tables at runtime. Linear memories and tables will
644    /// be allowed to grow to any upper limit specified in their definitions.
645    /// The store will limit the number of instances, linear memories, and
646    /// tables created to 10,000. This can be overridden with the
647    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        // Reserve a protection key (if any are available) up front so it can
        // be recorded in `StoreOpaque::pkey` below.
        let pkey = engine.allocator().next_available_pkey();

        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // Backpatched below once the `StoreInner` allocation has a stable
            // address.
            traitobj: StorePtr(None),
            // Likewise replaced below once the default callee instance has
            // been allocated.
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model-async")]
            concurrent_async_state: Default::default(),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // The heap allocation now has a stable address, so record the
        // self-referential `dyn VMStore` pointer.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::new(StaticModuleIndex::from_u32(
            0,
        )));
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let id = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                &shim,
                Default::default(),
            ))
            .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
751
    /// Access the underlying data owned by this `Store`.
    ///
    /// This is the same `data` value originally provided to [`Store::new`].
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
757
758    /// Access the underlying data owned by this `Store`.
759    #[inline]
760    pub fn data_mut(&mut self) -> &mut T {
761        self.inner.data_mut()
762    }
763
764    fn run_manual_drop_routines(&mut self) {
765        // We need to drop the fibers of each component instance before
766        // attempting to drop the instances themselves since the fibers may need
767        // to be resumed and allowed to exit cleanly before we yank the state
768        // out from under them.
769        //
770        // This will also drop any futures which might use a `&Accessor` fields
771        // in their `Drop::drop` implementations, in which case they'll need to
772        // be called from with in the context of a `tls::set` closure.
773        #[cfg(feature = "component-model-async")]
774        ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);
775
776        // Ensure all fiber stacks, even cached ones, are all flushed out to the
777        // instance allocator.
778        self.inner.flush_fiber_stack();
779    }
780
781    /// Consumes this [`Store`], destroying it, and returns the underlying data.
782    pub fn into_data(mut self) -> T {
783        self.run_manual_drop_routines();
784
785        // This is an unsafe operation because we want to avoid having a runtime
786        // check or boolean for whether the data is actually contained within a
787        // `Store`. The data itself is stored as `ManuallyDrop` since we're
788        // manually managing the memory here, and there's also a `ManuallyDrop`
789        // around the `Box<StoreInner<T>>`. The way this works though is a bit
790        // tricky, so here's how things get dropped appropriately:
791        //
792        // * When a `Store<T>` is normally dropped, the custom destructor for
793        //   `Store<T>` will drop `T`, then the `self.inner` field. The
794        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
795        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
796        //   touch `T` because it's wrapped in `ManuallyDrop`.
797        //
798        // * When calling this method we skip the top-level destructor for
799        //   `Store<T>` with `mem::forget`. This skips both the destructor for
800        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
801        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
802        //   the destructor for `T` since it's `ManuallyDrop`.
803        //
804        // In both cases all the other fields of `StoreInner<T>` should all get
805        // dropped, and the manual management of destructors is basically
806        // between this method and `Drop for Store<T>`. Note that this also
807        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
808        // there is a comment indicating this as well.
809        unsafe {
810            let mut inner = ManuallyDrop::take(&mut self.inner);
811            core::mem::forget(self);
812            ManuallyDrop::take(&mut inner.data)
813        }
814    }
815
816    /// Configures the [`ResourceLimiter`] used to limit resource creation
817    /// within this [`Store`].
818    ///
819    /// Whenever resources such as linear memory, tables, or instances are
820    /// allocated the `limiter` specified here is invoked with the store's data
821    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
822    /// being allocated. The returned [`ResourceLimiter`] is intended to live
823    /// within the `T` itself, for example by storing a
824    /// [`StoreLimits`](crate::StoreLimits).
825    ///
826    /// Note that this limiter is only used to limit the creation/growth of
827    /// resources in the future, this does not retroactively attempt to apply
828    /// limits to the [`Store`].
829    ///
830    /// # Examples
831    ///
832    /// ```
833    /// use wasmtime::*;
834    ///
835    /// struct MyApplicationState {
836    ///     my_state: u32,
837    ///     limits: StoreLimits,
838    /// }
839    ///
840    /// let engine = Engine::default();
841    /// let my_state = MyApplicationState {
842    ///     my_state: 42,
843    ///     limits: StoreLimitsBuilder::new()
844    ///         .memory_size(1 << 20 /* 1 MB */)
845    ///         .instances(2)
846    ///         .build(),
847    /// };
848    /// let mut store = Store::new(&engine, my_state);
849    /// store.limiter(|state| &mut state.limits);
850    ///
851    /// // Creation of smaller memories is allowed
852    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
853    ///
854    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
855    /// // configured
856    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
857    ///
858    /// // The number of instances in this store is limited to 2, so the third
859    /// // instance here should fail.
860    /// let module = Module::new(&engine, "(module)").unwrap();
861    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
862    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
863    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
864    /// ```
865    ///
866    /// [`ResourceLimiter`]: crate::ResourceLimiter
867    pub fn limiter(
868        &mut self,
869        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
870    ) {
871        // Apply the limits on instances, tables, and memory given by the limiter:
872        let inner = &mut self.inner;
873        let (instance_limit, table_limit, memory_limit) = {
874            let l = limiter(&mut inner.data);
875            (l.instances(), l.tables(), l.memories())
876        };
877        let innermost = &mut inner.inner;
878        innermost.instance_limit = instance_limit;
879        innermost.table_limit = table_limit;
880        innermost.memory_limit = memory_limit;
881
882        // Save the limiter accessor function:
883        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
884    }
885
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Stored as the synchronous flavor; it's invoked through
        // `StoreInner::call_hook` on each host/wasm transition.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }

    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }

    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // Delegate to the `StoreContextMut` implementation of `gc`.
        StoreContextMut(&mut self.inner).gc(why)
    }

    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }

    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }
964
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // Validation (e.g. that async support is enabled) happens in the
        // internal implementation.
        self.inner.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // Stored relative to the engine's current epoch; see the doc comment
        // above for the full semantics.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
1027
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        // Forwarded to the internal store state.
        self.inner.epoch_deadline_trap();
    }

    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Boxed and stored internally; invoked when the epoch deadline is
        // reached during execution (see doc comment above).
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1093
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To inspect or take the pending exception, use
    /// [`peek_pending_exception`] and [`take_pending_exception`]. For
    /// a convenient wrapper that invokes a closure and provides any
    /// caught exception from the closure to a separate handler
    /// closure, see [`StoreContextMut::catch`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the exception in the store's pending-exception slot, then
        // hand back the marker error type for the caller to propagate.
        self.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::catch`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// Ordinarily, a pending exception will be set on a store if and
    /// only if a host-side callstack is propagating a
    /// [`crate::ThrownException`] error. The final consumer that
    /// catches the exception takes it; it may re-place it to re-throw
    /// (using [`throw`]) if it chooses not to actually handle the
    /// exception.
    ///
    /// This method is useful to tell whether a store is in this
    /// state, but should not be used as part of the ordinary
    /// exception-handling flow. For the most idiomatic handling, see
    /// [`StoreContextMut::catch`].
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        // Non-destructive query of the pending-exception slot.
        self.inner.pending_exception.is_some()
    }
1171}
1172
impl<'a, T> StoreContext<'a, T> {
    /// Returns whether the underlying store was configured with async support
    /// enabled (delegates to `StoreOpaque::async_support`).
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned borrow lives for `'a` — the lifetime of the
    /// underlying store borrow — not merely the lifetime of `self`.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1197
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // This synchronous entrypoint is only usable for stores without async
        // support configured.
        assert!(!self.0.async_support());
        let (mut limiter, store) = self.0.resource_limiter_and_store_opaque();
        // With async support disabled the GC future cannot suspend, so it is
        // asserted to resolve immediately.
        vm::assert_ready(store.gc(limiter.as_mut(), None, why.map(|e| e.bytes_needed())));
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
1295
impl<T> StoreInner<T> {
    /// Shared access to the user-provided `T`.
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    /// Exclusive access to the user-provided `T`.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Invoked on each call/return transition between host and wasm.
    ///
    /// Inlined fast path: when no protection key is configured and no user
    /// hook is installed this is just two cheap checks.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    /// Out-of-line portion of `call_hook`: applies protection-key (pkey)
    /// switches if one is configured, then runs the user's hook if any.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                // Entering wasm: restrict memory access to this store's pkey.
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                // Leaving wasm for the host: lift the restriction again.
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatches to the sync or async flavor of the configured call hook.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            // This variant exists only to mention `T` when the hook features
            // are disabled; it is uninhabited, so this arm can never execute.
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    /// No-op shim used when the `async` feature (and thus fiber stacks) is
    /// disabled, so callers can invoke this unconditionally.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1365
// Computes the total fuel remaining: the cached reserve plus whatever is left
// of the fuel previously injected into the VM.
//
// `injected_fuel` is the VM-side counter: it is seeded negative (see
// `set_fuel` below) and counts up toward zero as wasm consumes fuel, so its
// negation is the amount of injected fuel not yet consumed. A non-negative
// value means the injected fuel is exhausted and contributes nothing.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // `saturating_neg` instead of `-injected_fuel` so that `i64::MIN` cannot
    // trigger a negation overflow (a panic in debug builds); saturation is
    // already the semantics used for the addition itself.
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1369
1370// Add remaining fuel from the reserve into the active fuel if there is any left.
1371fn refuel(
1372    injected_fuel: &mut i64,
1373    fuel_reserve: &mut u64,
1374    yield_interval: Option<NonZeroU64>,
1375) -> bool {
1376    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1377    if fuel > 0 {
1378        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1379        true
1380    } else {
1381        false
1382    }
1383}
1384
// Splits `new_fuel_amount` between the VM's active fuel counter and the
// store's reserve.
//
// The VM only ever sees up to `yield_interval` units at a time (so periodic
// yields fire on schedule) and never more than `i64::MAX` (the counter is an
// `i64`); everything beyond that waits in `fuel_reserve`. The VM counts fuel
// by incrementing, so the active amount is injected as a negative value and
// execution halts once the counter turns positive.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    let cap = yield_interval
        .map_or(u64::MAX, |i| i.get())
        .min(i64::MAX as u64);
    let active = new_fuel_amount.min(cap);
    *fuel_reserve = new_fuel_amount - active;
    *injected_fuel = -(active as i64);
}
1404
1405#[doc(hidden)]
1406impl StoreOpaque {
    /// Returns this store's unique identifier, used elsewhere to assert that
    /// items (e.g. instances) actually belong to this store.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1410
1411    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1412        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1413            let new = slot.saturating_add(amt);
1414            if new > max {
1415                bail!(
1416                    "resource limit exceeded: {} count too high at {}",
1417                    desc,
1418                    new
1419                );
1420            }
1421            *slot = new;
1422            Ok(())
1423        }
1424
1425        let module = module.env_module();
1426        let memories = module.num_defined_memories();
1427        let tables = module.num_defined_tables();
1428
1429        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1430        bump(
1431            &mut self.memory_count,
1432            self.memory_limit,
1433            memories,
1434            "memory",
1435        )?;
1436        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1437
1438        Ok(())
1439    }
1440
    /// Returns whether async support is enabled: requires both the `async`
    /// Cargo feature and the engine's `async_support` configuration.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }

    /// Returns the [`Engine`] this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    /// Shared access to this store's item storage.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }

    /// Exclusive access to this store's item storage.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }

    /// Shared access to the registry of modules used in this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }

    /// Exclusive access to the registry of modules used in this store.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }

    /// Simultaneous access to the store's function references (mutably) and
    /// module registry (shared), split to satisfy the borrow checker.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }

    /// Shared access to the host-defined globals owned by this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }

    /// Exclusive access to the host-defined globals owned by this store.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1486
1487    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1488        instance.store_id().assert_belongs_to(self.id());
1489        match self.instances[instance.instance()].kind {
1490            StoreInstanceKind::Dummy => None,
1491            StoreInstanceKind::Real { module_id } => {
1492                let module = self
1493                    .modules()
1494                    .lookup_module_by_id(module_id)
1495                    .expect("should always have a registered module for real instances");
1496                Some(module)
1497            }
1498        }
1499    }
1500
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics (via indexing) if `id` is not a valid index into this store's
    /// instance map.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }
1510
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics (via indexing) if `id` is not a valid index into this store's
    /// instance map.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1520
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` enforces the non-overlap requirement above and
        // panics via the `unwrap` when two ids alias the same instance.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1546
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    ///
    /// Borrows the (optional) GC store and the instance identified by `id`
    /// simultaneously, which is possible because they live in disjoint fields
    /// of `self`.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1554
1555    /// Get all instances (ignoring dummy instances) within this store.
1556    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1557        let instances = self
1558            .instances
1559            .iter()
1560            .filter_map(|(id, inst)| {
1561                if let StoreInstanceKind::Dummy = inst.kind {
1562                    None
1563                } else {
1564                    Some(id)
1565                }
1566            })
1567            .collect::<Vec<_>>();
1568        instances
1569            .into_iter()
1570            .map(|i| Instance::from_wasmtime(i, self))
1571    }
1572
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = Memory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        //
        // Copy `id` out first so the `move` closure captures it by value
        // rather than borrowing `self` twice.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1583
    /// Iterate over all tables (host- or Wasm-defined) within this store,
    /// invoking `f` with mutable access to the store for each one.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        //
        // Indices are used (rather than iterators) so that `self` can be
        // passed mutably to `f` on each iteration.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1597
    /// Iterate over all globals (host- or Wasm-defined) within this store,
    /// invoking `f` with mutable access to the store for each one.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals. Indices are used
        // (rather than iterators) so `self` can be passed mutably to `f`.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1615
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1620
    /// Shared access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1625
    /// Mutable access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1630
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // NOTE(review): written as `is_some` + `unwrap` rather than `if let`,
        // presumably so the borrow of `self.gc_store` ends before
        // `self.allocate_gc_store(..)` takes `&mut self` below.
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1648
    /// Allocates this store's `GcStore`: a linear memory for the GC heap plus
    /// the GC heap bookkeeping itself.
    ///
    /// Must only be called when no `GcStore` is present yet (asserted below).
    /// On success the store's `VMStoreContext` is updated to point at the new
    /// heap's memory definition. `#[inline(never)]` keeps this cold path out
    /// of the hot `ensure_gc_store` caller.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Sanity-check that no GC heap exists yet: the cached heap pointer
        // must still be the dangling placeholder with zero length.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        // Publish the heap's memory definition so compiled code can see it.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // When GC support is compiled in: allocate the backing linear memory
        // through the engine's instance allocator, then wrap it in a GC heap.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::{StaticModuleIndex, packed_option::ReservedValue};

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            // A bare module and reserved instance id are used since this
            // memory doesn't belong to any real instance.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(wasmtime_environ::Module::new(
                    StaticModuleIndex::from_u32(0),
                ))),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Without GC compiled in this is always an error.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1718
1719    /// Helper method to require that a `GcStore` was previously allocated for
1720    /// this store, failing if it has not yet been allocated.
1721    ///
1722    /// Note that this should only be used in a context where allocation of a
1723    /// `GcStore` is sure to have already happened prior, otherwise this may
1724    /// return a confusing error to embedders which is a bug in Wasmtime.
1725    ///
1726    /// Some situations where it's safe to call this method:
1727    ///
1728    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
1729    ///   this shows proof that the `GcStore` was previously allocated.
1730    /// * During instantiation and instance's `needs_gc_heap` flag will be
1731    ///   handled and instantiation will automatically create a GC store.
1732    #[inline]
1733    #[cfg(feature = "gc")]
1734    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
1735        match &self.gc_store {
1736            Some(gc_store) => Ok(gc_store),
1737            None => bail!("GC heap not initialized yet"),
1738        }
1739    }
1740
1741    /// Same as [`Self::require_gc_store`], but mutable.
1742    #[inline]
1743    #[cfg(feature = "gc")]
1744    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
1745        match &mut self.gc_store {
1746            Some(gc_store) => Ok(gc_store),
1747            None => bail!("GC heap not initialized yet"),
1748        }
1749    }
1750
1751    /// Attempts to access the GC store that has been previously allocated.
1752    ///
1753    /// This method will return `Some` if the GC store was previously allocated.
1754    /// A `None` return value means either that the GC heap hasn't yet been
1755    /// allocated or that it does not need to be allocated for this store. Note
1756    /// that to require a GC store in a particular situation it's recommended to
1757    /// use [`Self::require_gc_store_mut`] instead.
1758    #[inline]
1759    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
1760        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1761            debug_assert!(self.gc_store.is_none());
1762            None
1763        } else {
1764            self.gc_store.as_mut()
1765        }
1766    }
1767
1768    /// Helper to assert that a GC store was previously allocated and is
1769    /// present.
1770    ///
1771    /// # Panics
1772    ///
1773    /// This method will panic if the GC store has not yet been allocated. This
1774    /// should only be used in a context where there's an existing GC reference,
1775    /// for example, or if `ensure_gc_store` has already been called.
1776    #[inline]
1777    #[track_caller]
1778    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
1779        self.gc_store
1780            .as_ref()
1781            .expect("attempted to access the store's GC heap before it has been allocated")
1782    }
1783
1784    /// Same as [`Self::unwrap_gc_store`], but mutable.
1785    #[inline]
1786    #[track_caller]
1787    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
1788        self.gc_store
1789            .as_mut()
1790            .expect("attempted to access the store's GC heap before it has been allocated")
1791    }
1792
    /// Shared access to this store's set of embedder-rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1797
    /// Mutable access to this store's set of embedder-rooted GC references.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
1803
    /// Exits the LIFO rooting scope identified by `scope`, unrooting any GC
    /// references rooted within it.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1808
    /// Performs a garbage collection of this store's GC heap: traces all
    /// roots and then runs the collector over them.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots).await;
        let async_yield = self.async_support();
        // NOTE(review): the `unsafe` here appears to rely on `roots` staying
        // alive and unmodified for the duration of the collection -- see
        // `GcRootsList::iter` for the exact contract.
        self.unwrap_gc_store_mut()
            .gc(async_yield, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC, clearing the stale entries
        // but keeping the allocation for reuse.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
1834
    /// Populates `gc_roots_list` with every kind of GC root this store knows
    /// about: Wasm stacks, (optionally) suspended continuations, vmctx-held
    /// globals/tables, user-held roots, and any pending exception.
    ///
    /// When async support is enabled this yields between phases so a long
    /// trace doesn't monopolize the executor.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            #[cfg(feature = "async")]
            if self.async_support() {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1865
    /// Traces the live GC references in a single Wasm stack frame, adding any
    /// non-null refs found via the frame's stack map to `gc_roots_list`.
    ///
    /// Frames without a stack map for their PC are skipped: they contain no
    /// live GC references at that point.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        // The module containing this PC supplies the stack map metadata.
        let module_info = self
            .modules()
            .lookup_module_by_pc(pc)
            .expect("should have module info for Wasm frame");

        let stack_map = match module_info.lookup_stack_map(pc) {
            Some(sm) => sm,
            None => {
                log::trace!("No stack map for this Wasm frame");
                return;
            }
        };
        log::trace!(
            "We have a stack map that maps {} bytes in this Wasm frame",
            stack_map.frame_size()
        );

        // NOTE(review): these `unsafe` calls rely on `fp`/`sp` actually
        // delimiting a live Wasm frame matching this stack map -- guaranteed
        // by the backtrace machinery that produced `frame`.
        let sp = unsafe { stack_map.sp(fp) };
        for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
            let raw: u32 = unsafe { core::ptr::read(stack_slot) };
            log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

            // Only non-null refs are actual roots.
            let gc_ref = vm::VMGcRef::from_raw_u32(raw);
            if gc_ref.is_some() {
                unsafe {
                    gc_roots_list
                        .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
                }
            }
        }
    }
1915
    /// Walks the current Wasm stack via a backtrace and traces each frame's
    /// GC roots into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1928
    /// Traces GC roots held on the stacks of suspended continuations.
    ///
    /// Only `Suspended` continuations need explicit tracing here; the other
    /// states are either covered by the normal stack walk or hold no GC
    /// values (see the per-state comments below).
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
1967
    /// Traces GC roots reachable from vmctx-held items: every global and
    /// every table in this store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
1975
    /// Traces GC roots held by the embedder via this store's `RootSet`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
1982
    /// Traces the store's pending exception, if any, as a GC root so it isn't
    /// collected before it's delivered.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
1994
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        self.gc_host_alloc_types.insert(ty);
    }
2005
2006    /// Helper function execute a `init_gc_ref` when placing `gc_ref` in `dest`.
2007    ///
2008    /// This avoids allocating `GcStore` where possible.
2009    pub(crate) fn init_gc_ref(
2010        &mut self,
2011        dest: &mut MaybeUninit<Option<VMGcRef>>,
2012        gc_ref: Option<&VMGcRef>,
2013    ) {
2014        if GcStore::needs_init_barrier(gc_ref) {
2015            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
2016        } else {
2017            dest.write(gc_ref.map(|r| r.copy_i31()));
2018        }
2019    }
2020
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating `GcStore` where possible by passing the
    /// optional store through to `GcStore::write_gc_ref_optional_store`.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2027
2028    /// Helper function to clone `gc_ref` notably avoiding allocating a
2029    /// `GcStore` where possible.
2030    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
2031        if gc_ref.is_i31() {
2032            gc_ref.copy_i31()
2033        } else {
2034            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
2035        }
2036    }
2037
    /// Returns the amount of fuel remaining in this store.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption was not enabled in this store's
    /// configuration.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // NOTE(review): reading the fuel cell is `unsafe`, presumably because
        // it is shared with executing Wasm code via `VMStoreContext`.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
2046
    /// Moves fuel from this store's reserve into the active fuel counter,
    /// returning whether any fuel was available to inject (delegates to the
    /// free `refuel` helper).
    pub(crate) fn refuel(&mut self) -> bool {
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
2055
    /// Sets the amount of fuel available to this store to `fuel`.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption was not enabled in this store's
    /// configuration.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2070
2071    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2072        anyhow::ensure!(
2073            self.engine().tunables().consume_fuel,
2074            "fuel is not configured in this store"
2075        );
2076        anyhow::ensure!(
2077            self.engine().config().async_support,
2078            "async support is not configured in this store"
2079        );
2080        anyhow::ensure!(
2081            interval != Some(0),
2082            "fuel_async_yield_interval must not be 0"
2083        );
2084        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2085        // Reset the fuel active + reserve states by resetting the amount.
2086        self.set_fuel(self.get_fuel()?)
2087    }
2088
2089    #[inline]
2090    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
2091        let handler = self.signal_handler.as_ref()?;
2092        Some(handler)
2093    }
2094
    /// Returns a raw (non-null) pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
2099
    /// Returns the `VMContext` of this store's default caller instance.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
2104
    /// Returns the store's self-referential `dyn VMStore` trait-object
    /// pointer.
    ///
    /// # Panics
    ///
    /// Panics (via `unwrap`) if the pointer has not been initialized yet.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2109
2110    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
2111    /// used as part of calling the host in a `Func::new` method invocation.
2112    #[inline]
2113    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
2114        mem::take(&mut self.hostcall_val_storage)
2115    }
2116
    /// Restores the vector previously taken by `take_hostcall_val_storage`
    /// above back into the store, allowing it to be used in the future for the
    /// next wasm->host call.
    #[inline]
    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
        // Only keep `storage` if it's the larger allocation; otherwise drop
        // it and retain the existing buffer.
        if storage.capacity() > self.hostcall_val_storage.capacity() {
            self.hostcall_val_storage = storage;
        }
    }
2126
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
2133
    /// Same as `save_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
        // Only keep `storage` if it's the larger allocation; otherwise drop
        // it and retain the existing buffer.
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
2142
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // An address should belong to at most one linear memory.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No linear memory claims this address: abort the process, since a
        // fault Wasmtime caught but cannot attribute may indicate a serious
        // codegen bug. How we abort depends on the available runtime support.
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2251
    /// Retrieve the store's protection key.
    ///
    /// Returns `None` when no protection key is associated with this store.
    /// Only compiled when the `pooling-allocator` feature is enabled.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2258
2259    #[inline]
2260    #[cfg(feature = "component-model")]
2261    pub(crate) fn component_resource_state(
2262        &mut self,
2263    ) -> (
2264        &mut vm::component::CallContexts,
2265        &mut vm::component::HandleTable,
2266        &mut crate::component::HostResourceData,
2267    ) {
2268        (
2269            &mut self.component_calls,
2270            &mut self.component_host_table,
2271            &mut self.host_resource_data,
2272        )
2273    }
2274
    /// Records that a component instance was added to this store.
    ///
    /// Currently this only bumps a counter (consumed during store teardown);
    /// the instance itself is deliberately not retained yet.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
2284
2285    #[inline]
2286    #[cfg(feature = "component-model")]
2287    pub(crate) fn component_resource_state_with_instance(
2288        &mut self,
2289        instance: crate::component::Instance,
2290    ) -> (
2291        &mut vm::component::CallContexts,
2292        &mut vm::component::HandleTable,
2293        &mut crate::component::HostResourceData,
2294        Pin<&mut vm::component::ComponentInstance>,
2295    ) {
2296        (
2297            &mut self.component_calls,
2298            &mut self.component_host_table,
2299            &mut self.host_resource_data,
2300            instance.id().from_data_get_mut(&mut self.store_data),
2301        )
2302    }
2303
    /// Returns mutable access to this store's fiber-related async state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2308
    /// Returns mutable access to this store's component-model-async state.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_async_state_mut(&mut self) -> &mut concurrent::AsyncState {
        &mut self.concurrent_async_state
    }
2313
    /// Returns whether a protection key is associated with this store.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2318
    /// Returns a borrowed view of this store's executor: either a reference
    /// into the interpreter or the marker for native execution.
    ///
    /// The `Native` variant only exists when a host compiler backend is
    /// available for this target.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2326
    /// Exchanges this store's executor state with `executor` in place.
    ///
    /// NOTE(review): only compiled for `async`; presumably used to move
    /// executor state across suspension points — confirm against callers.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2331
    /// Returns the unwinding implementation matching this store's executor:
    /// the interpreter's own unwinder, or the host unwinder for natively
    /// compiled code.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2339
2340    /// Allocates a new continuation. Note that we currently don't support
2341    /// deallocating them. Instead, all continuations remain allocated
2342    /// throughout the store's lifetime.
2343    #[cfg(feature = "stack-switching")]
2344    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
2345        // FIXME(frank-emrich) Do we need to pin this?
2346        let mut continuation = Box::new(VMContRef::empty());
2347        let stack_size = self.engine.config().async_stack_size;
2348        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
2349        continuation.stack = stack;
2350        let ptr = continuation.deref_mut() as *mut VMContRef;
2351        self.continuations.push(continuation);
2352        Ok(ptr)
2353    }
2354
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be use to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id the upcoming push will produce; the allocation
        // request needs it up front, and the prediction is validated with the
        // `assert_eq!` below.
        let id = self.instances.next_key();

        // Dummy instances carry their own allocator; real modules use the
        // engine's configured allocator.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        // Record the freshly-allocated instance in the store, tagging it as
        // real (tied to a registered module) or dummy.
        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2430
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    ///
    /// Note: any previously-pending exception is silently replaced.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }
2439
    /// Take a pending exception, if any, leaving `None` in its place.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2445
2446    #[cfg(feature = "gc")]
2447    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
2448        let vmexnref = self.take_pending_exception()?;
2449        let mut nogc = AutoAssertNoGc::new(self);
2450        Some(Rooted::new(&mut nogc, vmexnref.into()))
2451    }
2452
    /// Records `exception` as this store's pending exception.
    ///
    /// Converts the rooted `ExnRef` back to its raw representation and
    /// stashes it via `set_pending_exception`. Panics (via the
    /// `unwrap`/`expect` below) if the root is inaccessible or the reference
    /// is null.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        // GC must not run between extracting the raw u32 and re-wrapping it,
        // hence the `AutoAssertNoGc` guard around the whole conversion.
        let mut nogc = AutoAssertNoGc::new(self);
        let exnref = exception._to_raw(&mut nogc).unwrap();
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2462
2463    #[cfg(target_has_atomic = "64")]
2464    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2465        // Set a new deadline based on the "epoch deadline delta".
2466        //
2467        // Also, note that when this update is performed while Wasm is
2468        // on the stack, the Wasm will reload the new value once we
2469        // return into it.
2470        let current_epoch = self.engine().current_epoch();
2471        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
2472        *epoch_deadline = current_epoch + delta;
2473    }
2474
    /// Returns the currently-configured epoch deadline.
    ///
    /// Takes `&mut self` because the deadline is read through `get_mut` on
    /// the cell it lives in.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2478}
2479
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        /// The allocator used to perform the dummy instance's allocation.
        allocator: &'a dyn InstanceAllocator,
    },
}
2498
unsafe impl<T> VMStore for StoreInner<T> {
    // `StoreInner<T>` implements `VMComponentAsyncStore` itself, so this is
    // an identity coercion to the trait object.
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    // Shared view of the type-erased store internals.
    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    // Mutable view of the type-erased store internals.
    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    // Splits `self` into the configured resource limiter (if any) and the
    // opaque store. The limiter closure is applied to `self.data` while
    // `self.inner` is borrowed separately — this compiles because the two
    // are disjoint fields.
    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        (
            self.limiter.as_mut().map(|l| match l {
                ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(&mut self.data)),
                #[cfg(feature = "async")]
                ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(&mut self.data)),
            }),
            &mut self.inner,
        )
    }

    // Invokes the registered epoch-deadline callback, or requests an
    // interrupt when no callback is configured (see `epoch_deadline_trap`).
    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    // Direct access to the component call-context stack.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }
}
2548
impl<T> StoreInner<T> {
    /// Configures epoch-deadline expiration to have no callback, which makes
    /// `new_epoch_updated_deadline` return `UpdateDeadline::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Registers `callback` to be invoked whenever the epoch deadline is
    /// reached, replacing any previously-registered behavior.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2563
2564impl<T: Default> Default for Store<T> {
2565    fn default() -> Store<T> {
2566        Store::new(&Engine::default(), T::default())
2567    }
2568}
2569
2570impl<T: fmt::Debug> fmt::Debug for Store<T> {
2571    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2572        let inner = &**self.inner as *const StoreInner<T>;
2573        f.debug_struct("Store")
2574            .field("inner", &inner)
2575            .field("data", &self.inner.data)
2576            .finish()
2577    }
2578}
2579
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run teardown that must happen while both `data` and `inner` are
        // still alive.
        self.run_manual_drop_routines();

        // for documentation on this `unsafe`, see `into_data`.
        //
        // Note the explicit ordering: the embedder's `data` is dropped
        // first, then the rest of the store's internals.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2591
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap (and its backing memory) to the engine's
            // allocator before tearing down instances.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Dummy instances are always deallocated via a default on-demand
            // allocator; everything else goes back to the engine's allocator.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            // Undo each increment performed by `push_component_instance`.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2630
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
/// Internal helper trait for uniformly viewing various store-like types as
/// a `&mut StoreOpaque`.
pub(crate) trait AsStoreOpaque {
    /// Returns the underlying `StoreOpaque` of this type.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2640
// Identity case: a `StoreOpaque` is already itself.
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2646
// Trait objects deref to `StoreOpaque` via their `VMStore` implementation.
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2652
// Typed stores coerce to their opaque inner portion.
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2658
// Blanket impl so `&mut`-references to any of the above work transparently.
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
2664
#[cfg(test)]
mod tests {
    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Miniature stand-in for the store's fuel bookkeeping: a signed
    /// consumption counter, an unsigned reserve pool, and an optional
    /// yield interval.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        /// An empty tank: nothing consumed, nothing in reserve, no yielding.
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }

        /// Total fuel remaining across the active and reserve pools.
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }

        /// Moves fuel out of the reserve into the active pool; reports
        /// whether any fuel remained to continue with.
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }

        /// Sets the total amount of fuel, splitting it between the active
        /// and reserve pools according to the yield interval.
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();

        // With no yield interval everything lands in the active pool.
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With a yield interval only one interval's worth is made active;
        // the rest is held back in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();

        // Round-trip values near the i64/u64 boundary unchanged.
        for fuel in [u64::MAX, i64::MAX as u64, i64::MAX as u64 + 1] {
            tank.set_fuel(fuel);
            assert_eq!(tank.get_fuel(), fuel);
        }
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // Intervals at or beyond `i64::MAX` leave at most `i64::MAX` units
        // in the active pool; the remainder stays in reserve.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for consumed fuel to run over the limit since some
        // instructions consume multiple units of fuel at once. Refueling
        // should be strict in its accounting and not add more fuel than
        // there actually is.
        let mut tank = FuelTank::new();

        // Over-consumption is charged against the reserve before refilling.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        // A small interval moves only part of the reserve into the active
        // pool; the total remains unchanged.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // Completely out of fuel: refueling fails and state is untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }
}