// wasmtime/runtime/store.rs
1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//!   Stored inline in the outer type the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(all(feature = "gc", feature = "debug"))]
80use crate::OwnedRooted;
81use crate::RootSet;
82#[cfg(feature = "gc")]
83use crate::ThrownException;
84#[cfg(feature = "component-model-async")]
85use crate::component::ComponentStoreData;
86#[cfg(feature = "component-model")]
87use crate::component::concurrent;
88use crate::error::OutOfMemory;
89#[cfg(feature = "async")]
90use crate::fiber;
91use crate::module::RegisteredModuleId;
92use crate::prelude::*;
93#[cfg(feature = "gc")]
94use crate::runtime::vm::GcRootsList;
95#[cfg(feature = "stack-switching")]
96use crate::runtime::vm::VMContRef;
97use crate::runtime::vm::mpk::ProtectionKey;
98use crate::runtime::vm::{
99    self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
100    InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
101    SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
102    VMStoreContext,
103};
104use crate::trampoline::VMHostGlobalContext;
105#[cfg(feature = "debug")]
106use crate::{BreakpointState, DebugHandler};
107use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
108#[cfg(feature = "gc")]
109use crate::{ExnRef, Rooted};
110use crate::{Global, Instance, Table};
111use core::convert::Infallible;
112use core::fmt;
113use core::marker;
114use core::mem::{self, ManuallyDrop, MaybeUninit};
115use core::num::NonZeroU64;
116use core::ops::{Deref, DerefMut};
117use core::pin::Pin;
118use core::ptr::NonNull;
119use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
120
121mod context;
122pub use self::context::*;
123mod data;
124pub use self::data::*;
125mod func_refs;
126use func_refs::FuncRefs;
127#[cfg(feature = "component-model-async")]
128mod token;
129#[cfg(feature = "component-model-async")]
130pub(crate) use token::StoreToken;
131#[cfg(feature = "async")]
132mod async_;
133#[cfg(all(feature = "async", feature = "call-hook"))]
134pub use self::async_::CallHookHandler;
135
136#[cfg(feature = "gc")]
137use super::vm::VMExnRef;
138#[cfg(feature = "gc")]
139mod gc;
140
141/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
142///
143/// All WebAssembly instances and items will be attached to and refer to a
144/// [`Store`]. For example instances, functions, globals, and tables are all
145/// attached to a [`Store`]. Instances are created by instantiating a
146/// [`Module`](crate::Module) within a [`Store`].
147///
148/// A [`Store`] is intended to be a short-lived object in a program. No form
149/// of GC is implemented at this time so once an instance is created within a
150/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
151/// This makes [`Store`] unsuitable for creating an unbounded number of
152/// instances in it because [`Store`] will never release this memory. It's
153/// recommended to have a [`Store`] correspond roughly to the lifetime of a
154/// "main instance" that an embedding is interested in executing.
155///
156/// ## Type parameter `T`
157///
158/// Each [`Store`] has a type parameter `T` associated with it. This `T`
159/// represents state defined by the host. This state will be accessible through
160/// the [`Caller`](crate::Caller) type that host-defined functions get access
161/// to. This `T` is suitable for storing `Store`-specific information which
162/// imported functions may want access to.
163///
164/// The data `T` can be accessed through methods like [`Store::data`] and
165/// [`Store::data_mut`].
166///
167/// ## Stores, contexts, oh my
168///
169/// Most methods in Wasmtime take something of the form
170/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
171/// the first argument. These two traits allow ergonomically passing in the
172/// context you currently have to any method. The primary two sources of
173/// contexts are:
174///
175/// * `Store<T>`
176/// * `Caller<'_, T>`
177///
178/// corresponding to what you create and what you have access to in a host
179/// function. You can also explicitly acquire a [`StoreContext`] or
180/// [`StoreContextMut`] and pass that around as well.
181///
182/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
183/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
184/// form of context you have you can call various methods, create objects, etc.
185///
186/// ## Stores and `Default`
187///
188/// You can create a store with default configuration settings using
189/// `Store::default()`. This will create a brand new [`Engine`] with default
190/// configuration (see [`Config`](crate::Config) for more information).
191///
192/// ## Cross-store usage of items
193///
194/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
195/// [`Store`]. The store they belong to is the one they were created with
196/// (passed in as a parameter) or instantiated with. This store is the only
197/// store that can be used to interact with wasm items after they're created.
198///
199/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
200/// operations is incorrect. In other words it's considered a programmer error
201/// rather than a recoverable error for the wrong [`Store`] to be used when
202/// calling APIs.
203///
204/// [`Memory`]: crate::Memory
pub struct Store<T: 'static> {
    // The sole field: `Store<T>` is deliberately just one pointer in size
    // (see the module docs above for why that's load-bearing).
    //
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
209
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // `matches!` is the idiomatic form of a boolean pattern test and
        // avoids the catch-all `_ =>` arm of a manual `match`.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
240
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Embedder-configured resource limiter, if any; a closure projecting
    /// from `T` to the limiter trait object.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Embedder-configured call hook, if any, invoked on host/wasm
    /// transitions (see `CallHook`).
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's
    /// deadline; its return value decides how execution proceeds (see
    /// `UpdateDeadline`).
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// We need this to be an `Arc` because the handler itself takes
    /// `&self` and also the whole Store mutably (via
    /// `StoreContextMut`); so we need to hold a separate reference to
    /// it while invoking it.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
280
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Dispatch `event` to the user's handler, consuming the boxed handler.
    /// The implementation below clones the underlying handler and re-installs
    /// it on the store before returning the future that runs it.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
291
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it.
        store.0.debug_handler = Some(self);
        // Return a boxed future that simply runs the cloned handler with
        // ownership of `store` and `event`.
        Box::new(async move { handler.handle(store, event).await })
    }
}
314
/// Store-owned closures projecting from the store's `T` data to the
/// embedder-provided resource limiter, in either its sync or async flavor.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
320
/// Representation of a configured resource limiter for a store.
///
/// This is acquired with `resource_limiter_and_store_opaque` for example and is
/// threaded through to growth operations on tables/memories. Note that this is
/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
/// efficient to pass around (nullable pointer) and it's also notably passed
/// around as an `Option` to represent how this is optionally specified within a
/// store.
pub enum StoreResourceLimiter<'a> {
    /// A synchronous limiter; all hooks complete immediately.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// An asynchronous limiter; growth hooks must be awaited.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
334
impl StoreResourceLimiter<'_> {
    /// Forwards a linear-memory growth request (`current` -> `desired`, with
    /// optional `maximum`) to the underlying limiter, awaiting the async
    /// variant when configured. Returns whether growth is permitted.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the underlying limiter that a memory growth attempt failed
    /// with `error`.
    pub(crate) fn memory_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Forwards a table growth request to the underlying limiter, awaiting
    /// the async variant when configured. Returns whether growth is permitted.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the underlying limiter that a table growth attempt failed
    /// with `error`.
    pub(crate) fn table_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
378
/// Store-owned state backing `Store::call_hook`: either a synchronous closure
/// or an async handler, invoked on each `CallHook` state transition.
enum CallHookInner<T: 'static> {
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    // Uninhabited variant (can never be constructed) which exists purely so
    // `T` is always considered used no matter which features are enabled.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}
393
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks and resume
    /// execution immediately.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
427
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`. This lets
// internal code take a `StoreInner<T>` and transparently use the monomorphic
// `StoreOpaque` API.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
441
442/// Monomorphic storage for a `Store<T>`.
443///
444/// This structure contains the bulk of the metadata about a `Store`. This is
445/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
446/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
447/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store belongs to.
    engine: Engine,
    /// VM-level context for this store (see `VMStoreContext`).
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances created within this store, real and dummy alike
    /// (see `StoreInstanceKind`).
    instances: wasmtime_environ::collections::PrimaryMap<InstanceId, StoreInstance>,

    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    signal_handler: Option<SignalHandler>,
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-referential pointer back to this store as a `dyn VMStore`
    /// (see `StorePtr` below).
    traitobj: StorePtr,
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
    #[cfg(feature = "component-model")]
    concurrent_state: Option<concurrent::ConcurrentState>,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,

    /// The debug breakpoint state for this store.
    ///
    /// When guest debugging is enabled, a given store may have a set
    /// of breakpoints defined, denoted by module and Wasm PC within
    /// that module. Or alternately, it may be in "single-step" mode,
    /// where every possible breakpoint is logically enabled.
    ///
    /// When execution of any instance in this store hits any defined
    /// breakpoint, a `Breakpoint` debug event is emitted and the
    /// handler defined above, if any, has a chance to perform some
    /// logic before returning to allow execution to resume.
    #[cfg(feature = "debug")]
    breakpoints: BreakpointState,
}
575
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);

// We can't make `VMStore: Send + Sync` because that requires making all of
// Wasmtime's internals generic over the `Store`'s `T`. So instead, we take care
// in the whole VM layer to only use the `VMStore` in ways that are `Send`- and
// `Sync`-safe and we have to have these unsafe impls.
//
// SAFETY: per the above, soundness relies on the VM layer's discipline around
// how the contained pointer is actually used across threads.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
589
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm runs in the Pulley interpreter, whose state lives here.
    Interpreter(Interpreter),
    /// Wasm runs as natively-compiled code; no per-store state needed.
    #[cfg(has_host_compiler_backend)]
    Native,
}
599
impl Executor {
    /// Selects the executor for a new store based on `engine`'s target:
    /// the Pulley interpreter when targeting Pulley, otherwise native
    /// execution (only possible when a host compiler backend exists).
    pub(crate) fn new(engine: &Engine) -> Self {
        // When a host compiler backend is compiled in, Pulley is only used if
        // both the feature is on and the engine actually targets Pulley.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a native backend the target must be Pulley and the
        // interpreter is the only option.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
615
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; no borrowed state is needed.
    #[cfg(has_host_compiler_backend)]
    Native,
}
622
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    // Exclusive borrow of the store whose GC heap is placed in a no-GC scope.
    store: &'a mut StoreOpaque,
    // Whether a no-GC scope was actually entered; `false` when the `gc`
    // feature is disabled or the store has no GC heap allocated yet, in
    // which case `Drop` has nothing to exit.
    entered: bool,
}
629
630impl<'a> AutoAssertNoGc<'a> {
631    #[inline]
632    pub fn new(store: &'a mut StoreOpaque) -> Self {
633        let entered = if !cfg!(feature = "gc") {
634            false
635        } else if let Some(gc_store) = store.gc_store.as_mut() {
636            gc_store.gc_heap.enter_no_gc_scope();
637            true
638        } else {
639            false
640        };
641
642        AutoAssertNoGc { store, entered }
643    }
644
645    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
646    /// disables checks for no GC happening for the duration of this value.
647    ///
648    /// This is used when it is statically otherwise known that a GC doesn't
649    /// happen for the various types involved.
650    ///
651    /// # Unsafety
652    ///
653    /// This method is `unsafe` as it does not provide the same safety
654    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
655    /// caller that a GC doesn't happen.
656    #[inline]
657    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
658        if cfg!(debug_assertions) {
659            AutoAssertNoGc::new(store)
660        } else {
661            AutoAssertNoGc {
662                store,
663                entered: false,
664            }
665        }
666    }
667}
668
// `AutoAssertNoGc` transparently derefs to the underlying `StoreOpaque` so it
// can be used anywhere a store reference is expected.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
684
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the no-GC scope if `new` actually entered one.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
693
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The underlying instance handle owned by this store.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy one.
    kind: StoreInstanceKind,
}
702
/// Distinguishes real instances from internal "dummy" instances; see the
/// variant docs for why this matters at deallocation time.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
719
720impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    ///
    /// # Panics
    ///
    /// Panics on allocation failure during store creation; use
    /// [`Store::try_new`] to handle such errors instead.
    pub fn new(engine: &Engine, data: T) -> Self {
        Self::try_new(engine, data).expect(
            "allocation failure during `Store::new` (use `Store::try_new` to handle such errors)",
        )
    }
735
    /// Like `Store::new` but returns an error on allocation failure.
    ///
    /// # Errors
    ///
    /// Returns an error if allocating the internal `StoreInner` box or the
    /// store's default callee instance fails due to out-of-memory.
    pub fn try_new(engine: &Engine, data: T) -> Result<Self> {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        let pkey = engine.allocator().next_available_pkey();

        // Build the store's engine-facing internals first. Note that several
        // fields here (e.g. `traitobj` and `default_caller_vmctx`) start out
        // as placeholder values and are patched up below once the store has a
        // stable heap address.
        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: wasmtime_environ::collections::PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            traitobj: StorePtr(None),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model")]
            concurrent_state: if engine.tunables().concurrency_support {
                // Concurrency support implies the `component-model-async`
                // feature was compiled in; otherwise this configuration is
                // unreachable.
                #[cfg(feature = "component-model-async")]
                {
                    Some(Default::default())
                }
                #[cfg(not(feature = "component-model-async"))]
                {
                    unreachable!()
                }
            } else {
                None
            },
            #[cfg(feature = "debug")]
            breakpoints: Default::default(),
        };
        // Fallibly box up the full `StoreInner`, wrapping the user's `data` in
        // `ManuallyDrop` since its destruction is managed manually (see
        // `into_data` and `Drop for Store<T>`).
        let mut inner = try_new::<Box<_>>(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        })?;

        // Record a pointer to the `T` data in the `VMStoreContext`. This
        // pointer's provenance is what `StoreInner::data{,_mut}` later use to
        // access the field, so it must be established after the box is
        // allocated (i.e. once the address is stable).
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Self-referential pointer to the store's own trait object; the
        // `PhantomPinned` marker above keeps this sound by preventing moves.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let allocator = OnDemandInstanceAllocator::default();
        let info = engine.empty_module_runtime_info();
        allocator
            .validate_module(info.env_module(), info.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let result = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                info,
                Default::default(),
            ));
            // Out-of-memory is the only allocation failure surfaced to the
            // caller; anything else here is a bug in the allocator.
            let id = match result {
                Ok(id) => id,
                Err(e) => {
                    if e.is::<OutOfMemory>() {
                        return Err(e);
                    }
                    panic!("instance allocator failed to allocate default callee")
                }
            };
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Ok(Self {
            inner: ManuallyDrop::new(inner),
        })
    }
863
    /// Access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data(&self) -> &T {
        // Delegates to `StoreInner::data`, which reads the field through the
        // provenance recorded in the `VMStoreContext`.
        self.inner.data()
    }
869
    /// Access the underlying `T` data owned by this `Store`, mutably.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        // Delegates to `StoreInner::data_mut` (defined elsewhere in this
        // module), mirroring `Store::data` above.
        self.inner.data_mut()
    }
875
    /// Tear down state whose destruction must be ordered before the store's
    /// `T` data (or the store itself) is destroyed: component fibers/futures
    /// first, then any cached fiber stacks.
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        //
        // This will also drop any futures which might use `&Accessor` fields
        // in their `Drop::drop` implementations, in which case they'll need to
        // be called from within the context of a `tls::set` closure.
        #[cfg(feature = "component-model-async")]
        if self.inner.concurrent_state.is_some() {
            ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);
        }

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
894
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        // Fibers and futures must be shut down in order before `T` is moved
        // out from under the rest of the store's state.
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            // SAFETY (as described above): `inner` is only ever taken here or
            // in `Drop for Store<T>`, and `mem::forget` ensures that drop glue
            // never observes the moved-out `self.inner`.
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
929
930    /// Configures the [`ResourceLimiter`] used to limit resource creation
931    /// within this [`Store`].
932    ///
933    /// Whenever resources such as linear memory, tables, or instances are
934    /// allocated the `limiter` specified here is invoked with the store's data
935    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
936    /// being allocated. The returned [`ResourceLimiter`] is intended to live
937    /// within the `T` itself, for example by storing a
938    /// [`StoreLimits`](crate::StoreLimits).
939    ///
940    /// Note that this limiter is only used to limit the creation/growth of
941    /// resources in the future, this does not retroactively attempt to apply
942    /// limits to the [`Store`].
943    ///
944    /// # Examples
945    ///
946    /// ```
947    /// use wasmtime::*;
948    ///
949    /// struct MyApplicationState {
950    ///     my_state: u32,
951    ///     limits: StoreLimits,
952    /// }
953    ///
954    /// let engine = Engine::default();
955    /// let my_state = MyApplicationState {
956    ///     my_state: 42,
957    ///     limits: StoreLimitsBuilder::new()
958    ///         .memory_size(1 << 20 /* 1 MB */)
959    ///         .instances(2)
960    ///         .build(),
961    /// };
962    /// let mut store = Store::new(&engine, my_state);
963    /// store.limiter(|state| &mut state.limits);
964    ///
965    /// // Creation of smaller memories is allowed
966    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
967    ///
968    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
969    /// // configured
970    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
971    ///
972    /// // The number of instances in this store is limited to 2, so the third
973    /// // instance here should fail.
974    /// let module = Module::new(&engine, "(module)").unwrap();
975    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
976    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
977    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
978    /// ```
979    ///
980    /// [`ResourceLimiter`]: crate::ResourceLimiter
981    pub fn limiter(
982        &mut self,
983        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
984    ) {
985        // Apply the limits on instances, tables, and memory given by the limiter:
986        let inner = &mut self.inner;
987        let (instance_limit, table_limit, memory_limit) = {
988            let l = limiter(inner.data_mut());
989            (l.instances(), l.tables(), l.memories())
990        };
991        let innermost = &mut inner.inner;
992        innermost.instance_limit = instance_limit;
993        innermost.table_limit = table_limit;
994        innermost.memory_limit = memory_limit;
995
996        // Save the limiter accessor function:
997        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
998    }
999
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    ///
    /// [`Trap`]: crate::Trap
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Replaces (not chains with) any previously-configured hook.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
1023
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // Simple delegation to the internal store state.
        self.inner.engine()
    }
1028
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// # Errors
    ///
    /// This method will fail if an [async limiter is
    /// configured](Store::limiter_async) in which case [`Store::gc_async`] must
    /// be used instead.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        // Reborrow the owned store as a `StoreContextMut` and delegate.
        StoreContextMut(&mut self.inner).gc(why)
    }
1050
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }
1061
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // Delegation to the internal implementation shared with
        // `StoreContextMut::set_fuel`.
        self.inner.set_fuel(fuel)
    }
1084
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back
    /// to the caller every `interval` units of fuel consumed. When using this
    /// method it requires further invocations of WebAssembly to use `*_async`
    /// entrypoints.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if fuel is not enabled or `interval` is
    /// `Some(0)`.
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // `None` disables periodic yielding entirely.
        self.inner.fuel_async_yield_interval(interval)
    }
1117
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // Deadline bookkeeping lives in the internal store state.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
1148
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        // Resets any previously-configured deadline behavior back to trapping.
        self.inner.epoch_deadline_trap();
    }
1179
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Box the callback so the internal state can store it type-erased.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1214
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To take the pending exception, use [`Self::take_pending_exception`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the exception in the store's pending-exception slot, then
        // unconditionally return the marker error.
        self.inner.throw_impl(exception);
        Err(ThrownException)
    }
1249
    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        // Empties the pending-exception slot; subsequent calls return `None`
        // until another exception is thrown.
        self.inner.take_pending_exception_rooted()
    }
1270
    /// Tests whether there is a pending exception.
    ///
    /// Ordinarily, a pending exception will be set on a store if and
    /// only if a host-side callstack is propagating a
    /// [`crate::ThrownException`] error. The final consumer that
    /// catches the exception takes it; it may re-place it to re-throw
    /// (using [`Self::throw`]) if it chooses not to actually handle the
    /// exception.
    ///
    /// This method is useful to tell whether a store is in this
    /// state, but should not be used as part of the ordinary
    /// exception-handling flow. For the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        // Non-destructive check of the pending-exception slot.
        self.inner.pending_exception.is_some()
    }
1288
    /// Provide an object that views Wasm stack state, including Wasm
    /// VM-level values (locals and operand stack), when debugging is
    /// enabled.
    ///
    /// Returns `None` if debug instrumentation is not enabled for
    /// the engine containing this store.
    #[cfg(feature = "debug")]
    pub fn debug_frames(&mut self) -> Option<crate::DebugFrameCursor<'_, T>> {
        // Delegates through the `AsContextMut`-derived context.
        self.as_context_mut().debug_frames()
    }
1299
    /// Start an edit session to update breakpoints.
    ///
    /// Returns `None` when the returned editor cannot be provided (delegates
    /// to the context-level implementation).
    #[cfg(feature = "debug")]
    pub fn edit_breakpoints(&mut self) -> Option<crate::BreakpointEdit<'_>> {
        self.as_context_mut().edit_breakpoints()
    }
1305
    /// Return all breakpoints.
    ///
    /// Delegates to the context-level implementation; `None` mirrors that
    /// method's unavailable case.
    #[cfg(feature = "debug")]
    pub fn breakpoints(&self) -> Option<impl Iterator<Item = crate::Breakpoint> + '_> {
        self.as_context().breakpoints()
    }
1311
    /// Indicate whether single-step mode is enabled.
    #[cfg(feature = "debug")]
    pub fn is_single_step(&self) -> bool {
        // Read-only query delegated through the shared context.
        self.as_context().is_single_step()
    }
1317
    /// Set the debug callback on this store.
    ///
    /// See [`crate::DebugHandler`] for more documentation.
    ///
    /// # Panics
    ///
    /// - Will panic if guest-debug support was not enabled via
    ///   [`crate::Config::guest_debug`].
    #[cfg(feature = "debug")]
    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
    where
        // We require `Send` here because the debug handler becomes
        // referenced from a future: when `DebugHandler::handle` is
        // invoked, its `self` references the `handler` with the
        // user's state. Note that we are careful to keep this bound
        // constrained to debug-handler-related code only and not
        // propagate it outward to the store in general. The presence
        // of the trait implementation serves as a witness that `T:
        // Send`. This is required in particular because we will have
        // a `&mut dyn VMStore` on the stack when we pause a fiber
        // with `block_on` to run a debugger hook; that `VMStore` must
        // be a `Store<T> where T: Send`.
        T: Send,
    {
        // Debug hooks rely on async support, so async entrypoints are required.
        self.inner.set_async_required(Asyncness::Yes);

        // NOTE(review): the assert below runs *after* `set_async_required`, so
        // a store failing it has already been flagged as requiring async
        // entrypoints — confirm this ordering is intentional in case the panic
        // can be caught and the store reused.
        assert!(
            self.engine().tunables().debug_guest,
            "debug hooks require guest debugging to be enabled"
        );
        self.inner.debug_handler = Some(Box::new(handler));
    }
1351
    /// Clear the debug handler on this store. If any existed, it will
    /// be dropped.
    #[cfg(feature = "debug")]
    pub fn clear_debug_handler(&mut self) {
        // Dropping the boxed handler here runs its destructor immediately.
        self.inner.debug_handler = None;
    }
1358}
1359
1360impl<'a, T> StoreContext<'a, T> {
1361    /// Returns the underlying [`Engine`] this store is connected to.
1362    pub fn engine(&self) -> &Engine {
1363        self.0.engine()
1364    }
1365
1366    /// Access the underlying data owned by this `Store`.
1367    ///
1368    /// Same as [`Store::data`].
1369    pub fn data(&self) -> &'a T {
1370        self.0.data()
1371    }
1372
1373    /// Returns the remaining fuel in this store.
1374    ///
1375    /// For more information see [`Store::get_fuel`].
1376    pub fn get_fuel(&self) -> Result<u64> {
1377        self.0.get_fuel()
1378    }
1379}
1380
1381impl<'a, T> StoreContextMut<'a, T> {
1382    /// Access the underlying data owned by this `Store`.
1383    ///
1384    /// Same as [`Store::data`].
1385    pub fn data(&self) -> &T {
1386        self.0.data()
1387    }
1388
1389    /// Access the underlying data owned by this `Store`.
1390    ///
1391    /// Same as [`Store::data_mut`].
1392    pub fn data_mut(&mut self) -> &mut T {
1393        self.0.data_mut()
1394    }
1395
1396    /// Returns the underlying [`Engine`] this store is connected to.
1397    pub fn engine(&self) -> &Engine {
1398        self.0.engine()
1399    }
1400
1401    /// Perform garbage collection of `ExternRef`s.
1402    ///
1403    /// Same as [`Store::gc`].
1404    #[cfg(feature = "gc")]
1405    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
1406        let (mut limiter, store) = self.0.validate_sync_resource_limiter_and_store_opaque()?;
1407        vm::assert_ready(store.gc(
1408            limiter.as_mut(),
1409            None,
1410            why.map(|e| e.bytes_needed()),
1411            Asyncness::No,
1412        ));
1413        Ok(())
1414    }
1415
1416    /// Returns remaining fuel in this store.
1417    ///
1418    /// For more information see [`Store::get_fuel`]
1419    pub fn get_fuel(&self) -> Result<u64> {
1420        self.0.get_fuel()
1421    }
1422
1423    /// Set the amount of fuel in this store.
1424    ///
1425    /// For more information see [`Store::set_fuel`]
1426    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1427        self.0.set_fuel(fuel)
1428    }
1429
1430    /// Configures this `Store` to periodically yield while executing futures.
1431    ///
1432    /// For more information see [`Store::fuel_async_yield_interval`]
1433    #[cfg(feature = "async")]
1434    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1435        self.0.fuel_async_yield_interval(interval)
1436    }
1437
1438    /// Sets the epoch deadline to a certain number of ticks in the future.
1439    ///
1440    /// For more information see [`Store::set_epoch_deadline`].
1441    #[cfg(target_has_atomic = "64")]
1442    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
1443        self.0.set_epoch_deadline(ticks_beyond_current);
1444    }
1445
1446    /// Configures epoch-deadline expiration to trap.
1447    ///
1448    /// For more information see [`Store::epoch_deadline_trap`].
1449    #[cfg(target_has_atomic = "64")]
1450    pub fn epoch_deadline_trap(&mut self) {
1451        self.0.epoch_deadline_trap();
1452    }
1453
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the exception in the store's pending-exception slot, then
        // unconditionally return `Err` so `?` propagates the throw upward.
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }
1463
    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        // Delegate to the shared implementation on the underlying store.
        self.0.inner.take_pending_exception_rooted()
    }
1472
    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        // Non-destructive check of the pending-exception slot.
        self.0.inner.pending_exception.is_some()
    }
1480}
1481
impl<T> StoreInner<T> {
    /// Returns a shared borrow of the user-provided `T` data stored within
    /// this store.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }

    /// Splits `self` into simultaneous mutable borrows of the user data `T`,
    /// the optional resource limiter, and the inner `StoreOpaque`.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }

    /// Returns an exclusive borrow of the user-provided `T` data.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }

    /// Invokes the configured call hook (if any) for the transition `s`, also
    /// performing any pkey-based memory-protection switching.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        // Fast path: no pkey and no hook configured means nothing to do.
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // When a pkey is configured, restrict/relax memory protection keys
        // depending on whether we're entering or leaving Wasm.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatches to the sync or async flavor of the configured call hook.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                // Async hooks must block on their future, which requires being
                // able to block (e.g. executing on a fiber).
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                // This variant is uninhabited, so this arm can never execute;
                // the empty match proves divergence to the compiler.
                match *uninhabited {}
            }
        }
    }

    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }

    /// Splits this `StoreInner<T>` into a `limiter`/`StoreOpaque` borrow while
    /// validating that an async limiter is not configured.
    ///
    /// This is used for sync entrypoints which need to fail if an async limiter
    /// is configured as otherwise the async entrypoint must be used instead.
    pub(crate) fn validate_sync_resource_limiter_and_store_opaque(
        &mut self,
    ) -> Result<(Option<StoreResourceLimiter<'_>>, &mut StoreOpaque)> {
        let (limiter, store) = self.resource_limiter_and_store_opaque();
        if !matches!(limiter, None | Some(StoreResourceLimiter::Sync(_))) {
            bail!(
                "when using an async resource limiter `*_async` functions must \
             be used instead"
            );
        }
        Ok((limiter, store))
    }
}
1606
/// Computes the total fuel remaining: the unconsumed portion of the injected
/// fuel plus the reserve.
///
/// `injected_fuel` is seeded with the negated injected amount and counts up
/// towards zero as the VM consumes fuel (see `set_fuel`), so `-injected_fuel`
/// is the amount still unconsumed. A positive value means fuel was exhausted,
/// which saturates to the reserve-only amount (typically zero) here.
///
/// Using `unsigned_abs` instead of negating avoids the overflow that
/// `-injected_fuel` would hit for `i64::MIN` (a panic in debug builds).
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    if injected_fuel <= 0 {
        fuel_reserve.saturating_add(injected_fuel.unsigned_abs())
    } else {
        fuel_reserve.saturating_sub(injected_fuel as u64)
    }
}
1610
1611// Add remaining fuel from the reserve into the active fuel if there is any left.
1612fn refuel(
1613    injected_fuel: &mut i64,
1614    fuel_reserve: &mut u64,
1615    yield_interval: Option<NonZeroU64>,
1616) -> bool {
1617    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1618    if fuel > 0 {
1619        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1620        true
1621    } else {
1622        false
1623    }
1624}
1625
// Distributes `new_fuel_amount` between the VM's active fuel counter and the
// store's reserve.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // When yielding periodically only the "active" slice of fuel is handed to
    // the VM at a time; with no interval effectively everything is.
    let interval = match yield_interval {
        Some(n) => n.get(),
        None => NonZeroU64::MAX.get(),
    };

    // The VM stores fuel in an `i64`, so the injected amount is additionally
    // clamped to the `i64` range.
    let injected = new_fuel_amount.min(interval).min(i64::MAX as u64);

    // Whatever wasn't injected is banked in the reserve for later use.
    *fuel_reserve = new_fuel_amount - injected;

    // The VM *increments* this counter as fuel is consumed and halts once it
    // becomes positive, so seed it with the negated injected amount.
    *injected_fuel = -(injected as i64);
}
1645
1646#[doc(hidden)]
1647impl StoreOpaque {
    /// Returns the `StoreId` uniquely identifying this store, as recorded in
    /// its `StoreData`.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1651
1652    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1653        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1654            let new = slot.saturating_add(amt);
1655            if new > max {
1656                bail!("resource limit exceeded: {desc} count too high at {new}");
1657            }
1658            *slot = new;
1659            Ok(())
1660        }
1661
1662        let module = module.env_module();
1663        let memories = module.num_defined_memories();
1664        let tables = module.num_defined_tables();
1665
1666        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1667        bump(
1668            &mut self.memory_count,
1669            self.memory_limit,
1670            memories,
1671            "memory",
1672        )?;
1673        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1674
1675        Ok(())
1676    }
1677
    /// Returns the [`Engine`] this store was created within.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1682
    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1687
    /// Mutable access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1692
    /// Split borrow: mutable `StoreData` alongside shared access to the
    /// module registry.
    pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
        (&mut self.store_data, &self.modules)
    }
1696
    /// Split borrow: mutable access to both the breakpoint state and the
    /// module registry at the same time.
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry_mut(
        &mut self,
    ) -> (&mut BreakpointState, &mut ModuleRegistry) {
        (&mut self.breakpoints, &mut self.modules)
    }
1703
    /// Shared-borrow counterpart of [`Self::breakpoints_and_registry_mut`].
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry(&self) -> (&BreakpointState, &ModuleRegistry) {
        (&self.breakpoints, &self.modules)
    }
1708
    /// Shared access to this store's module registry.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1713
    /// Registers `module` with this store's module registry, returning the id
    /// it was assigned.
    pub(crate) fn register_module(&mut self, module: &Module) -> Result<RegisteredModuleId> {
        self.modules.register_module(module, &self.engine)
    }
1717
    /// Registers `component` with this store's module registry.
    #[cfg(feature = "component-model")]
    pub(crate) fn register_component(
        &mut self,
        component: &crate::component::Component,
    ) -> Result<()> {
        self.modules.register_component(component, &self.engine)
    }
1725
    /// Split borrow: mutable `FuncRefs` alongside shared access to the module
    /// registry.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }
1729
    /// Shared access to the globals created by the host (as opposed to those
    /// defined by instances).
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }
1735
    /// Mutable counterpart of [`Self::host_globals`].
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1741
1742    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1743        instance.store_id().assert_belongs_to(self.id());
1744        match self.instances[instance.instance()].kind {
1745            StoreInstanceKind::Dummy => None,
1746            StoreInstanceKind::Real { module_id } => {
1747                let module = self
1748                    .modules()
1749                    .module_by_id(module_id)
1750                    .expect("should always have a registered module for real instances");
1751                Some(module)
1752            }
1753        }
1754    }
1755
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// Panics if `id` is out of bounds for this store's instance map.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }
1765
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// Panics if `id` is out of bounds for this store's instance map.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1775
    /// Accessor from `InstanceId` to both `Pin<&mut vm::Instance>`
    /// and `&ModuleRegistry`.
    ///
    /// Split borrow so the registry can be consulted while the instance is
    /// mutably borrowed.
    #[inline]
    pub fn instance_and_module_registry_mut(
        &mut self,
        id: InstanceId,
    ) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
        (self.instances[id].handle.get_mut(), &self.modules)
    }
1785
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` rejects overlapping ids; the `unwrap` here is
        // what surfaces that as the documented panic.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1811
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    ///
    /// Split borrow: the GC store (if allocated) alongside the instance.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1819
    /// Tuple of `Self::optional_gc_store_mut`, `Self::modules`, and
    /// `Self::instance_mut`.
    ///
    /// Three-way split borrow for callers that need all of these at once.
    pub fn optional_gc_store_and_registry_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (
        Option<&mut GcStore>,
        &ModuleRegistry,
        Pin<&mut vm::Instance>,
    ) {
        (
            self.gc_store.as_mut(),
            &self.modules,
            self.instances[id].handle.get_mut(),
        )
    }
1836
1837    /// Get all instances (ignoring dummy instances) within this store.
1838    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1839        let instances = self
1840            .instances
1841            .iter()
1842            .filter_map(|(id, inst)| {
1843                if let StoreInstanceKind::Dummy = inst.kind {
1844                    None
1845                } else {
1846                    Some(id)
1847                }
1848            })
1849            .collect::<Vec<_>>();
1850        instances
1851            .into_iter()
1852            .map(|i| Instance::from_wasmtime(i, self))
1853    }
1854
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1865
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1879
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1897
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1902
    /// Shared access to the raw `VMStoreContext` shared with compiled code.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1907
    /// Mutable counterpart of [`Self::vm_store_context`].
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1912
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // Fast path: already allocated, so re-borrow and return it. (The
        // early-return shape here keeps the borrow checker happy.)
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1930
    /// Slow path of [`Self::ensure_gc_store`]: allocates the GC heap's backing
    /// memory and the `GcStore` itself, then publishes the heap's
    /// `VMMemoryDefinition` into the `VMStoreContext`.
    ///
    /// Kept `#[inline(never)]` so the fast path in `ensure_gc_store` stays
    /// small.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Sanity-check that no GC heap exists yet: the context's heap slot
        // should still hold its initial dangling/empty placeholder.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // The real allocation logic, compiled only when GC support is enabled.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::packed_option::ReservedValue;

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: engine.empty_module_runtime_info(),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Stub used when GC support is compiled out entirely.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1998
1999    /// Helper method to require that a `GcStore` was previously allocated for
2000    /// this store, failing if it has not yet been allocated.
2001    ///
2002    /// Note that this should only be used in a context where allocation of a
2003    /// `GcStore` is sure to have already happened prior, otherwise this may
2004    /// return a confusing error to embedders which is a bug in Wasmtime.
2005    ///
2006    /// Some situations where it's safe to call this method:
2007    ///
2008    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
2009    ///   this shows proof that the `GcStore` was previously allocated.
2010    /// * During instantiation and instance's `needs_gc_heap` flag will be
2011    ///   handled and instantiation will automatically create a GC store.
2012    #[inline]
2013    #[cfg(feature = "gc")]
2014    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
2015        match &self.gc_store {
2016            Some(gc_store) => Ok(gc_store),
2017            None => bail!("GC heap not initialized yet"),
2018        }
2019    }
2020
2021    /// Same as [`Self::require_gc_store`], but mutable.
2022    #[inline]
2023    #[cfg(feature = "gc")]
2024    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
2025        match &mut self.gc_store {
2026            Some(gc_store) => Ok(gc_store),
2027            None => bail!("GC heap not initialized yet"),
2028        }
2029    }
2030
2031    /// Attempts to access the GC store that has been previously allocated.
2032    ///
2033    /// This method will return `Some` if the GC store was previously allocated.
2034    /// A `None` return value means either that the GC heap hasn't yet been
2035    /// allocated or that it does not need to be allocated for this store. Note
2036    /// that to require a GC store in a particular situation it's recommended to
2037    /// use [`Self::require_gc_store_mut`] instead.
2038    #[inline]
2039    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
2040        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
2041            debug_assert!(self.gc_store.is_none());
2042            None
2043        } else {
2044            self.gc_store.as_mut()
2045        }
2046    }
2047
2048    /// Helper to assert that a GC store was previously allocated and is
2049    /// present.
2050    ///
2051    /// # Panics
2052    ///
2053    /// This method will panic if the GC store has not yet been allocated. This
2054    /// should only be used in a context where there's an existing GC reference,
2055    /// for example, or if `ensure_gc_store` has already been called.
2056    #[inline]
2057    #[track_caller]
2058    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
2059        self.gc_store
2060            .as_ref()
2061            .expect("attempted to access the store's GC heap before it has been allocated")
2062    }
2063
2064    /// Same as [`Self::unwrap_gc_store`], but mutable.
2065    #[inline]
2066    #[track_caller]
2067    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
2068        self.gc_store
2069            .as_mut()
2070            .expect("attempted to access the store's GC heap before it has been allocated")
2071    }
2072
    /// Shared access to this store's set of embedder-rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
2077
    /// Mutable counterpart of [`Self::gc_roots`].
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
2083
    /// Pops LIFO-rooted GC references down to the given `scope` marker.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
2088
    /// Performs a garbage collection of this store's GC heap: traces all
    /// roots, then runs the collector.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self, asyncness: Asyncness) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots, asyncness).await;
        self.unwrap_gc_store_mut()
            .gc(asyncness, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
2113
    /// Populates `gc_roots_list` with every GC root in this store: Wasm stack
    /// slots, suspended continuations (when enabled), vmctx globals/tables,
    /// user-held roots, and any pending exception.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList, asyncness: Asyncness) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        // When running asynchronously, cooperatively yield between tracing
        // phases so the collection doesn't monopolize the executor.
        self.trace_wasm_stack_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            if asyncness != Asyncness::No {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2141
    /// Traces the GC roots of a single Wasm stack frame, using the owning
    /// module's stack maps (and, with the `debug` feature, its frame table) to
    /// locate live GC references.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        // Find the module whose compiled code contains this frame's PC.
        let (module_with_code, _offset) = self
            .modules()
            .module_and_code_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // With debug support, additionally consult the frame table for GC
        // references tracked there.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_with_code.module().frame_table() {
            let relpc = module_with_code
                .text_offset(pc)
                .expect("PC should be within module");
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2188
    /// Reads one on-stack GC-ref slot and, if it holds a non-null reference,
    /// adds it to `gc_roots_list` as a Wasm stack root.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be a valid, readable pointer to a `u32` GC-ref slot
    /// in a live Wasm frame for the duration of the trace.
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2205
    /// Walks the current Wasm backtrace, tracing GC roots in each frame.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2218
    /// Traces GC roots held on the stacks of this store's suspended
    /// continuations; other continuation states need no work here.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2257
    /// Enumerates GC roots stored in this store's globals and tables.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }

    /// Enumerates GC roots registered by the embedder (`self.gc_roots`).
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }

    /// Reports the store's pending exception, if any, as a GC root so it
    /// survives a collection.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            // SAFETY: NOTE(review) `add_root` presumably requires the rooted
            // reference to stay valid for the duration of the collection; the
            // pending exception lives on `self`, which outlives the trace —
            // confirm against `GcRootsList::add_root`'s contract.
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2284
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // Holding the `RegisteredType` keeps its registration alive for the
        // lifetime of this store.
        self.gc_host_alloc_types.insert(ty);
    }
2295
    /// Helper function to execute `init_gc_ref` when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating a `GcStore` where possible: the full
    /// initialization barrier is only required when
    /// `GcStore::needs_init_barrier` says so.
    pub(crate) fn init_gc_ref(
        &mut self,
        dest: &mut MaybeUninit<Option<VMGcRef>>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if GcStore::needs_init_barrier(gc_ref) {
            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
        } else {
            // No barrier needed: an i31 (or `None`) is a plain value, so a
            // bit-copy into `dest` suffices.
            dest.write(gc_ref.map(|r| r.copy_i31()));
        }
    }
2310
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating a `GcStore` where possible.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2317
2318    /// Helper function to clone `gc_ref` notably avoiding allocating a
2319    /// `GcStore` where possible.
2320    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
2321        if gc_ref.is_i31() {
2322            gc_ref.copy_i31()
2323        } else {
2324            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
2325        }
2326    }
2327
    /// Returns the amount of fuel remaining for execution in this store.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in this store's
    /// engine configuration.
    pub fn get_fuel(&self) -> Result<u64> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // SAFETY: NOTE(review) `fuel_consumed` looks like a cell shared with
        // executing wasm; this read assumes nothing is concurrently mutating
        // it — confirm against `VMStoreContext`'s documentation.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }

    /// Attempts to replenish the fuel counter consulted by executing wasm
    /// from this store's reserve; the returned flag comes from the
    /// free-function `refuel` helper.
    pub(crate) fn refuel(&mut self) -> bool {
        // SAFETY: NOTE(review) same aliasing consideration as in `get_fuel`.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }

    /// Sets the amount of fuel available to this store to `fuel`.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in this store's
    /// engine configuration.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // SAFETY: NOTE(review) same aliasing consideration as in `get_fuel`.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2360
2361    #[cfg(feature = "async")]
2362    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2363        crate::ensure!(
2364            self.engine().tunables().consume_fuel,
2365            "fuel is not configured in this store"
2366        );
2367        crate::ensure!(
2368            interval != Some(0),
2369            "fuel_async_yield_interval must not be 0"
2370        );
2371
2372        // All future entrypoints must be async to handle the case that fuel
2373        // runs out and an async yield is needed.
2374        self.set_async_required(Asyncness::Yes);
2375
2376        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2377        // Reset the fuel active + reserve states by resetting the amount.
2378        self.set_fuel(self.get_fuel()?)
2379    }
2380
    /// Returns the custom signal handler configured for this store, if any,
    /// as a raw pointer.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        Some(handler)
    }

    /// Returns a pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }

    /// Returns the default caller `VMContext` for this store.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }

    /// Returns this store's self-referential `dyn VMStore` pointer.
    ///
    /// # Panics
    ///
    /// Panics if the trait-object pointer has not been initialized yet.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2401
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }

    /// Restores the vector previously taken by `take_hostcall_val_storage`
    /// above back into the store, allowing it to be used in the future for the
    /// next wasm->host call.
    #[inline]
    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
        // Only keep `storage` if it grows the cached capacity; otherwise drop
        // it and retain the larger buffer already cached.
        if storage.capacity() > self.hostcall_val_storage.capacity() {
            self.hostcall_val_storage = storage;
        }
    }

    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }

    /// Same as `save_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
        // As in `save_hostcall_val_storage`: keep whichever buffer is larger.
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
2434
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // No two instances should ever claim the same faulting
                // address; keep scanning (rather than returning early) so the
                // assert can verify that invariant.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No instance claimed the address: this is a segfault Wasmtime should
        // not have caught, so abort the process by the best means available.
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2543
    /// Retrieve the store's protection key.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }

    /// Returns this store's component-model state — call contexts, the host
    /// handle table, and host resource data — as a tuple of disjoint mutable
    /// borrows so callers can use all pieces simultaneously.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }

    /// Records that a component instance was created within this store; the
    /// count is consulted during store teardown.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }

    /// Like `component_resource_state`, but additionally resolves and returns
    /// the `ComponentInstance` for `instance` from this store's data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }

    /// Like `component_resource_state_with_instance`, but additionally
    /// returns this store's concurrent state, if any is configured.
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance_and_concurrent_state(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::HandleTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
        Option<&mut concurrent::ConcurrentState>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
            self.concurrent_state.as_mut(),
        )
    }
2615
    /// Returns mutable access to this store's async fiber state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }

    /// Returns this store's concurrent (component-model-async) state.
    ///
    /// # Panics
    ///
    /// Panics if concurrency support is not enabled for this store.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState {
        debug_assert!(self.concurrency_support());
        self.concurrent_state.as_mut().unwrap()
    }

    /// Returns whether this store supports component-model concurrency.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn concurrency_support(&self) -> bool {
        // The presence of `concurrent_state` should always agree with the
        // engine-level tunable; double-check that in debug builds.
        let support = self.concurrent_state.is_some();
        debug_assert_eq!(support, self.engine().tunables().concurrency_support);
        support
    }

    /// Returns whether this store has a protection key configured.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }

    /// Returns a short-lived reference to the executor (interpreter or
    /// native) used to run wasm in this store.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }

    /// Swaps this store's executor with `executor`.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }

    /// Returns the stack-unwinding implementation matching this store's
    /// executor.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2660
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    ///
    /// The returned raw pointer therefore stays valid for the store's
    /// lifetime: the boxed continuation is kept alive in
    /// `self.continuations`.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        // Continuation stacks are sized by the engine's async stack size
        // configuration.
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
2675
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be use to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id ahead of the allocation; the `assert_eq!` below
        // double-checks the prediction against the actual pushed index.
        let id = self.instances.next_key();

        // Dummy instances use the caller-provided allocator; real modules use
        // the engine's configured allocator.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })?
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })?
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2751
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }

    /// Take a pending exception, if any, clearing it from the store.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }

    /// Tests whether there is a pending exception.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }

    /// Takes the pending exception, if any, and roots it for safe handling.
    #[cfg(feature = "gc")]
    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
        let vmexnref = self.take_pending_exception()?;
        // `AutoAssertNoGc`: no collection may happen while we hold the raw,
        // not-yet-rooted `vmexnref`.
        let mut nogc = AutoAssertNoGc::new(self);
        Some(Rooted::new(&mut nogc, vmexnref.into()))
    }

    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(
        &mut self,
    ) -> Result<Option<OwnedRooted<ExnRef>>, crate::error::OutOfMemory> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception
            .take()
            .map(|vmexnref| {
                // Clone the GC reference so the store retains its own copy as
                // the pending exception while the caller receives a rooted
                // handle to the same object.
                let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
                nogc.pending_exception = Some(cloned.into_exnref_unchecked());
                OwnedRooted::new(&mut nogc, vmexnref.into())
            })
            .transpose()
    }

    /// Records `exception` as this store's pending exception.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        let mut nogc = AutoAssertNoGc::new(self);
        let exnref = exception._to_raw(&mut nogc).unwrap();
        // A null raw value is not a valid exception; `from_raw_u32`
        // returning `None` would indicate exactly that, hence the `expect`.
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2806
    /// Sets the epoch deadline to `delta` ticks past the engine's current
    /// epoch.
    #[cfg(target_has_atomic = "64")]
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let current_epoch = self.engine().current_epoch();
        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
        *epoch_deadline = current_epoch + delta;
    }

    /// Returns the currently-configured epoch deadline.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }

    /// Validates that a synchronous (non-`*_async`) entrypoint may be used
    /// with this store.
    ///
    /// # Errors
    ///
    /// Returns an error if this store has been flagged as requiring async
    /// entrypoints.
    #[inline]
    pub(crate) fn validate_sync_call(&self) -> Result<()> {
        #[cfg(feature = "async")]
        if self.async_state.async_required {
            bail!("store configuration requires that `*_async` functions are used instead");
        }
        Ok(())
    }

    /// Returns whether this store is presently on a fiber and is allowed to
    /// block via `block_on` with fibers.
    pub(crate) fn can_block(&mut self) -> bool {
        // The `if true` wrapper exists so the `#[cfg]` attribute can apply to
        // a statement; with the `async` feature enabled this always takes the
        // early return.
        #[cfg(feature = "async")]
        if true {
            return self.fiber_async_state_mut().can_block();
        }

        false
    }

    /// No-op stand-in for the async-enabled `set_async_required`: without the
    /// `async` feature `Asyncness` only has the `No` variant, so the empty
    /// match is exhaustive and there is nothing to record.
    #[cfg(not(feature = "async"))]
    pub(crate) fn set_async_required(&mut self, asyncness: Asyncness) {
        match asyncness {
            Asyncness::No => {}
        }
    }
2849}
2850
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2869
unsafe impl<T> VMStore for StoreInner<T> {
    /// Upcasts to the component-model-async store interface.
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    /// Splits this store into its configured resource limiter (if any),
    /// resolved against the store's data, plus the underlying `StoreOpaque`.
    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        // Invoke the configured limiter-accessor closure, if any, against the
        // store's data to obtain the actual limiter.
        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    /// Invokes the configured epoch-deadline callback, defaulting to
    /// `UpdateDeadline::Interrupt` when none is installed.
    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }

    /// Dispatches `event` to the configured debug handler, blocking on the
    /// handler's future via the store's fiber machinery.
    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> crate::Result<()> {
        // NOTE(review): the handler is `take`n here and not visibly restored
        // in this method — presumably `handle` or surrounding machinery
        // re-installs it (or the handler is one-shot); confirm at the
        // definition of `debug_handler`.
        if let Some(handler) = self.debug_handler.take() {
            if !self.can_block() {
                bail!("could not invoke debug handler without async context");
            }
            log::trace!("about to raise debug event {event:?}");
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
2935
impl<T> StoreInner<T> {
    /// Configures epoch-deadline expiry to interrupt execution: with no
    /// callback installed, `new_epoch_updated_deadline` yields
    /// `UpdateDeadline::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to be invoked whenever the epoch deadline is
    /// reached, letting it decide how (or whether) execution continues.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2950
2951impl<T: Default> Default for Store<T> {
2952    fn default() -> Store<T> {
2953        Store::new(&Engine::default(), T::default())
2954    }
2955}
2956
impl<T: fmt::Debug> fmt::Debug for Store<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Print the inner pointer (identity) and the user's data; the rest of
        // the store's internals are intentionally omitted.
        let inner = &**self.inner as *const StoreInner<T>;
        f.debug_struct("Store")
            .field("inner", &inner)
            .field("data", self.inner.data())
            .finish()
    }
}
2966
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        unsafe {
            // Drop the embedder's data first, then the rest of the store's
            // internals (both are `ManuallyDrop`, so ordering is explicit).
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2978
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap, if any, to the allocator along with its
            // backing linear memory.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                // NOTE(review): dummy instances are deallocated with a fresh
                // `OnDemandInstanceAllocator` — this assumes they were
                // allocated with an OnDemand-flavored allocator (see
                // `AllocateInstanceKind::Dummy`); confirm.
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            // Undo the per-instance count increments performed by
            // `push_component_instance`.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
3017
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
/// Internal helper trait to uniformly obtain a `&mut StoreOpaque` from the
/// various store-like types (`StoreOpaque` itself, `dyn VMStore`, `Store<T>`,
/// `StoreInner<T>`, and mutable references to any of these).
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}

impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}

impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}

impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}

impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}

// Blanket impl so `&mut T` works wherever `T: AsStoreOpaque` does.
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
3057
/// Helper enum to indicate, in some function contexts, whether `async` should
/// be taken advantage of or not.
///
/// This is used throughout Wasmtime where internal functions are all `async`
/// but external functions might be either sync or `async`. If the external
/// function is sync, then internally Wasmtime shouldn't yield as it won't do
/// anything. If the external function is `async`, however, yields are fine.
///
/// An example of this is GC. Right now GC will cooperatively yield after phases
/// of GC have passed, but this cooperative yielding is only enabled with
/// `Asyncness::Yes`.
///
/// This enum is additionally conditionally defined such that `Yes` is only
/// present in `async`-enabled builds. That ensures that this compiles down to a
/// zero-sized type in `async`-disabled builds in case that interests embedders.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum Asyncness {
    /// Don't do async things, don't yield, etc. It's ok to execute an `async`
    /// function, but it should be validated ahead of time that when doing so a
    /// yield isn't possible (e.g. via the `validate_sync_*` methods on
    /// `Store`).
    No,

    /// Async things are OK. This should only be used when the API entrypoint is
    /// itself `async`.
    #[cfg(feature = "async")]
    Yes,
}
3085
3086impl core::ops::BitOr for Asyncness {
3087    type Output = Self;
3088
3089    fn bitor(self, rhs: Self) -> Self::Output {
3090        match (self, rhs) {
3091            (Asyncness::No, Asyncness::No) => Asyncness::No,
3092            #[cfg(feature = "async")]
3093            (Asyncness::Yes, _) | (_, Asyncness::Yes) => Asyncness::Yes,
3094        }
3095    }
3096}
3097
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal stand-in for a store's fuel state, used to exercise the
    /// module's free functions `get_fuel`, `refuel`, and `set_fuel` without
    /// constructing a full `Store`.
    struct FuelTank {
        // Fuel consumed so far; per the assertions below, a negative value
        // represents fuel that is immediately available to burn.
        pub consumed_fuel: i64,
        // Fuel held back, to be injected on later refuels.
        pub reserve_fuel: u64,
        // When set, bounds how much fuel is made active at a time.
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        // An empty tank: nothing consumed, nothing reserved, no interval.
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        // Forwards to the module-level `get_fuel` with this tank's state.
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        // Forwards to the module-level `refuel`; returns whether refueling
        // succeeded (i.e. there was fuel left to inject).
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        // Forwards to the module-level `set_fuel` with this tank's state.
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        // With no yield interval all fuel becomes immediately active
        // (negative `consumed_fuel`) and nothing is reserved.
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With an interval of 10, only one interval's worth goes active;
        // the remainder is held in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        // Round-tripping through set_fuel/get_fuel must be exact even for
        // values at and beyond `i64::MAX` (which can't fit in the signed
        // `consumed_fuel` counter alone).
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        // Small interval: one interval active, rest reserved, total exact.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // Interval larger than `i64::MAX`: the active portion is clamped to
        // `i64::MAX` (the most `consumed_fuel` can represent) without losing
        // any total fuel.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit as some
        // instructions can consume multiple units of fuel at once. Refueling
        // should be strict in its consumption and not add more fuel than
        // there is.
        let mut tank = FuelTank::new();

        // Overshoot of 4 is paid out of the reserve, then a full interval
        // (10) is made active again.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        // Total fuel (get_fuel) is preserved across a successful refuel.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // When the overshoot exceeds the reserve there is no fuel left:
        // refuel fails and leaves the state untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }

    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        // Simulates a Wasm-side mutation: goes through the raw
        // `store_data` pointer in the `VMStoreContext` rather than through
        // `data_mut()`.
        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            // SAFETY: `store` is borrowed exclusively (`&mut`), so no other
            // Rust reference aliases the store's data while we write through
            // the raw pointer.
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        // Interleave safe-API mutations with raw-pointer mutations and check
        // each is observed by the other.
        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}