wasmtime/runtime/store.rs

//! Wasmtime's "store" type
//!
//! This module, and its submodules, contain the `Store` type and various types
//! used to interact with it. At first glance this is a pretty confusing module
//! where you need to know the difference between:
//!
//! * `Store<T>`
//! * `StoreContext<T>`
//! * `StoreContextMut<T>`
//! * `AsContext`
//! * `AsContextMut`
//! * `StoreInner<T>`
//! * `StoreOpaque`
//! * `StoreData`
//!
//! There's... quite a lot going on here, and it's easy to be confused. This
//! comment is ideally going to serve the purpose of clarifying what all these
//! types are for and why they're motivated.
//!
//! First it's important to know what's "internal" and what's "external". Almost
//! everything above is defined as `pub`, but only some of the items are
//! reexported to the outside world to be usable by consumers of this crate.
//! Otherwise all items are `pub` within this `store` module, and the `store`
//! module is private to the `wasmtime` crate. Notably `Store<T>`,
//! `StoreContext<T>`, `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are
//! all public interfaces to the `wasmtime` crate. You can think of these as:
//!
//! * `Store<T>` - an owned reference to a store, the "root of everything"
//! * `StoreContext<T>` - basically `&StoreInner<T>`
//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
//!   Explained later.
//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
//!
//! Next comes the internal structure of the `Store<T>` itself. This looks like:
//!
//! * `Store<T>` - this type is just a single pointer in size. It's primarily
//!   just intended to be consumed by the outside world. Note that being "just a
//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
//!   enables it to store a pointer to its own trait object which doesn't need
//!   to change over time.
//!
//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
//!   stored inside the `Box`. This is the general Rust pattern when one struct
//!   is a layer over another. The surprising part, though, is that this is
//!   further subdivided. This structure only contains things which actually
//!   need `T` itself. The downside of this structure is that it's always
//!   generic and means that code is monomorphized into consumer crates. We
//!   strive to have things be as monomorphic as possible in `wasmtime` so this
//!   type is not heavily used.
//!
//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
//!   "store" but it doesn't have access to the `T`. This is the primary
//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
//!   internals of Wasmtime.
//!
//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
//!   All references to Wasm items within a `Store` are actually indices into a
//!   table in this structure, and the `StoreData` being separate makes it a bit
//!   easier to manage/define/work with. There's no real fundamental reason this
//!   is split out, although sometimes it's useful to have borrows into these
//!   tables that are separate from the rest of `StoreOpaque`.
//!
//! A major caveat with these representations is that the internal `&mut
//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
//! we want to provide mutable, but not destructive, access to the contents of a
//! `Store`. For example if a `StoreInner<T>` were replaced with some other
//! `StoreInner<T>` then that would drop live instances, possibly those
//! currently executing beneath the current stack frame. This would not be a
//! safe operation.
//!
//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
//! StoreOpaque` internally, has to be careful to never actually destroy the
//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
//! `wasmtime`, must uphold for the public interface to be safe.

use crate::instance::InstanceData;
use crate::linker::Definition;
use crate::module::RegisteredModuleId;
use crate::prelude::*;
use crate::runtime::vm::mpk::ProtectionKey;
#[cfg(feature = "gc")]
use crate::runtime::vm::GcRootsList;
use crate::runtime::vm::{
    ExportGlobal, GcStore, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
    Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SignalHandler,
    StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMRuntimeLimits,
};
use crate::trampoline::VMHostGlobalContext;
use crate::RootSet;
use crate::{module::ModuleRegistry, Engine, Module, Trap, Val, ValRaw};
use crate::{Global, Instance, Memory, Table, Uninhabited};
use alloc::sync::Arc;
use core::fmt;
use core::marker;
use core::mem::{self, ManuallyDrop};
use core::num::NonZeroU64;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use wasmtime_environ::TripleExt;

mod context;
pub use self::context::*;
mod data;
pub use self::data::*;
mod func_refs;
use func_refs::FuncRefs;
#[cfg(feature = "async")]
mod async_;
#[cfg(all(feature = "async", feature = "call-hook"))]
pub use self::async_::CallHookHandler;
#[cfg(feature = "async")]
use self::async_::*;

/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
///
/// All WebAssembly instances and items will be attached to and refer to a
/// [`Store`]. For example instances, functions, globals, and tables are all
/// attached to a [`Store`]. Instances are created by instantiating a
/// [`Module`](crate::Module) within a [`Store`].
///
/// A [`Store`] is intended to be a short-lived object in a program. No form
/// of GC is implemented at this time so once an instance is created within a
/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
/// This makes [`Store`] unsuitable for creating an unbounded number of
/// instances in it because [`Store`] will never release this memory. It's
/// recommended to have a [`Store`] correspond roughly to the lifetime of a
/// "main instance" that an embedding is interested in executing.
///
/// ## Type parameter `T`
///
/// Each [`Store`] has a type parameter `T` associated with it. This `T`
/// represents state defined by the host. This state will be accessible through
/// the [`Caller`](crate::Caller) type that host-defined functions get access
/// to. This `T` is suitable for storing `Store`-specific information which
/// imported functions may want access to.
///
/// The data `T` can be accessed through methods like [`Store::data`] and
/// [`Store::data_mut`].
///
/// ## Stores, contexts, oh my
///
/// Most methods in Wasmtime take something of the form
/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
/// the first argument. These two traits allow ergonomically passing in the
/// context you currently have to any method. The primary two sources of
/// contexts are:
///
/// * `Store<T>`
/// * `Caller<'_, T>`
///
/// corresponding to what you create and what you have access to in a host
/// function. You can also explicitly acquire a [`StoreContext`] or
/// [`StoreContextMut`] and pass that around as well.
///
/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
/// form of context you have you can call various methods, create objects, etc.
///
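/// As a brief, hedged sketch of how these traits are typically used, a helper
/// function can accept `impl AsContextMut<Data = T>` and then be handed either
/// a `&mut Store<T>`, a [`Caller`](crate::Caller), or an explicit context (the
/// `bump_counter` helper below is illustrative, not part of this crate):
///
/// ```
/// use wasmtime::{AsContextMut, Engine, Store};
///
/// fn bump_counter(mut ctx: impl AsContextMut<Data = u32>) {
///     // `as_context_mut` yields a `StoreContextMut<'_, u32>`.
///     *ctx.as_context_mut().data_mut() += 1;
/// }
///
/// let engine = Engine::default();
/// let mut store = Store::new(&engine, 0u32);
/// bump_counter(&mut store);
/// assert_eq!(*store.data(), 1);
/// ```
///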
/// ## Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`](crate::Config) for more information).
///
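/// For example (a minimal sketch):
///
/// ```
/// use wasmtime::Store;
///
/// // Equivalent to `Store::new(&Engine::default(), ())`.
/// let _store = Store::<()>::default();
/// ```
///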
/// ## Cross-store usage of items
///
/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
/// [`Store`]. The store they belong to is the one they were created with
/// (passed in as a parameter) or instantiated with. This store is the only
/// store that can be used to interact with wasm items after they're created.
///
/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
/// operations is incorrect. In other words it's considered a programmer error
/// rather than a recoverable error for the wrong [`Store`] to be used when
/// calling APIs.
pub struct Store<T> {
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}

#[derive(Copy, Clone, Debug)]
/// Passed as an argument to the callback configured via [`Store::call_hook`] to
/// indicate a state transition in the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        match self {
            CallHook::ReturningFromWasm | CallHook::CallingHost => true,
            _ => false,
        }
    }
    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        match self {
            CallHook::ReturningFromHost | CallHook::CallingWasm => true,
            _ => false,
        }
    }
}

/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    limiter: Option<ResourceLimiterInner<T>>,
    call_hook: Option<CallHookInner<T>>,
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // for comments about `ManuallyDrop`, see `Store::into_data`
    data: ManuallyDrop<T>,
}

enum ResourceLimiterInner<T> {
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}

enum CallHookInner<T> {
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    #[allow(dead_code)]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}

/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
}

// Forward methods on `StoreOpaque` to also be usable on `StoreInner<T>`
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because values of this structure do
    // indeed alias themselves. An example of this is that `default_caller`
    // holds a `*mut dyn Store` to the address of this `StoreOpaque` itself,
    // indeed aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    engine: Engine,
    runtime_limits: VMRuntimeLimits,
    instances: Vec<StoreInstance>,
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    signal_handler: Option<SignalHandler>,
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: Vec<StoreBox<VMHostGlobalContext>>,

    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    ///
    /// Note that this is `ManuallyDrop` because it needs to be dropped before
    /// `rooted_host_funcs` below. This structure contains pointers which are
    /// otherwise kept alive by the `Arc` references in `rooted_host_funcs`.
    store_data: ManuallyDrop<StoreData>,
    default_caller: InstanceHandle,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// A list of lists of definitions which have been used to instantiate
    /// within this `Store`.
    ///
    /// Note that not all instantiations end up pushing to this list. At the
    /// time of this writing only the `InstancePre<T>` type will push to this
    /// list. Pushes to this list are typically accompanied by
    /// `HostFunc::to_func_store_rooted` to clone an `Arc` here once which
    /// preserves a strong reference to the `Arc` for each `HostFunc` stored
    /// within the list of `Definition`s.
    ///
    /// Note that this is `ManuallyDrop` as it must be dropped after
    /// `store_data` above, where the function pointers are stored.
    rooted_host_funcs: ManuallyDrop<Vec<Arc<[Definition]>>>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: crate::runtime::vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: crate::runtime::vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}

/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
enum Executor {
    Interpreter(Interpreter),
    #[cfg(has_host_compiler_backend)]
    Native,
}

/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    Interpreter(InterpreterRef<'a>),
    #[cfg(has_host_compiler_backend)]
    Native,
}

/// An RAII type to automatically mark a region of code as one where GC is not
/// allowed to occur.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    store: &'a mut StoreOpaque,
    entered: bool,
}

impl<'a> AutoAssertNoGc<'a> {
    #[inline]
    pub fn new(store: &'a mut StoreOpaque) -> Self {
        let entered = if !cfg!(feature = "gc") {
            false
        } else if let Some(gc_store) = store.gc_store.as_mut() {
            gc_store.gc_heap.enter_no_gc_scope();
            true
        } else {
            false
        };

        AutoAssertNoGc { store, entered }
    }

    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
    /// disables checks for no GC happening for the duration of this value.
    ///
    /// This is used when it is statically otherwise known that a GC doesn't
    /// happen for the various types involved.
    ///
    /// # Unsafety
    ///
    /// This method is `unsafe` as it does not provide the same safety
    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
    /// caller that a GC doesn't happen.
    #[inline]
    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
        if cfg!(debug_assertions) {
            AutoAssertNoGc::new(store)
        } else {
            AutoAssertNoGc {
                store,
                entered: false,
            }
        }
    }
}

impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}

impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}

/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    handle: InstanceHandle,
    kind: StoreInstanceKind,
}

enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}

impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
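    ///
    /// # Examples
    ///
    /// A brief sketch of creating a store with some host state (the
    /// `MyHostState` type below is illustrative, not part of this crate):
    ///
    /// ```
    /// use wasmtime::{Engine, Store};
    ///
    /// struct MyHostState {
    ///     counter: u32,
    /// }
    ///
    /// let engine = Engine::default();
    /// let store = Store::new(&engine, MyHostState { counter: 0 });
    /// assert_eq!(store.data().counter, 0);
    /// ```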
    pub fn new(engine: &Engine, data: T) -> Self {
        let pkey = engine.allocator().next_available_pkey();

        let mut inner = Box::new(StoreInner {
            inner: StoreOpaque {
                _marker: marker::PhantomPinned,
                engine: engine.clone(),
                runtime_limits: Default::default(),
                instances: Vec::new(),
                #[cfg(feature = "component-model")]
                num_component_instances: 0,
                signal_handler: None,
                gc_store: None,
                gc_roots: RootSet::default(),
                #[cfg(feature = "gc")]
                gc_roots_list: GcRootsList::default(),
                #[cfg(feature = "gc")]
                gc_host_alloc_types: Default::default(),
                modules: ModuleRegistry::default(),
                func_refs: FuncRefs::default(),
                host_globals: Vec::new(),
                instance_count: 0,
                instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
                memory_count: 0,
                memory_limit: crate::DEFAULT_MEMORY_LIMIT,
                table_count: 0,
                table_limit: crate::DEFAULT_TABLE_LIMIT,
                #[cfg(feature = "async")]
                async_state: AsyncState::default(),
                fuel_reserve: 0,
                fuel_yield_interval: None,
                store_data: ManuallyDrop::new(StoreData::new()),
                default_caller: InstanceHandle::null(),
                hostcall_val_storage: Vec::new(),
                wasm_val_raw_storage: Vec::new(),
                rooted_host_funcs: ManuallyDrop::new(Vec::new()),
                pkey,
                #[cfg(feature = "component-model")]
                component_host_table: Default::default(),
                #[cfg(feature = "component-model")]
                component_calls: Default::default(),
                #[cfg(feature = "component-model")]
                host_resource_data: Default::default(),
                #[cfg(has_host_compiler_backend)]
                executor: if cfg!(feature = "pulley") && engine.target().is_pulley() {
                    Executor::Interpreter(Interpreter::new(engine))
                } else {
                    Executor::Native
                },
                #[cfg(not(has_host_compiler_backend))]
                executor: {
                    debug_assert!(engine.target().is_pulley());
                    Executor::Interpreter(Interpreter::new(engine))
                },
            },
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        inner.default_caller = {
            let module = Arc::new(wasmtime_environ::Module::default());
            let shim = ModuleRuntimeInfo::bare(module);
            let allocator = OnDemandInstanceAllocator::default();
            allocator
                .validate_module(shim.env_module(), shim.offsets())
                .unwrap();
            let mut instance = unsafe {
                allocator
                    .allocate_module(InstanceAllocationRequest {
                        host_state: Box::new(()),
                        imports: Default::default(),
                        store: StorePtr::empty(),
                        runtime_info: &shim,
                        wmemcheck: engine.config().wmemcheck,
                        pkey: None,
                        tunables: engine.tunables(),
                    })
                    .expect("failed to allocate default callee")
            };

            // Note the erasure of the lifetime here into `'static`, so in
            // general usage of this trait object must be strictly bounded to
            // the `Store` itself, and this is an invariant that we have to
            // maintain throughout Wasmtime.
            unsafe {
                let traitobj = mem::transmute::<
                    NonNull<dyn crate::runtime::vm::VMStore + '_>,
                    NonNull<dyn crate::runtime::vm::VMStore + 'static>,
                >(NonNull::from(&mut *inner));
                instance.set_store(traitobj);
                instance
            }
        };

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }

    /// Access the underlying data owned by this `Store`.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }

    /// Access the underlying data owned by this `Store`.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }

    /// Consumes this [`Store`], destroying it, and returns the underlying data.
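    ///
    /// A brief sketch of round-tripping host data through a store:
    ///
    /// ```
    /// use wasmtime::{Engine, Store};
    ///
    /// let engine = Engine::default();
    /// let store = Store::new(&engine, String::from("hello"));
    /// let data = store.into_data();
    /// assert_eq!(data, "hello");
    /// ```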
    pub fn into_data(mut self) -> T {
        self.inner.flush_fiber_stack();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }

    /// Configures the [`ResourceLimiter`] used to limit resource creation
    /// within this [`Store`].
    ///
    /// Whenever resources such as linear memory, tables, or instances are
    /// allocated the `limiter` specified here is invoked with the store's data
    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
    /// being allocated. The returned [`ResourceLimiter`] is intended to live
    /// within the `T` itself, for example by storing a
    /// [`StoreLimits`](crate::StoreLimits).
    ///
    /// Note that this limiter is only used to limit the creation/growth of
    /// resources in the future, this does not retroactively attempt to apply
    /// limits to the [`Store`].
    ///
    /// # Examples
    ///
    /// ```
    /// use wasmtime::*;
    ///
    /// struct MyApplicationState {
    ///     my_state: u32,
    ///     limits: StoreLimits,
    /// }
    ///
    /// let engine = Engine::default();
    /// let my_state = MyApplicationState {
    ///     my_state: 42,
    ///     limits: StoreLimitsBuilder::new()
    ///         .memory_size(1 << 20 /* 1 MB */)
    ///         .instances(2)
    ///         .build(),
    /// };
    /// let mut store = Store::new(&engine, my_state);
    /// store.limiter(|state| &mut state.limits);
    ///
    /// // Creation of smaller memories is allowed
    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
    ///
    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
    /// // configured
    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
    ///
    /// // The number of instances in this store is limited to 2, so the third
    /// // instance here should fail.
    /// let module = Module::new(&engine, "(module)").unwrap();
    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
    /// ```
    ///
    /// [`ResourceLimiter`]: crate::ResourceLimiter
    pub fn limiter(
        &mut self,
        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
    ) {
        // Apply the limits on instances, tables, and memory given by the limiter:
        let inner = &mut self.inner;
        let (instance_limit, table_limit, memory_limit) = {
            let l = limiter(&mut inner.data);
            (l.instances(), l.tables(), l.memories())
        };
        let innermost = &mut inner.inner;
        innermost.instance_limit = instance_limit;
        innermost.table_limit = table_limit;
        innermost.memory_limit = memory_limit;

        // Save the limiter accessor function:
        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
    }

    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
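    ///
    /// A hedged sketch of counting transitions into host code (this requires
    /// the `call-hook` crate feature to be enabled; the counter state below is
    /// illustrative):
    ///
    /// ```
    /// use wasmtime::{Engine, Store};
    ///
    /// let engine = Engine::default();
    /// let mut store = Store::new(&engine, 0u64);
    /// store.call_hook(|mut ctx, hook| {
    ///     if hook.entering_host() {
    ///         *ctx.data_mut() += 1;
    ///     }
    ///     Ok(())
    /// });
    /// ```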
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }

    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }

    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.inner.gc()
    }

    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }

    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
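    ///
    /// # Examples
    ///
    /// A small sketch of enabling fuel and giving wasm a budget to execute with:
    ///
    /// ```
    /// use wasmtime::{Config, Engine, Store};
    ///
    /// let mut config = Config::new();
    /// config.consume_fuel(true);
    /// let engine = Engine::new(&config).unwrap();
    ///
    /// let mut store = Store::new(&engine, ());
    /// store.set_fuel(10_000).unwrap();
    /// assert_eq!(store.get_fuel().unwrap(), 10_000);
    /// ```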
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }

    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
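    ///
    /// A hedged configuration sketch (no wasm is actually executed here):
    ///
    /// ```
    /// use wasmtime::{Config, Engine, Store};
    ///
    /// let mut config = Config::new();
    /// config.async_support(true);
    /// config.consume_fuel(true);
    /// let engine = Engine::new(&config).unwrap();
    ///
    /// let mut store = Store::new(&engine, ());
    /// store.set_fuel(1_000_000).unwrap();
    /// // Yield back to the async executor every 10_000 units of fuel.
    /// store.fuel_async_yield_interval(Some(10_000)).unwrap();
    /// ```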
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.inner.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store has an epoch deadline of 0 (which has always
    /// "elapsed") and will therefore trap immediately. For stores with epoch
    /// interruption enabled this method must be called to set the deadline to
    /// some future epoch.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
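    ///
    /// A brief sketch of arming an epoch deadline (the deadline is expressed
    /// in ticks of `Engine::increment_epoch`):
    ///
    /// ```
    /// use wasmtime::{Config, Engine, Store};
    ///
    /// let mut config = Config::new();
    /// config.epoch_interruption(true);
    /// let engine = Engine::new(&config).unwrap();
    ///
    /// let mut store = Store::new(&engine, ());
    /// // With the default trapping behavior, wasm running in this store will
    /// // trap once the engine's epoch advances two or more ticks.
    /// store.set_epoch_deadline(2);
    ///
    /// // Typically another thread or a timer calls `increment_epoch`.
    /// engine.increment_epoch();
    /// ```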
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }

    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] to yield to the async executor before
    /// updating the epoch deadline. Alternatively, the callback may
    /// return [`UpdateDeadline::Continue`] to update the epoch deadline
    /// immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
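    ///
    /// A hedged sketch of extending the deadline from a callback:
    ///
    /// ```
    /// use wasmtime::{Config, Engine, Store, UpdateDeadline};
    ///
    /// let mut config = Config::new();
    /// config.epoch_interruption(true);
    /// let engine = Engine::new(&config).unwrap();
    ///
    /// let mut store = Store::new(&engine, ());
    /// store.set_epoch_deadline(1);
    /// store.epoch_deadline_callback(|_cx| {
    ///     // Grant one more tick each time the deadline is reached.
    ///     Ok(UpdateDeadline::Continue(1))
    /// });
    /// ```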
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
}

impl<'a, T> StoreContext<'a, T> {
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}

impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self) {
        self.0.gc()
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }
}

impl<T> StoreInner<T> {
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        #[cfg_attr(not(feature = "call-hook"), allow(unreachable_patterns))]
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => unsafe {
                self.inner
                    .async_cx()
                    .ok_or_else(|| anyhow!("couldn't grab async_cx for call hook"))?
                    .block_on(
                        handler
                            .handle_call_event((&mut *self).as_context_mut(), s)
                            .as_mut(),
                    )?
            },

            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}

fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    fuel_reserve.saturating_add_signed(-injected_fuel)
}

// Add remaining fuel from the reserve into the active fuel if there is any left.
fn refuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
) -> bool {
    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
    if fuel > 0 {
        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
        true
    } else {
        false
    }
}

fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    let interval = yield_interval.unwrap_or(NonZeroU64::MAX).get();
    // If we're yielding periodically we only store the "active" amount of fuel into consumed_ptr
    // for the VM to use.
    let injected = core::cmp::min(interval, new_fuel_amount);
    // Fuel in the VM is stored as an i64, so we have to cap the amount of fuel we inject into the
    // VM at once to be i64 range.
    let injected = core::cmp::min(injected, i64::MAX as u64);
    // Add whatever is left over after injection to the reserve for later use.
    *fuel_reserve = new_fuel_amount - injected;
    // Within the VM we increment to count fuel, so inject a negative amount. The VM will halt when
    // this counter is positive.
    *injected_fuel = -(injected as i64);
}
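
// A minimal illustrative test of the fuel arithmetic above (a sketch added for
// exposition, not part of the original file). With a yield interval of 10 and
// 25 new units of fuel, 10 units are injected into the VM (stored negated) and
// the remaining 15 are held in reserve for later refueling.
#[cfg(test)]
mod fuel_arithmetic_example {
    use super::{get_fuel, refuel, set_fuel};
    use core::num::NonZeroU64;

    #[test]
    fn injects_up_to_the_interval_and_reserves_the_rest() {
        let mut injected = 0i64;
        let mut reserve = 0u64;
        let interval = NonZeroU64::new(10);

        set_fuel(&mut injected, &mut reserve, interval, 25);
        assert_eq!(injected, -10);
        assert_eq!(reserve, 15);
        assert_eq!(get_fuel(injected, reserve), 25);

        // Simulate the VM consuming all injected fuel (the counter reaches 0),
        // then refill the active fuel from the reserve.
        injected = 0;
        assert!(refuel(&mut injected, &mut reserve, interval));
        assert_eq!(injected, -10);
        assert_eq!(reserve, 5);
        assert_eq!(get_fuel(injected, reserve), 15);
    }
}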

#[doc(hidden)]
impl StoreOpaque {
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }

    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
            let new = slot.saturating_add(amt);
            if new > max {
                bail!(
                    "resource limit exceeded: {} count too high at {}",
                    desc,
                    new
                );
            }
            *slot = new;
            Ok(())
        }

        let module = module.env_module();
        let memories = module.num_defined_memories();
        let tables = module.num_defined_tables();

        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
        bump(
            &mut self.memory_count,
            self.memory_limit,
            memories,
            "memory",
        )?;
        bump(&mut self.table_count, self.table_limit, tables, "table")?;

        Ok(())
    }

    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }

    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }

    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }

    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }

    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }

    pub(crate) fn func_refs(&mut self) -> &mut FuncRefs {
        &mut self.func_refs
    }

    pub(crate) fn fill_func_refs(&mut self) {
        self.func_refs.fill(&self.modules);
    }

    pub(crate) fn push_instance_pre_func_refs(&mut self, func_refs: Arc<[VMFuncRef]>) {
        self.func_refs.push_instance_pre_func_refs(func_refs);
    }

    pub(crate) fn host_globals(&mut self) -> &mut Vec<StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }

    pub fn module_for_instance(&self, instance: InstanceId) -> Option<&'_ Module> {
        match self.instances[instance.0].kind {
            StoreInstanceKind::Dummy => None,
            StoreInstanceKind::Real { module_id } => {
                let module = self
                    .modules()
                    .lookup_module_by_id(module_id)
                    .expect("should always have a registered module for real instances");
                Some(module)
            }
        }
    }

    pub unsafe fn add_instance(
        &mut self,
        handle: InstanceHandle,
        module_id: RegisteredModuleId,
    ) -> InstanceId {
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Real { module_id },
        });
        InstanceId(self.instances.len() - 1)
    }

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    pub unsafe fn add_dummy_instance(&mut self, handle: InstanceHandle) -> InstanceId {
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Dummy,
        });
        InstanceId(self.instances.len() - 1)
    }

    pub fn instance(&self, id: InstanceId) -> &InstanceHandle {
        &self.instances[id.0].handle
    }

    pub fn instance_mut(&mut self, id: InstanceId) -> &mut InstanceHandle {
        &mut self.instances[id.0].handle
    }

    /// Get all instances (ignoring dummy instances) within this store.
    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
        let instances = self
            .instances
            .iter()
            .enumerate()
            .filter_map(|(idx, inst)| {
                let id = InstanceId::from_index(idx);
                if let StoreInstanceKind::Dummy = inst.kind {
                    None
                } else {
                    Some(InstanceData::from_id(id))
                }
            })
            .collect::<Vec<_>>();
        instances
            .into_iter()
            .map(|i| Instance::from_wasmtime(i, self))
    }

    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        let mems = self
            .instances
            .iter_mut()
            .flat_map(|instance| instance.handle.defined_memories())
            .collect::<Vec<_>>();
        mems.into_iter()
            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
    }

    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
1320
1321        struct TempTakeInstances<'a> {
1322            instances: Vec<StoreInstance>,
1323            store: &'a mut StoreOpaque,
1324        }
1325
1326        impl<'a> TempTakeInstances<'a> {
1327            fn new(store: &'a mut StoreOpaque) -> Self {
1328                let instances = mem::take(&mut store.instances);
1329                Self { instances, store }
1330            }
1331        }
1332
1333        impl Drop for TempTakeInstances<'_> {
1334            fn drop(&mut self) {
1335                assert!(self.store.instances.is_empty());
1336                self.store.instances = mem::take(&mut self.instances);
1337            }
1338        }
1339
1340        let mut temp = TempTakeInstances::new(self);
1341        for instance in temp.instances.iter_mut() {
1342            for table in instance.handle.defined_tables() {
1343                let table = unsafe { Table::from_wasmtime_table(table, temp.store) };
1344                f(temp.store, table);
1345            }
1346        }
1347    }
1348
1349    /// Iterate over all globals (host- or Wasm-defined) within this store.
1350    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
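        // Same temporary-take trick as in `for_each_table` above, except that
        // host-created globals live in `self.host_globals` rather than in any
        // instance, so both collections are moved out and restored on drop.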
1351        struct TempTakeHostGlobalsAndInstances<'a> {
1352            host_globals: Vec<StoreBox<VMHostGlobalContext>>,
1353            instances: Vec<StoreInstance>,
1354            store: &'a mut StoreOpaque,
1355        }
1356
1357        impl<'a> TempTakeHostGlobalsAndInstances<'a> {
1358            fn new(store: &'a mut StoreOpaque) -> Self {
1359                let host_globals = mem::take(&mut store.host_globals);
1360                let instances = mem::take(&mut store.instances);
1361                Self {
1362                    host_globals,
1363                    instances,
1364                    store,
1365                }
1366            }
1367        }
1368
1369        impl Drop for TempTakeHostGlobalsAndInstances<'_> {
1370            fn drop(&mut self) {
1371                assert!(self.store.host_globals.is_empty());
1372                self.store.host_globals = mem::take(&mut self.host_globals);
1373                assert!(self.store.instances.is_empty());
1374                self.store.instances = mem::take(&mut self.instances);
1375            }
1376        }
1377
1378        let mut temp = TempTakeHostGlobalsAndInstances::new(self);
1379        unsafe {
1380            // First enumerate all the host-created globals.
1381            for global in temp.host_globals.iter() {
1382                let export = ExportGlobal {
1383                    definition: NonNull::from(&mut global.get().as_mut().global),
1384                    vmctx: None,
1385                    global: global.get().as_ref().ty.to_wasm_type(),
1386                };
1387                let global = Global::from_wasmtime_global(export, temp.store);
1388                f(temp.store, global);
1389            }
1390
1391            // Then enumerate all instances' defined globals.
1392            for instance in temp.instances.iter_mut() {
1393                for (_, export) in instance.handle.defined_globals() {
1394                    let global = Global::from_wasmtime_global(export, temp.store);
1395                    f(temp.store, global);
1396                }
1397            }
1398        }
1399    }
1400
1401    #[cfg_attr(not(target_os = "linux"), allow(dead_code))] // not used on all platforms
1402    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
1403        self.signal_handler = handler;
1404    }
1405
1406    #[inline]
1407    pub fn runtime_limits(&self) -> &VMRuntimeLimits {
1408        &self.runtime_limits
1409    }
1410
1411    #[inline(never)]
1412    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
1413        assert!(self.gc_store.is_none());
1414        let gc_store = allocate_gc_store(self.engine())?;
1415        self.gc_store = Some(gc_store);
1416        return Ok(());
1417
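        // The helpers below are `cfg`-gated so that the call above reads the same
        // regardless of compile-time configuration: with the `gc` feature a real
        // GC heap is requested from the engine's allocator, and without it the
        // allocation fails with an error.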
1418        #[cfg(feature = "gc")]
1419        fn allocate_gc_store(engine: &Engine) -> Result<GcStore> {
1420            ensure!(
1421                engine.features().gc_types(),
1422                "cannot allocate a GC store when GC is disabled at configuration time"
1423            );
1424            let (index, heap) = engine
1425                .allocator()
1426                .allocate_gc_heap(&**engine.gc_runtime()?)?;
1427            Ok(GcStore::new(index, heap))
1428        }
1429
1430        #[cfg(not(feature = "gc"))]
1431        fn allocate_gc_store(_engine: &Engine) -> Result<GcStore> {
1432            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
1433        }
1434    }
1435
1436    #[inline]
1437    #[cfg(feature = "gc")]
1438    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
1439        match &self.gc_store {
1440            Some(gc_store) => Ok(gc_store),
1441            None => bail!("GC heap not initialized yet"),
1442        }
1443    }
1444
1445    #[inline]
1446    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
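        // Lazily allocate the GC heap the first time it's needed.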
1447        if self.gc_store.is_none() {
1448            self.allocate_gc_heap()?;
1449        }
1450        Ok(self.unwrap_gc_store_mut())
1451    }
1452
1453    /// If GC is supported and enabled for this store, return a mutable
1454    /// reference to its GC heap (allocating it on demand); otherwise `Ok(None)`.
1455    #[inline]
1456    pub(crate) fn optional_gc_store_mut(&mut self) -> Result<Option<&mut GcStore>> {
1457        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1458            Ok(None)
1459        } else {
1460            Ok(Some(self.gc_store_mut()?))
1461        }
1462    }
1463
1464    #[inline]
1465    #[cfg(feature = "gc")]
1466    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
1467        self.gc_store
1468            .as_ref()
1469            .expect("attempted to access the store's GC heap before it has been allocated")
1470    }
1471
1472    #[inline]
1473    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
1474        self.gc_store
1475            .as_mut()
1476            .expect("attempted to access the store's GC heap before it has been allocated")
1477    }
1478
1479    #[inline]
1480    pub(crate) fn gc_roots(&self) -> &RootSet {
1481        &self.gc_roots
1482    }
1483
1484    #[inline]
1485    #[cfg(feature = "gc")]
1486    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
1487        &mut self.gc_roots
1488    }
1489
1490    #[inline]
1491    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
1492        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
1493    }
1494
1495    #[cfg(feature = "gc")]
1496    pub fn gc(&mut self) {
1497        // If the GC heap hasn't been initialized, there is nothing to collect.
1498        if self.gc_store.is_none() {
1499            return;
1500        }
1501
1502        log::trace!("============ Begin GC ===========");
1503
1504        // Take the GC roots list out of `self` so that we can mutably borrow
1505        // it while still calling mutable methods on `self`.
1506        let mut roots = core::mem::take(&mut self.gc_roots_list);
1507
1508        self.trace_roots(&mut roots);
1509        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });
1510
1511        // Restore the GC roots for the next GC.
1512        roots.clear();
1513        self.gc_roots_list = roots;
1514
1515        log::trace!("============ End GC ===========");
1516    }
1517
1518    #[inline]
1519    #[cfg(not(feature = "gc"))]
1520    pub fn gc(&mut self) {
1521        // Nothing to collect.
1522        //
1523        // Note that this is *not* a public method, this is just defined for the
1524        // crate-internal `StoreOpaque` type. This is a convenience so that we
1525        // don't have to `cfg` every call site.
1526    }
1527
1528    #[cfg(feature = "gc")]
1529    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1530        log::trace!("Begin trace GC roots");
1531
1532        // We shouldn't have any leftover, stale GC roots.
1533        assert!(gc_roots_list.is_empty());
1534
1535        self.trace_wasm_stack_roots(gc_roots_list);
1536        self.trace_vmctx_roots(gc_roots_list);
1537        self.trace_user_roots(gc_roots_list);
1538
1539        log::trace!("End trace GC roots")
1540    }
1541
1542    #[cfg(feature = "gc")]
1543    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1544        use crate::runtime::vm::{Backtrace, SendSyncPtr};
1545        use core::ptr::NonNull;
1546
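        // For each Wasm frame on the stack: resolve the module that owns the
        // frame's PC, look up the stack map for that PC, and then record every
        // live GC reference slot described by the map as a root.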
1547        log::trace!("Begin trace GC roots :: Wasm stack");
1548
1549        Backtrace::trace(self, |frame| {
1550            let pc = frame.pc();
1551            debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");
1552
1553            let fp = frame.fp() as *mut usize;
1554            debug_assert!(
1555                !fp.is_null(),
1556                "we should always get a valid frame pointer for Wasm frames"
1557            );
1558
1559            let module_info = self
1560                .modules()
1561                .lookup_module_by_pc(pc)
1562                .expect("should have module info for Wasm frame");
1563
1564            let stack_map = match module_info.lookup_stack_map(pc) {
1565                Some(sm) => sm,
1566                None => {
1567                    log::trace!("No stack map for this Wasm frame");
1568                    return core::ops::ControlFlow::Continue(());
1569                }
1570            };
1571            log::trace!(
1572                "We have a stack map that maps {} bytes in this Wasm frame",
1573                stack_map.frame_size()
1574            );
1575
1576            let sp = unsafe { stack_map.sp(fp) };
1577            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
1578                let raw: u32 = unsafe { core::ptr::read(stack_slot) };
1579                log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");
1580
1581                let gc_ref = VMGcRef::from_raw_u32(raw);
1582                if gc_ref.is_some() {
1583                    unsafe {
1584                        gc_roots_list.add_wasm_stack_root(SendSyncPtr::new(
1585                            NonNull::new(stack_slot).unwrap(),
1586                        ));
1587                    }
1588                }
1589            }
1590
1591            core::ops::ControlFlow::Continue(())
1592        });
1593
1594        log::trace!("End trace GC roots :: Wasm stack");
1595    }
1596
1597    #[cfg(feature = "gc")]
1598    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1599        log::trace!("Begin trace GC roots :: vmctx");
1600        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
1601        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
1602        log::trace!("End trace GC roots :: vmctx");
1603    }
1604
1605    #[cfg(feature = "gc")]
1606    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1607        log::trace!("Begin trace GC roots :: user");
1608        self.gc_roots.trace_roots(gc_roots_list);
1609        log::trace!("End trace GC roots :: user");
1610    }
1611
1612    /// Insert a host-allocated GC type into this store.
1613    ///
1614    /// This makes it possible for the embedder to allocate instances of this
1615    /// type in this store without us having to worry about the type being
1616    /// reclaimed (since it is possible that none of the Wasm modules in this
1617    /// store are holding it alive).
1618    #[cfg(feature = "gc")]
1619    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
1620        self.gc_host_alloc_types.insert(ty);
1621    }
1622
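    /// Returns the amount of fuel remaining in this store.
    ///
    /// Fuel is split between an "injected" portion stored in
    /// `VMRuntimeLimits::fuel_consumed` (kept as a negative count that Wasm code
    /// increments towards zero) and `self.fuel_reserve`, which is held back for
    /// later refueling; the value reported here is, roughly, the un-consumed
    /// injected amount plus the reserve.
    ///
    /// Errors if fuel consumption was not enabled in this store's engine config.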
1623    pub fn get_fuel(&self) -> Result<u64> {
1624        anyhow::ensure!(
1625            self.engine().tunables().consume_fuel,
1626            "fuel is not configured in this store"
1627        );
1628        let injected_fuel = unsafe { *self.runtime_limits.fuel_consumed.get() };
1629        Ok(get_fuel(injected_fuel, self.fuel_reserve))
1630    }
1631
1632    fn refuel(&mut self) -> bool {
1633        let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
1634        refuel(
1635            injected_fuel,
1636            &mut self.fuel_reserve,
1637            self.fuel_yield_interval,
1638        )
1639    }
1640
1641    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1642        anyhow::ensure!(
1643            self.engine().tunables().consume_fuel,
1644            "fuel is not configured in this store"
1645        );
1646        let injected_fuel = unsafe { &mut *self.runtime_limits.fuel_consumed.get() };
1647        set_fuel(
1648            injected_fuel,
1649            &mut self.fuel_reserve,
1650            self.fuel_yield_interval,
1651            fuel,
1652        );
1653        Ok(())
1654    }
1655
1656    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1657        anyhow::ensure!(
1658            self.engine().tunables().consume_fuel,
1659            "fuel is not configured in this store"
1660        );
1661        anyhow::ensure!(
1662            self.engine().config().async_support,
1663            "async support is not configured in this store"
1664        );
1665        anyhow::ensure!(
1666            interval != Some(0),
1667            "fuel_async_yield_interval must not be 0"
1668        );
1669        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1670        // Reset the fuel active + reserve states by resetting the amount.
1671        self.set_fuel(self.get_fuel()?)
1672    }
1673
1674    #[inline]
1675    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
1676        let handler = self.signal_handler.as_ref()?;
1677        Some(handler)
1678    }
1679
1680    #[inline]
1681    pub fn vmruntime_limits(&self) -> NonNull<VMRuntimeLimits> {
1682        NonNull::from(&self.runtime_limits)
1683    }
1684
1685    #[inline]
1686    pub fn default_caller(&self) -> NonNull<VMContext> {
1687        self.default_caller.vmctx()
1688    }
1689
1690    #[inline]
1691    pub fn traitobj(&self) -> NonNull<dyn crate::runtime::vm::VMStore> {
1692        self.default_caller.traitobj(self)
1693    }
1694
1695    /// Takes the cached `Vec<Val>` stored internally across hostcalls, which is
1696    /// used when calling the host as part of a `Func::new` invocation.
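    ///
    /// An illustrative round trip (a hypothetical trampoline; every name other
    /// than the two storage methods is an assumption):
    ///
    /// ```ignore
    /// let mut vals = store.take_hostcall_val_storage();
    /// debug_assert!(vals.is_empty());
    /// vals.extend(args.iter().cloned());
    /// // ... invoke the host closure with `vals` ...
    /// vals.truncate(0);
    /// store.save_hostcall_val_storage(vals);
    /// ```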
1697    #[inline]
1698    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
1699        mem::take(&mut self.hostcall_val_storage)
1700    }
1701
1702    /// Restores the vector previously taken by `take_hostcall_val_storage`
1703    /// above back into the store, allowing it to be used in the future for the
1704    /// next wasm->host call.
1705    #[inline]
1706    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1707        if storage.capacity() > self.hostcall_val_storage.capacity() {
1708            self.hostcall_val_storage = storage;
1709        }
1710    }
1711
1712    /// Same as `take_hostcall_val_storage`, but for the direction of the host
1713    /// calling wasm.
1714    #[inline]
1715    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
1716        mem::take(&mut self.wasm_val_raw_storage)
1717    }
1718
1719    /// Same as `save_hostcall_val_storage`, but for the direction of the host
1720    /// calling wasm.
1721    #[inline]
1722    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1723        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1724            self.wasm_val_raw_storage = storage;
1725        }
1726    }
1727
1728    pub(crate) fn push_rooted_funcs(&mut self, funcs: Arc<[Definition]>) {
1729        self.rooted_host_funcs.push(funcs);
1730    }
1731
1732    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
1733    /// WebAssembly-relative fault.
1734    ///
1735    /// This function may abort the process if `addr` is not found to actually
1736    /// reside in any linear memory. In such a situation it means that the
1737    /// segfault was erroneously caught by Wasmtime and is possibly indicative
1738    /// of a code generator bug.
1739    ///
1740    /// This function returns `None` for dynamically-bounds-checked memories
1741    /// with Spectre mitigations enabled, since the hardware fault address is
1742    /// always zero in these situations, which means that the trapping context
1743    /// doesn't have enough information to report the fault address.
1744    pub(crate) fn wasm_fault(
1745        &self,
1746        pc: usize,
1747        addr: usize,
1748    ) -> Option<crate::runtime::vm::WasmFault> {
1749        // There are a few cases where a "close to zero" pointer is loaded and
1750        // we expect that to happen:
1751        //
1752        // * Explicitly bounds-checked memories with spectre-guards enabled will
1753        //   cause out-of-bounds accesses to get routed to address 0, so allow
1754        //   wasm instructions to fault on the null address.
1755        // * `call_indirect` when invoking a null function pointer may load data
1756        //   from a `VMFuncRef` whose address is null, meaning any field of
1757        //   `VMFuncRef` could be the address of the fault.
1758        //
1759        // In these situations the address is so small that it won't belong to
1760        // any instance, so skip the checks below.
1761        if addr <= mem::size_of::<VMFuncRef>() {
1762            const _: () = {
1763                // static-assert that `VMFuncRef` isn't too big to ensure that
1764                // it lives solely within the first page as we currently only
1765                // have the guarantee that the first page of memory is unmapped,
1766                // no more.
1767                assert!(mem::size_of::<VMFuncRef>() <= 512);
1768            };
1769            return None;
1770        }
1771
1772        // Search all known instances in this store for this address. Note that
1773        // this is probably not the speediest way to do this. Traps, however,
1774        // are generally not expected to be super fast and additionally stores
1775        // probably don't have all that many instances or memories.
1776        //
1777        // If this loop becomes hot in the future, however, it should be
1778        // possible to precompute maps about linear memories in a store and have
1779        // a quicker lookup.
1780        let mut fault = None;
1781        for instance in self.instances.iter() {
1782            if let Some(f) = instance.handle.wasm_fault(addr) {
1783                assert!(fault.is_none());
1784                fault = Some(f);
1785            }
1786        }
1787        if fault.is_some() {
1788            return fault;
1789        }
1790
1791        cfg_if::cfg_if! {
1792            if #[cfg(any(feature = "std", unix, windows))] {
1793                // With the standard library a rich error can be printed here
1794                // to stderr and the native abort path is used.
1795                eprintln!(
1796                    "\
1797Wasmtime caught a segfault for a wasm program because the faulting instruction
1798is allowed to segfault due to how linear memories are implemented. The address
1799that was accessed, however, is not known to any linear memory in use within this
1800Store. This may be indicative of a critical bug in Wasmtime's code generation
1801because all addresses which are known to be reachable from wasm won't reach this
1802message.
1803
1804    pc:      0x{pc:x}
1805    address: 0x{addr:x}
1806
1807This is a possible security issue because WebAssembly has accessed something it
1808shouldn't have been able to. Other accesses may have succeeded and this one just
1809happened to be caught. The process will now be aborted to prevent this damage
1810from going any further and to alert what's going on. If this is a security
1811issue please reach out to the Wasmtime team via its security policy
1812at https://bytecodealliance.org/security.
1813"
1814                );
1815                std::process::abort();
1816            } else if #[cfg(panic = "abort")] {
1817                // Without the standard library but with `panic=abort` then
1818                // it's safe to panic as that's known to halt execution. For
1819                // now avoid the above error message as well since without
1820                // `std` it's probably best to be a bit more size-conscious.
1821                let _ = pc;
1822                panic!("invalid fault");
1823            } else {
1824                // Without `std` and with `panic = "unwind"` there's no way to
1825                // abort the process portably, so flag a compile time error.
1826                //
1827                // NB: if this becomes a problem in the future one option would
1828                // be to extend the `capi.rs` module for no_std platforms, but
1829                // it remains yet to be seen at this time if this is hit much.
1830                compile_error!("either `std` or `panic=abort` must be enabled");
1831                None
1832            }
1833        }
1834    }
1835
1836    /// Retrieve the store's protection key.
1837    #[inline]
1838    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
1839        self.pkey
1840    }
1841
1842    #[inline]
1843    #[cfg(feature = "component-model")]
1844    pub(crate) fn component_resource_state(
1845        &mut self,
1846    ) -> (
1847        &mut crate::runtime::vm::component::CallContexts,
1848        &mut crate::runtime::vm::component::ResourceTable,
1849        &mut crate::component::HostResourceData,
1850    ) {
1851        (
1852            &mut self.component_calls,
1853            &mut self.component_host_table,
1854            &mut self.host_resource_data,
1855        )
1856    }
1857
1858    #[cfg(feature = "component-model")]
1859    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
1860        // We don't actually need the instance itself right now, but it seems
1861        // like something we will almost certainly eventually want to keep
1862        // around, so force callers to provide it.
1863        let _ = instance;
1864
1865        self.num_component_instances += 1;
1866    }
1867
1868    #[cfg(not(feature = "async"))]
1869    pub(crate) fn async_guard_range(&self) -> core::ops::Range<*mut u8> {
1870        core::ptr::null_mut()..core::ptr::null_mut()
1871    }
1872
1873    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
1874        match &mut self.executor {
1875            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
1876            #[cfg(has_host_compiler_backend)]
1877            Executor::Native => ExecutorRef::Native,
1878        }
1879    }
1880
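    /// Returns the unwinder matching this store's executor: frames produced by
    /// the Pulley interpreter are walked differently than frames of
    /// natively-compiled code.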
1881    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
1882        match &self.executor {
1883            Executor::Interpreter(_) => &crate::runtime::vm::UnwindPulley,
1884            #[cfg(has_host_compiler_backend)]
1885            Executor::Native => &crate::runtime::vm::UnwindHost,
1886        }
1887    }
1888}
1889
1890unsafe impl<T> crate::runtime::vm::VMStore for StoreInner<T> {
1891    fn store_opaque(&self) -> &StoreOpaque {
1892        &self.inner
1893    }
1894
1895    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
1896        &mut self.inner
1897    }
1898
1899    fn memory_growing(
1900        &mut self,
1901        current: usize,
1902        desired: usize,
1903        maximum: Option<usize>,
1904    ) -> Result<bool, anyhow::Error> {
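        // Dispatch to whichever resource limiter the embedder configured, if any;
        // with no limiter installed, growth is always approved here.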
1905        match self.limiter {
1906            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
1907                limiter(&mut self.data).memory_growing(current, desired, maximum)
1908            }
1909            #[cfg(feature = "async")]
1910            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
1911                self.inner
1912                    .async_cx()
1913                    .expect("ResourceLimiterAsync requires async Store")
1914                    .block_on(
1915                        limiter(&mut self.data)
1916                            .memory_growing(current, desired, maximum)
1917                            .as_mut(),
1918                    )?
1919            },
1920            None => Ok(true),
1921        }
1922    }
1923
1924    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
1925        match self.limiter {
1926            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
1927                limiter(&mut self.data).memory_grow_failed(error)
1928            }
1929            #[cfg(feature = "async")]
1930            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
1931                limiter(&mut self.data).memory_grow_failed(error)
1932            }
1933            None => {
1934                log::debug!("ignoring memory growth failure error: {error:?}");
1935                Ok(())
1936            }
1937        }
1938    }
1939
1940    fn table_growing(
1941        &mut self,
1942        current: usize,
1943        desired: usize,
1944        maximum: Option<usize>,
1945    ) -> Result<bool, anyhow::Error> {
1946        // Need to borrow async_cx before the mut borrow of the limiter.
1947        // self.async_cx() panics when used with a non-async store, so
1948        // wrap this in an option.
1949        #[cfg(feature = "async")]
1950        let async_cx = if self.async_support()
1951            && matches!(self.limiter, Some(ResourceLimiterInner::Async(_)))
1952        {
1953            Some(self.async_cx().unwrap())
1954        } else {
1955            None
1956        };
1957
1958        match self.limiter {
1959            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
1960                limiter(&mut self.data).table_growing(current, desired, maximum)
1961            }
1962            #[cfg(feature = "async")]
1963            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
1964                async_cx
1965                    .expect("ResourceLimiterAsync requires async Store")
1966                    .block_on(limiter(&mut self.data).table_growing(current, desired, maximum))?
1967            },
1968            None => Ok(true),
1969        }
1970    }
1971
1972    fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
1973        match self.limiter {
1974            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
1975                limiter(&mut self.data).table_grow_failed(error)
1976            }
1977            #[cfg(feature = "async")]
1978            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
1979                limiter(&mut self.data).table_grow_failed(error)
1980            }
1981            None => {
1982                log::debug!("ignoring table growth failure: {error:?}");
1983                Ok(())
1984            }
1985        }
1986    }
1987
1988    fn out_of_gas(&mut self) -> Result<()> {
1989        if !self.refuel() {
1990            return Err(Trap::OutOfFuel.into());
1991        }
1992        #[cfg(feature = "async")]
1993        if self.fuel_yield_interval.is_some() {
1994            self.async_yield_impl()?;
1995        }
1996        Ok(())
1997    }
1998
1999    #[cfg(target_has_atomic = "64")]
2000    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
2001        // Temporarily take the configured behavior to avoid mutably borrowing
2002        // multiple times.
2003        let mut behavior = self.epoch_deadline_behavior.take();
2004        let delta_result = match &mut behavior {
2005            None => Err(Trap::Interrupt.into()),
2006            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
2007                let delta = match update {
2008                    UpdateDeadline::Continue(delta) => delta,
2009
2010                    #[cfg(feature = "async")]
2011                    UpdateDeadline::Yield(delta) => {
2012                        assert!(
2013                            self.async_support(),
2014                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
2015                        );
2016                        // Do the async yield. This may return a trap if the
2017                        // future was canceled while we were yielded.
2018                        self.async_yield_impl()?;
2019                        delta
2020                    }
2021                };
2022
2023                // Set a new deadline and return the new epoch deadline so
2024                // the Wasm code doesn't have to reload it.
2025                self.set_epoch_deadline(delta);
2026                Ok(self.get_epoch_deadline())
2027            })
2028        };
2029
2030        // Put back the original behavior which was replaced by `take`.
2031        self.epoch_deadline_behavior = behavior;
2032        delta_result
2033    }
2034
2035    #[cfg(feature = "gc")]
2036    fn maybe_async_gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
2037        let mut scope = crate::RootScope::new(self);
2038        let store = scope.as_context_mut().0;
2039        let store_id = store.id();
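        // Root the incoming GC ref in a LIFO scope so that it stays alive (and is
        // traced) across the collection below; a fresh reference is cloned back
        // out of the heap afterwards.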
2040        let root = root.map(|r| store.gc_roots_mut().push_lifo_root(store_id, r));
2041
2042        if store.async_support() {
2043            #[cfg(feature = "async")]
2044            unsafe {
2045                let async_cx = store.async_cx();
2046                let future = store.gc_async();
2047                async_cx
2048                    .expect("attempted to pull async context during shutdown")
2049                    .block_on(future)?;
2050            }
2051        } else {
2052            (**store).gc();
2053        }
2054
2055        let root = match root {
2056            None => None,
2057            Some(r) => {
2058                let r = r
2059                    .get_gc_ref(store)
2060                    .expect("still in scope")
2061                    .unchecked_copy();
2062                Some(store.gc_store_mut()?.clone_gc_ref(&r))
2063            }
2064        };
2065
2066        Ok(root)
2067    }
2068
2069    #[cfg(not(feature = "gc"))]
2070    fn maybe_async_gc(&mut self, root: Option<VMGcRef>) -> Result<Option<VMGcRef>> {
2071        Ok(root)
2072    }
2073
2074    #[cfg(feature = "component-model")]
2075    fn component_calls(&mut self) -> &mut crate::runtime::vm::component::CallContexts {
2076        &mut self.component_calls
2077    }
2078}
2079
2080impl<T> StoreInner<T> {
2081    #[cfg(target_has_atomic = "64")]
2082    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2083        // Set a new deadline based on the "epoch deadline delta".
2084        //
2085        // Safety: this is safe because the epoch deadline in the
2086        // `VMRuntimeLimits` is accessed only here and by Wasm guest code
2087        // running in this store, and we have a `&mut self` here.
2088        //
2089        // Also, note that when this update is performed while Wasm is
2090        // on the stack, the Wasm will reload the new value once we
2091        // return into it.
2092        let epoch_deadline = unsafe { self.vmruntime_limits().as_mut().epoch_deadline.get_mut() };
2093        *epoch_deadline = self.engine().current_epoch() + delta;
2094    }
2095
2096    #[cfg(target_has_atomic = "64")]
2097    fn epoch_deadline_trap(&mut self) {
2098        self.epoch_deadline_behavior = None;
2099    }
2100
2101    #[cfg(target_has_atomic = "64")]
2102    fn epoch_deadline_callback(
2103        &mut self,
2104        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
2105    ) {
2106        self.epoch_deadline_behavior = Some(callback);
2107    }
2108
2109    fn get_epoch_deadline(&self) -> u64 {
2110        // Safety: this is safe because, as above, it is only invoked
2111        // from within `new_epoch` which is called from guest Wasm
2112        // code, which will have an exclusive borrow on the Store.
2113        let epoch_deadline = unsafe { self.vmruntime_limits().as_mut().epoch_deadline.get_mut() };
2114        *epoch_deadline
2115    }
2116}
2117
2118impl<T: Default> Default for Store<T> {
2119    fn default() -> Store<T> {
2120        Store::new(&Engine::default(), T::default())
2121    }
2122}
2123
2124impl<T: fmt::Debug> fmt::Debug for Store<T> {
2125    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2126        let inner = &**self.inner as *const StoreInner<T>;
2127        f.debug_struct("Store")
2128            .field("inner", &inner)
2129            .field("data", &self.inner.data)
2130            .finish()
2131    }
2132}
2133
2134impl<T> Drop for Store<T> {
2135    fn drop(&mut self) {
2136        self.inner.flush_fiber_stack();
2137
2138        // for documentation on this `unsafe`, see `into_data`.
2139        unsafe {
2140            ManuallyDrop::drop(&mut self.inner.data);
2141            ManuallyDrop::drop(&mut self.inner);
2142        }
2143    }
2144}
2145
2146impl Drop for StoreOpaque {
2147    fn drop(&mut self) {
2148        // NB it's important that this destructor does not access `self.data`.
2149        // That is deallocated by `Drop for Store<T>` above.
2150
2151        unsafe {
2152            let allocator = self.engine.allocator();
2153            let ondemand = OnDemandInstanceAllocator::default();
2154            for instance in self.instances.iter_mut() {
2155                if let StoreInstanceKind::Dummy = instance.kind {
2156                    ondemand.deallocate_module(&mut instance.handle);
2157                } else {
2158                    allocator.deallocate_module(&mut instance.handle);
2159                }
2160            }
2161            ondemand.deallocate_module(&mut self.default_caller);
2162
2163            #[cfg(feature = "gc")]
2164            if let Some(gc_store) = self.gc_store.take() {
2165                debug_assert!(self.engine.features().gc_types());
2166                allocator.deallocate_gc_heap(gc_store.allocation_index, gc_store.gc_heap);
2167            }
2168
2169            #[cfg(feature = "component-model")]
2170            {
2171                for _ in 0..self.num_component_instances {
2172                    allocator.decrement_component_instance_count();
2173                }
2174            }
2175
2176            // See documentation for these fields on `StoreOpaque` for why they
2177            // must be dropped in this order.
2178            ManuallyDrop::drop(&mut self.store_data);
2179            ManuallyDrop::drop(&mut self.rooted_host_funcs);
2180        }
2181    }
2182}
2183
2184#[cfg(test)]
2185mod tests {
2186    use super::{get_fuel, refuel, set_fuel};
2187    use std::num::NonZeroU64;
2188
2189    struct FuelTank {
2190        pub consumed_fuel: i64,
2191        pub reserve_fuel: u64,
2192        pub yield_interval: Option<NonZeroU64>,
2193    }
2194
2195    impl FuelTank {
2196        fn new() -> Self {
2197            FuelTank {
2198                consumed_fuel: 0,
2199                reserve_fuel: 0,
2200                yield_interval: None,
2201            }
2202        }
2203        fn get_fuel(&self) -> u64 {
2204            get_fuel(self.consumed_fuel, self.reserve_fuel)
2205        }
2206        fn refuel(&mut self) -> bool {
2207            refuel(
2208                &mut self.consumed_fuel,
2209                &mut self.reserve_fuel,
2210                self.yield_interval,
2211            )
2212        }
2213        fn set_fuel(&mut self, fuel: u64) {
2214            set_fuel(
2215                &mut self.consumed_fuel,
2216                &mut self.reserve_fuel,
2217                self.yield_interval,
2218                fuel,
2219            );
2220        }
2221    }
2222
2223    #[test]
2224    fn smoke() {
2225        let mut tank = FuelTank::new();
2226        tank.set_fuel(10);
2227        assert_eq!(tank.consumed_fuel, -10);
2228        assert_eq!(tank.reserve_fuel, 0);
2229
2230        tank.yield_interval = NonZeroU64::new(10);
2231        tank.set_fuel(25);
2232        assert_eq!(tank.consumed_fuel, -10);
2233        assert_eq!(tank.reserve_fuel, 15);
2234    }
2235
2236    #[test]
2237    fn does_not_lose_precision() {
2238        let mut tank = FuelTank::new();
2239        tank.set_fuel(u64::MAX);
2240        assert_eq!(tank.get_fuel(), u64::MAX);
2241
2242        tank.set_fuel(i64::MAX as u64);
2243        assert_eq!(tank.get_fuel(), i64::MAX as u64);
2244
2245        tank.set_fuel(i64::MAX as u64 + 1);
2246        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
2247    }
2248
2249    #[test]
2250    fn yielding_does_not_lose_precision() {
2251        let mut tank = FuelTank::new();
2252
2253        tank.yield_interval = NonZeroU64::new(10);
2254        tank.set_fuel(u64::MAX);
2255        assert_eq!(tank.get_fuel(), u64::MAX);
2256        assert_eq!(tank.consumed_fuel, -10);
2257        assert_eq!(tank.reserve_fuel, u64::MAX - 10);
2258
2259        tank.yield_interval = NonZeroU64::new(u64::MAX);
2260        tank.set_fuel(u64::MAX);
2261        assert_eq!(tank.get_fuel(), u64::MAX);
2262        assert_eq!(tank.consumed_fuel, -i64::MAX);
2263        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
2264
2265        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
2266        tank.set_fuel(u64::MAX);
2267        assert_eq!(tank.get_fuel(), u64::MAX);
2268        assert_eq!(tank.consumed_fuel, -i64::MAX);
2269        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
2270    }
2271
2272    #[test]
2273    fn refueling() {
2274        // It's possible for fuel to have been consumed past the limit, as some instructions
2275        // can consume multiple units of fuel at once. Refueling should be strict in its
2276        // accounting and not add more fuel than there is.
2277        let mut tank = FuelTank::new();
2278
2279        tank.yield_interval = NonZeroU64::new(10);
2280        tank.reserve_fuel = 42;
2281        tank.consumed_fuel = 4;
2282        assert!(tank.refuel());
2283        assert_eq!(tank.reserve_fuel, 28);
2284        assert_eq!(tank.consumed_fuel, -10);
2285
2286        tank.yield_interval = NonZeroU64::new(1);
2287        tank.reserve_fuel = 8;
2288        tank.consumed_fuel = 4;
2289        assert_eq!(tank.get_fuel(), 4);
2290        assert!(tank.refuel());
2291        assert_eq!(tank.reserve_fuel, 3);
2292        assert_eq!(tank.consumed_fuel, -1);
2293        assert_eq!(tank.get_fuel(), 4);
2294
2295        tank.yield_interval = NonZeroU64::new(10);
2296        tank.reserve_fuel = 3;
2297        tank.consumed_fuel = 4;
2298        assert_eq!(tank.get_fuel(), 0);
2299        assert!(!tank.refuel());
2300        assert_eq!(tank.reserve_fuel, 3);
2301        assert_eq!(tank.consumed_fuel, 4);
2302        assert_eq!(tank.get_fuel(), 0);
2303    }
2304}