wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::instance::InstanceData;
80use crate::linker::Definition;
81use crate::module::RegisteredModuleId;
82use crate::prelude::*;
83use crate::runtime::vm::mpk::ProtectionKey;
84#[cfg(feature = "gc")]
85use crate::runtime::vm::GcRootsList;
86use crate::runtime::vm::{
87    ExportGlobal, GcStore, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
88    Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SignalHandler,
89    StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMStoreContext,
90};
91use crate::trampoline::VMHostGlobalContext;
92use crate::RootSet;
93use crate::{module::ModuleRegistry, Engine, Module, Trap, Val, ValRaw};
94use crate::{Global, Instance, Memory, Table, Uninhabited};
95use alloc::sync::Arc;
96use core::fmt;
97use core::marker;
98use core::mem::{self, ManuallyDrop};
99use core::num::NonZeroU64;
100use core::ops::{Deref, DerefMut};
101use core::ptr::NonNull;
102use wasmtime_environ::TripleExt;
103
104mod context;
105pub use self::context::*;
106mod data;
107pub use self::data::*;
108mod func_refs;
109use func_refs::FuncRefs;
110#[cfg(feature = "async")]
111mod async_;
112#[cfg(all(feature = "async", feature = "call-hook"))]
113pub use self::async_::CallHookHandler;
114#[cfg(feature = "async")]
115use self::async_::*;
116#[cfg(feature = "gc")]
117mod gc;
118
/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
///
/// All WebAssembly instances and items will be attached to and refer to a
/// [`Store`]. For example instances, functions, globals, and tables are all
/// attached to a [`Store`]. Instances are created by instantiating a
/// [`Module`](crate::Module) within a [`Store`].
///
/// A [`Store`] is intended to be a short-lived object in a program. No form
/// of GC is implemented at this time so once an instance is created within a
/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
/// This makes [`Store`] unsuitable for creating an unbounded number of
/// instances in it because [`Store`] will never release this memory. It's
/// recommended to have a [`Store`] correspond roughly to the lifetime of a
/// "main instance" that an embedding is interested in executing.
///
/// ## Type parameter `T`
///
/// Each [`Store`] has a type parameter `T` associated with it. This `T`
/// represents state defined by the host. This state will be accessible through
/// the [`Caller`](crate::Caller) type that host-defined functions get access
/// to. This `T` is suitable for storing `Store`-specific information which
/// imported functions may want access to.
///
/// The data `T` can be accessed through methods like [`Store::data`] and
/// [`Store::data_mut`].
///
/// ## Stores, contexts, oh my
///
/// Most methods in Wasmtime take something of the form
/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
/// the first argument. These two traits allow ergonomically passing in the
/// context you currently have to any method. The primary two sources of
/// contexts are:
///
/// * `Store<T>`
/// * `Caller<'_, T>`
///
/// corresponding to what you create and what you have access to in a host
/// function. You can also explicitly acquire a [`StoreContext`] or
/// [`StoreContextMut`] and pass that around as well.
///
/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
/// form of context you have you can call various methods, create objects, etc.
///
/// ## Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`](crate::Config) for more information).
///
/// ## Cross-store usage of items
///
/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
/// [`Store`]. The store they belong to is the one they were created with
/// (passed in as a parameter) or instantiated with. This store is the only
/// store that can be used to interact with wasm items after they're created.
///
/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
/// operations is incorrect. In other words it's considered a programmer error
/// rather than a recoverable error for the wrong [`Store`] to be used when
/// calling APIs.
pub struct Store<T> {
    // The sole field: a heap allocation holding all store state, including the
    // host-provided `T`. Keeping `Store<T>` exactly one pointer large is a
    // load-bearing detail (see the module docs above). The `ManuallyDrop` lets
    // `Store::into_data` move `T` out without running destructors twice; for
    // the full drop protocol see the comments in `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
185
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // The VM enters host code either when wasm returns to the host or
        // when wasm invokes a host function. `matches!` replaces the manual
        // `match` with a `_ => false` catch-all, so adding a new variant
        // is handled explicitly here rather than silently falling through.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }
    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        // Mirror image of `entering_host`: the host either calls into wasm
        // or returns from a host function back into wasm.
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
216
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Host-configured resource limiter, stored as an accessor closure that
    /// projects `&mut T` to the limiter living inside the host data. Set via
    /// `Store::limiter` (and, presumably, an async counterpart — the async
    /// variant's setter is not visible in this chunk).
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on host<->wasm call transitions; see
    /// [`Store::call_hook`] and [`CallHook`].
    call_hook: Option<CallHookInner<T>>,
    /// Callback deciding how to proceed when the epoch deadline is hit.
    /// Only present on targets with 64-bit atomics (epochs are 64-bit).
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // for comments about `ManuallyDrop`, see `Store::into_data`
    data: ManuallyDrop<T>,
}
234
/// Storage for the host-provided resource-limiter accessor.
///
/// Each variant holds a closure that projects the store's host data `&mut T`
/// to the limiter trait object stored within it.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter; installed by `Store::limiter`.
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter; only available with the `async` feature.
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
240
/// Storage for a configured call hook (see [`CallHook`]).
enum CallHookInner<T> {
    /// Synchronous hook invoked on each host<->wasm transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook handler; requires both `async` and `call-hook`.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited variant whose sole purpose is to keep the `T` parameter
    /// used when the feature-gated variants above are compiled out. It can
    /// never be constructed (`Uninhabited`).
    #[allow(dead_code)]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
252
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// Returned by the callback installed with the epoch-deadline API (see the
/// `epoch_deadline_behavior` field of `StoreInner`).
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
}
264
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`.
// This lets most internal code take `&(mut) StoreInner<T>` and transparently
// use the monomorphic `StoreOpaque` API without naming `.inner`.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
278
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store belongs to (cloned from the `Engine` passed to
    /// `Store::new`; `Engine` is cheaply clonable).
    engine: Engine,
    vm_store_context: VMStoreContext,
    /// All instances allocated into this store, real and dummy alike; see
    /// `StoreInstance` for the distinction.
    instances: Vec<StoreInstance>,
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    signal_handler: Option<SignalHandler>,
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: Vec<StoreBox<VMHostGlobalContext>>,

    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,

    // Numbers of resources instantiated in this store, and their limits.
    // The limits default to the `DEFAULT_*_LIMIT` constants and may be
    // overridden via `Store::limiter`.
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    ///
    /// Note that this is `ManuallyDrop` because it needs to be dropped before
    /// `rooted_host_funcs` below. This structure contains pointers which are
    /// otherwise kept alive by the `Arc` references in `rooted_host_funcs`.
    store_data: ManuallyDrop<StoreData>,
    /// Type-erased pointer back to the enclosing `StoreInner<T>`; initialized
    /// in `Store::new` after the heap allocation exists (starts out `empty`).
    traitobj: StorePtr,
    default_caller: InstanceHandle,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// A list of lists of definitions which have been used to instantiate
    /// within this `Store`.
    ///
    /// Note that not all instantiations end up pushing to this list. At the
    /// time of this writing only the `InstancePre<T>` type will push to this
    /// list. Pushes to this list are typically accompanied with
    /// `HostFunc::to_func_store_rooted` to clone an `Arc` here once which
    /// preserves a strong reference to the `Arc` for each `HostFunc` stored
    /// within the list of `Definition`s.
    ///
    /// Note that this is `ManuallyDrop` as it must be dropped after
    /// `store_data` above, where the function pointers are stored.
    rooted_host_funcs: ManuallyDrop<Vec<Arc<[Definition]>>>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: crate::runtime::vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: crate::runtime::vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
397
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
enum Executor {
    /// Wasm executes in the Pulley interpreter whose state is owned here.
    Interpreter(Interpreter),
    /// Wasm executes as native code; only available when a host compiler
    /// backend is compiled in.
    #[cfg(has_host_compiler_backend)]
    Native,
}
407
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; carries no state (mirrors `Executor::Native`).
    #[cfg(has_host_compiler_backend)]
    Native,
}
414
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    // Exclusive borrow of the store whose GC heap is put in a no-GC scope.
    store: &'a mut StoreOpaque,
    // Whether `new` actually entered a no-GC scope (requires the `gc` feature
    // and an existing GC store); consulted by `Drop` to decide whether to
    // exit the scope.
    entered: bool,
}
421
422impl<'a> AutoAssertNoGc<'a> {
423    #[inline]
424    pub fn new(store: &'a mut StoreOpaque) -> Self {
425        let entered = if !cfg!(feature = "gc") {
426            false
427        } else if let Some(gc_store) = store.gc_store.as_mut() {
428            gc_store.gc_heap.enter_no_gc_scope();
429            true
430        } else {
431            false
432        };
433
434        AutoAssertNoGc { store, entered }
435    }
436
437    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
438    /// disables checks for no GC happening for the duration of this value.
439    ///
440    /// This is used when it is statically otherwise known that a GC doesn't
441    /// happen for the various types involved.
442    ///
443    /// # Unsafety
444    ///
445    /// This method is `unsafe` as it does not provide the same safety
446    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
447    /// caller that a GC doesn't happen.
448    #[inline]
449    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
450        if cfg!(debug_assertions) {
451            AutoAssertNoGc::new(store)
452        } else {
453            AutoAssertNoGc {
454                store,
455                entered: false,
456            }
457        }
458    }
459}
460
// `AutoAssertNoGc` transparently dereferences to the underlying store so it
// can be used anywhere a `StoreOpaque` is expected.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}

impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the no-GC scope if `new` actually entered one; `disabled`
        // (in release builds) and stores without a GC heap never enter.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
485
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// Handle to the allocated instance itself.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
494
/// Distinguishes real instances from internal dummy instances within a store.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
511
512impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        let pkey = engine.allocator().next_available_pkey();

        // Build the monomorphic portion first. `traitobj` and
        // `default_caller` start as placeholders and are filled in below once
        // the heap allocation exists and has a stable address.
        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            instances: Vec::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: Vec::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: AsyncState::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data: ManuallyDrop::new(store_data),
            traitobj: StorePtr::empty(),
            default_caller: InstanceHandle::null(),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            rooted_host_funcs: ManuallyDrop::new(Vec::new()),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            // With a native backend available, use the Pulley interpreter
            // only when the engine explicitly targets Pulley; otherwise run
            // natively. Without a native backend Pulley is the only option.
            #[cfg(has_host_compiler_backend)]
            executor: if cfg!(feature = "pulley") && engine.target().is_pulley() {
                Executor::Interpreter(Interpreter::new(engine))
            } else {
                Executor::Native
            },
            #[cfg(not(has_host_compiler_backend))]
            executor: {
                debug_assert!(engine.target().is_pulley());
                Executor::Interpreter(Interpreter::new(engine))
            },
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // Note the erasure of the lifetime here into `'static`, so in general
        // usage of this trait object must be strictly bounded to the `Store`
        // itself, and this is an invariant that we have to maintain throughout
        // Wasmtime.
        inner.traitobj = StorePtr::new(unsafe {
            mem::transmute::<
                NonNull<dyn crate::runtime::vm::VMStore + '_>,
                NonNull<dyn crate::runtime::vm::VMStore + 'static>,
            >(NonNull::from(&mut *inner))
        });

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        inner.default_caller = {
            let module = Arc::new(wasmtime_environ::Module::default());
            let shim = ModuleRuntimeInfo::bare(module);
            let allocator = OnDemandInstanceAllocator::default();

            allocator
                .validate_module(shim.env_module(), shim.offsets())
                .unwrap();

            let mut instance = unsafe {
                allocator
                    .allocate_module(InstanceAllocationRequest {
                        host_state: Box::new(()),
                        imports: Default::default(),
                        store: StorePtr::empty(),
                        runtime_info: &shim,
                        wmemcheck: engine.config().wmemcheck,
                        pkey: None,
                        tunables: engine.tunables(),
                    })
                    .expect("failed to allocate default callee")
            };
            unsafe {
                // Point the default callee back at this store so host calls
                // through it can recover the `Store` pointer.
                instance.set_store(Some(inner.traitobj()));
            }
            instance
        };

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
639
    /// Access the underlying data owned by this `Store`.
    ///
    /// Forwards to the shared accessor on `StoreInner`.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
645
    /// Access the underlying data owned by this `Store`.
    ///
    /// Mutable counterpart of [`Store::data`].
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
651
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        // NOTE(review): presumably tears down any lingering async fiber stack
        // before the store is dismantled — confirm against the `async_` module.
        self.inner.flush_fiber_stack();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }
686
687    /// Configures the [`ResourceLimiter`] used to limit resource creation
688    /// within this [`Store`].
689    ///
690    /// Whenever resources such as linear memory, tables, or instances are
691    /// allocated the `limiter` specified here is invoked with the store's data
692    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
693    /// being allocated. The returned [`ResourceLimiter`] is intended to live
694    /// within the `T` itself, for example by storing a
695    /// [`StoreLimits`](crate::StoreLimits).
696    ///
697    /// Note that this limiter is only used to limit the creation/growth of
698    /// resources in the future, this does not retroactively attempt to apply
699    /// limits to the [`Store`].
700    ///
701    /// # Examples
702    ///
703    /// ```
704    /// use wasmtime::*;
705    ///
706    /// struct MyApplicationState {
707    ///     my_state: u32,
708    ///     limits: StoreLimits,
709    /// }
710    ///
711    /// let engine = Engine::default();
712    /// let my_state = MyApplicationState {
713    ///     my_state: 42,
714    ///     limits: StoreLimitsBuilder::new()
715    ///         .memory_size(1 << 20 /* 1 MB */)
716    ///         .instances(2)
717    ///         .build(),
718    /// };
719    /// let mut store = Store::new(&engine, my_state);
720    /// store.limiter(|state| &mut state.limits);
721    ///
722    /// // Creation of smaller memories is allowed
723    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
724    ///
725    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
726    /// // configured
727    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
728    ///
729    /// // The number of instances in this store is limited to 2, so the third
730    /// // instance here should fail.
731    /// let module = Module::new(&engine, "(module)").unwrap();
732    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
733    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
734    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
735    /// ```
736    ///
737    /// [`ResourceLimiter`]: crate::ResourceLimiter
738    pub fn limiter(
739        &mut self,
740        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
741    ) {
742        // Apply the limits on instances, tables, and memory given by the limiter:
743        let inner = &mut self.inner;
744        let (instance_limit, table_limit, memory_limit) = {
745            let l = limiter(&mut inner.data);
746            (l.instances(), l.tables(), l.memories())
747        };
748        let innermost = &mut inner.inner;
749        innermost.instance_limit = instance_limit;
750        innermost.table_limit = table_limit;
751        innermost.memory_limit = memory_limit;
752
753        // Save the limiter accessor function:
754        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
755    }
756
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Only one hook is stored at a time: this replaces any
        // previously-configured hook.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }

    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }
783
784    /// Perform garbage collection.
785    ///
786    /// Note that it is not required to actively call this function. GC will
787    /// automatically happen according to various internal heuristics. This is
788    /// provided if fine-grained control over the GC is desired.
789    ///
790    /// If you are calling this method after an attempted allocation failed, you
791    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
792    /// When you do so, this method will attempt to create enough space in the
793    /// GC heap for that allocation, so that it will succeed on the next
794    /// attempt.
795    ///
796    /// This method is only available when the `gc` Cargo feature is enabled.
797    #[cfg(feature = "gc")]
798    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
799        assert!(!self.inner.async_support());
800        self.inner.gc(why);
801    }
802
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }

    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.inner.set_fuel(fuel)
    }

    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    /// Passing `None` disables periodic yielding.
    ///
    /// # Errors
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.inner.fuel_async_yield_interval(interval)
    }
868
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    ///
    /// Note that this method is only available on targets supporting 64-bit
    /// atomics.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    ///
    /// Note that this method is only available on targets supporting 64-bit
    /// atomics.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }
930
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] to yield to the async executor before
    /// updating the epoch deadline. Alternatively, the callback may
    /// return [`UpdateDeadline::Continue`] to update the epoch deadline
    /// immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    ///
    /// Note that this method is only available on targets supporting 64-bit
    /// atomics.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
965}
966
impl<'a, T> StoreContext<'a, T> {
    /// Returns whether the underlying store was configured with async support.
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned reference lives for `'a` — the lifetime of the
    /// original borrow of the store — rather than merely the lifetime of
    /// `&self`.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
991
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Unlike [`StoreContext::data`], the returned reference is tied to
    /// `&self` rather than `'a` since mutable access to the store exists.
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        self.0.gc(why);
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }
}
1059
impl<T> StoreInner<T> {
    /// Shared access to the user-provided `T` owned by this store.
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    /// Mutable access to the user-provided `T` owned by this store.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Invoked on transitions between host and wasm code.
    ///
    /// The common case — no protection key and no configured hook — is a
    /// cheap inline check; everything else is handled out-of-line in
    /// `call_hook_slow_path`.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // When a protection key is configured, restrict to that key while
        // entering wasm and re-allow all keys when control returns to the
        // host.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times. The hook is restored afterwards even if it errors.
        #[cfg_attr(not(feature = "call-hook"), allow(unreachable_patterns))]
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatches `s` to the configured synchronous or asynchronous hook.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => unsafe {
                self.inner
                    .async_cx()
                    .ok_or_else(|| anyhow!("couldn't grab async_cx for call hook"))?
                    .block_on(
                        handler
                            .handle_call_event((&mut *self).as_context_mut(), s)
                            .as_mut(),
                    )?
            },

            // This variant carries an uninhabited type: matching on it proves
            // to the compiler that this arm can never execute while keeping
            // the `T` type parameter used.
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    /// No-op shim used when the `async` feature is disabled so callers can
    /// unconditionally invoke this method.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1132
/// Computes the total remaining fuel: the banked `fuel_reserve` plus whatever
/// portion of the injected fuel the VM has not yet consumed.
///
/// `injected_fuel` counts upward from a negative starting value (see
/// `set_fuel`), so the un-consumed portion is its negation. The result
/// saturates at `0` and `u64::MAX` rather than wrapping.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // `saturating_neg` avoids the overflow panic that `-injected_fuel` would
    // hit in debug builds for `i64::MIN`. `set_fuel` never produces that
    // value, but this keeps the helper total for all inputs.
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1136
1137// Add remaining fuel from the reserve into the active fuel if there is any left.
1138fn refuel(
1139    injected_fuel: &mut i64,
1140    fuel_reserve: &mut u64,
1141    yield_interval: Option<NonZeroU64>,
1142) -> bool {
1143    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1144    if fuel > 0 {
1145        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1146        true
1147    } else {
1148        false
1149    }
1150}
1151
/// Splits `new_fuel_amount` between the VM's active fuel counter and the
/// store's reserve.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // When periodic yielding is enabled, only hand the VM one interval's
    // worth of fuel at a time; otherwise there's no per-injection cap.
    let per_injection = match yield_interval {
        Some(interval) => interval.get(),
        None => u64::MAX,
    };
    // The VM stores fuel in an `i64`, so additionally clamp the injected
    // amount to that range.
    let injected = new_fuel_amount.min(per_injection).min(i64::MAX as u64);
    // Whatever isn't injected now is banked in the reserve for later refills.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM increments this counter as fuel is consumed and halts once it
    // turns positive, so the injection is represented as a negative start.
    *injected_fuel = -(injected as i64);
}
1171
1172#[doc(hidden)]
1173impl StoreOpaque {
    /// Returns the unique [`StoreId`] identifying this store.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1177
1178    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1179        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1180            let new = slot.saturating_add(amt);
1181            if new > max {
1182                bail!(
1183                    "resource limit exceeded: {} count too high at {}",
1184                    desc,
1185                    new
1186                );
1187            }
1188            *slot = new;
1189            Ok(())
1190        }
1191
1192        let module = module.env_module();
1193        let memories = module.num_defined_memories();
1194        let tables = module.num_defined_tables();
1195
1196        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1197        bump(
1198            &mut self.memory_count,
1199            self.memory_limit,
1200            memories,
1201            "memory",
1202        )?;
1203        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1204
1205        Ok(())
1206    }
1207
    /// Returns whether this store's engine was configured with async support.
    ///
    /// Always `false` when the `async` feature is compiled out.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }

    /// Returns the [`Engine`] this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    /// Shared access to this store's [`StoreData`].
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }

    /// Mutable access to this store's [`StoreData`].
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }

    /// Shared access to the registry of modules in this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }

    /// Mutable access to the registry of modules in this store.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }

    /// Mutable access to this store's `FuncRefs`.
    pub(crate) fn func_refs(&mut self) -> &mut FuncRefs {
        &mut self.func_refs
    }

    /// Fills this store's func refs from the module registry.
    pub(crate) fn fill_func_refs(&mut self) {
        self.func_refs.fill(&self.modules);
    }

    /// Records `func_refs` originating from an instance pre-allocation.
    pub(crate) fn push_instance_pre_func_refs(&mut self, func_refs: Arc<[VMFuncRef]>) {
        self.func_refs.push_instance_pre_func_refs(func_refs);
    }

    /// Mutable access to the host-created globals owned by this store.
    pub(crate) fn host_globals(&mut self) -> &mut Vec<StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1253
1254    pub fn module_for_instance(&self, instance: InstanceId) -> Option<&'_ Module> {
1255        match self.instances[instance.0].kind {
1256            StoreInstanceKind::Dummy => None,
1257            StoreInstanceKind::Real { module_id } => {
1258                let module = self
1259                    .modules()
1260                    .lookup_module_by_id(module_id)
1261                    .expect("should always have a registered module for real instances");
1262                Some(module)
1263            }
1264        }
1265    }
1266
    /// Registers `handle` in this store as a "real" instance backed by the
    /// registered module `module_id`, returning the new instance's id.
    ///
    /// # Safety
    ///
    /// NOTE(review): this is `unsafe` without a documented contract here;
    /// presumably the caller must guarantee that `handle` is a valid handle
    /// owned by this store — confirm the exact invariant at call sites.
    pub unsafe fn add_instance(
        &mut self,
        handle: InstanceHandle,
        module_id: RegisteredModuleId,
    ) -> InstanceId {
        // Ids are dense indices into `self.instances`.
        let id = InstanceId(self.instances.len());
        log::trace!(
            "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
            self.id()
        );
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Real { module_id },
        });
        id
    }
1283
    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// # Safety
    ///
    /// NOTE(review): this is `unsafe` without a documented contract here;
    /// presumably the same caller obligations as [`StoreOpaque::add_instance`]
    /// apply — confirm at call sites.
    pub unsafe fn add_dummy_instance(&mut self, handle: InstanceHandle) -> InstanceId {
        // Ids are dense indices into `self.instances`.
        let id = InstanceId(self.instances.len());
        log::trace!(
            "Adding dummy instance to store: store={:?}, instance={id:?}",
            self.id()
        );
        self.instances.push(StoreInstance {
            handle: handle.clone(),
            kind: StoreInstanceKind::Dummy,
        });
        id
    }
1301
    /// Returns a shared reference to the instance handle with the given id.
    ///
    /// Panics if `id` is not an id previously returned by this store.
    pub fn instance(&self, id: InstanceId) -> &InstanceHandle {
        &self.instances[id.0].handle
    }

    /// Returns a mutable reference to the instance handle with the given id.
    ///
    /// Panics if `id` is not an id previously returned by this store.
    pub fn instance_mut(&mut self, id: InstanceId) -> &mut InstanceHandle {
        &mut self.instances[id.0].handle
    }
1309
1310    /// Get all instances (ignoring dummy instances) within this store.
1311    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1312        let instances = self
1313            .instances
1314            .iter()
1315            .enumerate()
1316            .filter_map(|(idx, inst)| {
1317                let id = InstanceId::from_index(idx);
1318                if let StoreInstanceKind::Dummy = inst.kind {
1319                    None
1320                } else {
1321                    Some(InstanceData::from_id(id))
1322                }
1323            })
1324            .collect::<Vec<_>>();
1325        instances
1326            .into_iter()
1327            .map(|i| Instance::from_wasmtime(i, self))
1328    }
1329
1330    /// Get all memories (host- or Wasm-defined) within this store.
1331    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
1332        // NB: Host-created memories have dummy instances. Therefore, we can get
1333        // all memories in the store by iterating over all instances (including
1334        // dummy instances) and getting each of their defined memories.
1335        let mems = self
1336            .instances
1337            .iter_mut()
1338            .flat_map(|instance| instance.handle.defined_memories())
1339            .collect::<Vec<_>>();
1340        mems.into_iter()
1341            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
1342    }
1343
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.

        // RAII guard that temporarily moves `instances` out of the store so
        // that `f` can be handed `&mut StoreOpaque` while iterating, and that
        // moves them back when dropped (including on unwind).
        struct TempTakeInstances<'a> {
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let instances = mem::take(&mut store.instances);
                Self { instances, store }
            }
        }

        impl Drop for TempTakeInstances<'_> {
            fn drop(&mut self) {
                // Nothing should have repopulated the store's instance list
                // while it was taken.
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeInstances::new(self);
        for instance in temp.instances.iter_mut() {
            for table in instance.handle.defined_tables() {
                let table = unsafe { Table::from_wasmtime_table(table, temp.store) };
                f(temp.store, table);
            }
        }
    }
1377
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    // Like `for_each_table` above, but host-created globals live in
    // `host_globals` rather than in dummy instances, so both collections are
    // temporarily taken out of the store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // RAII guard that temporarily moves `host_globals` and `instances`
        // out of the store so that `f` can be handed `&mut StoreOpaque`
        // during iteration; `Drop` moves both back (including on unwind).
        struct TempTakeHostGlobalsAndInstances<'a> {
            host_globals: Vec<StoreBox<VMHostGlobalContext>>,
            instances: Vec<StoreInstance>,
            store: &'a mut StoreOpaque,
        }

        impl<'a> TempTakeHostGlobalsAndInstances<'a> {
            fn new(store: &'a mut StoreOpaque) -> Self {
                let host_globals = mem::take(&mut store.host_globals);
                let instances = mem::take(&mut store.instances);
                Self {
                    host_globals,
                    instances,
                    store,
                }
            }
        }

        impl Drop for TempTakeHostGlobalsAndInstances<'_> {
            fn drop(&mut self) {
                // Nothing should have repopulated these lists while taken.
                assert!(self.store.host_globals.is_empty());
                self.store.host_globals = mem::take(&mut self.host_globals);
                assert!(self.store.instances.is_empty());
                self.store.instances = mem::take(&mut self.instances);
            }
        }

        let mut temp = TempTakeHostGlobalsAndInstances::new(self);
        // NOTE(review): this `unsafe` relies on the `StoreBox` pointers and
        // instance exports remaining valid for the duration of iteration.
        unsafe {
            // First enumerate all the host-created globals.
            for global in temp.host_globals.iter() {
                let export = ExportGlobal {
                    definition: NonNull::from(&mut global.get().as_mut().global),
                    vmctx: None,
                    global: global.get().as_ref().ty.to_wasm_type(),
                };
                let global = Global::from_wasmtime_global(export, temp.store);
                f(temp.store, global);
            }

            // Then enumerate all instances' defined globals.
            for instance in temp.instances.iter_mut() {
                for (_, export) in instance.handle.defined_globals() {
                    let global = Global::from_wasmtime_global(export, temp.store);
                    f(temp.store, global);
                }
            }
        }
    }
1429
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))] // not used on all platforms
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }

    /// Shared access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1439
    /// Allocates and installs this store's GC heap.
    ///
    /// Must only be called when no GC heap has been allocated yet (asserted
    /// below). Errors when GC support is disabled at configuration time or
    /// compiled out entirely.
    #[inline(never)]
    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // A heap must not already exist: no `GcStore` and the store context's
        // heap description still in its initial dangling/empty state.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let vmstore = self.traitobj();
        let gc_store = allocate_gc_store(self.engine(), vmstore, self.get_pkey())?;
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        self.gc_store = Some(gc_store);
        return Ok(());

        // Helper with two `cfg`-selected definitions: the real allocation
        // path when the `gc` feature is enabled, and an always-erroring stub
        // otherwise.
        #[cfg(feature = "gc")]
        fn allocate_gc_store(
            engine: &Engine,
            vmstore: NonNull<dyn crate::vm::VMStore>,
            pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(
                    wasmtime_environ::Module::default(),
                )),
                imports: crate::vm::Imports::default(),
                host_state: Box::new(()),
                store: StorePtr::new(vmstore),
                wmemcheck: false,
                pkey,
                tunables: engine.tunables(),
            };
            let mem_ty = engine.tunables().gc_heap_memory_type();
            let tunables = engine.tunables();

            // SAFETY: We validated the GC heap's memory type during engine creation.
            let (mem_alloc_index, mem) = unsafe {
                engine
                    .allocator()
                    .allocate_memory(&mut request, &mem_ty, tunables, None)?
            };

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let (index, heap) = engine.allocator().allocate_gc_heap(
                engine,
                &**engine.gc_runtime()?,
                mem_alloc_index,
                mem,
            )?;

            Ok(GcStore::new(index, heap))
        }

        #[cfg(not(feature = "gc"))]
        fn allocate_gc_store(
            _engine: &Engine,
            _vmstore: NonNull<dyn crate::vm::VMStore>,
            _pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1511
    /// Shared access to the GC store, erroring if the GC heap has not been
    /// allocated yet.
    #[inline]
    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }

    /// Mutable access to the GC store, lazily allocating the GC heap on first
    /// use.
    #[inline]
    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
        if self.gc_store.is_none() {
            self.allocate_gc_heap()?;
        }
        Ok(self.unwrap_gc_store_mut())
    }
1527
1528    /// If this store is configured with a GC heap, return a mutable reference
1529    /// to it. Otherwise, return `None`.
1530    #[inline]
1531    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
1532        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1533            debug_assert!(self.gc_store.is_none());
1534            None
1535        } else {
1536            self.gc_store.as_mut()
1537        }
1538    }
1539
1540    /// If this store is configured with a GC heap, return a shared reference to
1541    /// it. Otherwise, return `None`.
1542    #[inline]
1543    #[cfg(feature = "gc")]
1544    pub(crate) fn optional_gc_store(&self) -> Option<&GcStore> {
1545        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1546            debug_assert!(self.gc_store.is_none());
1547            None
1548        } else {
1549            self.gc_store.as_ref()
1550        }
1551    }
1552
1553    #[inline]
1554    #[track_caller]
1555    #[cfg(feature = "gc")]
1556    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
1557        self.gc_store
1558            .as_ref()
1559            .expect("attempted to access the store's GC heap before it has been allocated")
1560    }
1561
1562    #[inline]
1563    #[track_caller]
1564    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
1565        self.gc_store
1566            .as_mut()
1567            .expect("attempted to access the store's GC heap before it has been allocated")
1568    }
1569
    /// Shared access to this store's `RootSet` of GC roots.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1574
    /// Mutable access to this store's `RootSet` of GC roots.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
1580
    /// Exit a LIFO rooting scope, delegating to `RootSet::exit_lifo_scope`
    /// with the store's GC heap (if one has been allocated) and the `scope`
    /// marker previously recorded on entry.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1585
    /// Perform a synchronous garbage collection of this store's GC heap.
    ///
    /// Panics if this store has async support enabled; async stores must use
    /// the async GC entry point instead.
    #[cfg(feature = "gc")]
    fn do_gc(&mut self) {
        assert!(
            !self.async_support(),
            "must use `store.gc_async()` instead of `store.gc()` for async stores"
        );

        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots);
        // NOTE(review): `roots.iter()` is unsafe — presumably because the
        // roots list holds raw pointers that must still be valid here, which
        // `trace_roots` just established. Confirm against `GcRootsList` docs.
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
1613
    /// Gather all GC roots into `gc_roots_list`: roots on the Wasm stack,
    /// roots reachable from vmctx state (globals and tables), and roots held
    /// by the embedder.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1627
    /// Trace GC roots that live in Wasm frames on the stack.
    ///
    /// Walks this store's Wasm backtrace; for each frame that has a stack map
    /// at its PC, reads each live GC-ref stack slot and records every non-null
    /// reference into `gc_roots_list`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::{Backtrace, SendSyncPtr};
        use core::ptr::NonNull;

        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            let pc = frame.pc();
            debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

            let fp = frame.fp() as *mut usize;
            debug_assert!(
                !fp.is_null(),
                "we should always get a valid frame pointer for Wasm frames"
            );

            // The module containing `pc` owns the stack maps for its code.
            let module_info = self
                .modules()
                .lookup_module_by_pc(pc)
                .expect("should have module info for Wasm frame");

            // A frame without a stack map has no live GC refs at this PC.
            let stack_map = match module_info.lookup_stack_map(pc) {
                Some(sm) => sm,
                None => {
                    log::trace!("No stack map for this Wasm frame");
                    return core::ops::ControlFlow::Continue(());
                }
            };
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            // NOTE(review): the unsafety below relies on `fp` pointing at a
            // live Wasm frame described by this stack map, so `sp` and the
            // derived slot pointers are in-bounds — confirm against the
            // stack-map API's safety contract.
            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                let raw: u32 = unsafe { core::ptr::read(stack_slot) };
                log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

                // Null GC refs aren't roots; skip them.
                let gc_ref = VMGcRef::from_raw_u32(raw);
                if gc_ref.is_some() {
                    unsafe {
                        gc_roots_list.add_wasm_stack_root(SendSyncPtr::new(
                            NonNull::new(stack_slot).unwrap(),
                        ));
                    }
                }
            }

            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1682
    /// Trace GC roots reachable from instance state: every global and every
    /// table in this store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
1690
    /// Trace GC roots held by the embedder via this store's `RootSet`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
1697
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // The set keeps the registration alive for the store's lifetime.
        self.gc_host_alloc_types.insert(ty);
    }
1708
    /// Returns the total amount of fuel remaining in this store (active fuel
    /// plus the reserve).
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled for this store's
    /// engine configuration.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // Read the consumed-fuel counter out of its `UnsafeCell`. NOTE(review):
        // this cell is presumably also written by executing wasm code; the read
        // here assumes wasm is not concurrently running — confirm.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
1717
    /// Attempt to refill the active fuel counter from this store's reserve,
    /// delegating to the free `refuel` function. A `false` return is treated
    /// as "out of fuel" by `out_of_gas`.
    fn refuel(&mut self) -> bool {
        // Mutable access to the consumed-fuel counter in its `UnsafeCell`;
        // `&mut self` guarantees exclusivity from the host side.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
1726
    /// Sets the total amount of fuel in this store to `fuel`, splitting it
    /// between the active counter and the reserve via the free `set_fuel`
    /// function.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled for this store's
    /// engine configuration.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // Mutable access to the consumed-fuel counter in its `UnsafeCell`;
        // `&mut self` guarantees exclusivity from the host side.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
1741
1742    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1743        anyhow::ensure!(
1744            self.engine().tunables().consume_fuel,
1745            "fuel is not configured in this store"
1746        );
1747        anyhow::ensure!(
1748            self.engine().config().async_support,
1749            "async support is not configured in this store"
1750        );
1751        anyhow::ensure!(
1752            interval != Some(0),
1753            "fuel_async_yield_interval must not be 0"
1754        );
1755        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1756        // Reset the fuel active + reserve states by resetting the amount.
1757        self.set_fuel(self.get_fuel()?)
1758    }
1759
    /// Returns a raw pointer to this store's custom signal handler, if one is
    /// configured.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        // NOTE(review): relies on an implicit coercion from the borrowed
        // handler to `*const SignalHandler`.
        Some(handler)
    }
1765
    /// Returns a non-null pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
1770
    /// Returns the raw `VMContext` pointer of this store's default caller
    /// instance.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller.vmctx()
    }
1775
    /// Returns the raw `dyn VMStore` trait-object pointer for this store.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn crate::runtime::vm::VMStore> {
        // NOTE(review): the unwrap assumes the self-pointer was installed at
        // store creation time — confirm `traitobj` is always set before use.
        self.traitobj.as_raw().unwrap()
    }
1780
    /// Returns this store as a mutable `dyn VMStore` reference.
    #[inline]
    pub fn traitobj_mut(&mut self) -> &mut dyn crate::runtime::vm::VMStore {
        // SAFETY (review note): reconstitutes a `&mut` from the raw
        // self-pointer returned by `traitobj()`; `&mut self` here provides
        // the required exclusivity.
        unsafe { self.traitobj().as_mut() }
    }
1785
1786    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
1787    /// used as part of calling the host in a `Func::new` method invocation.
1788    #[inline]
1789    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
1790        mem::take(&mut self.hostcall_val_storage)
1791    }
1792
1793    /// Restores the vector previously taken by `take_hostcall_val_storage`
1794    /// above back into the store, allowing it to be used in the future for the
1795    /// next wasm->host call.
1796    #[inline]
1797    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1798        if storage.capacity() > self.hostcall_val_storage.capacity() {
1799            self.hostcall_val_storage = storage;
1800        }
1801    }
1802
1803    /// Same as `take_hostcall_val_storage`, but for the direction of the host
1804    /// calling wasm.
1805    #[inline]
1806    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
1807        mem::take(&mut self.wasm_val_raw_storage)
1808    }
1809
1810    /// Same as `save_hostcall_val_storage`, but for the direction of the host
1811    /// calling wasm.
1812    #[inline]
1813    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1814        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1815            self.wasm_val_raw_storage = storage;
1816        }
1817    }
1818
    /// Roots a set of host function definitions in this store, keeping them
    /// alive until the store is dropped (see `Drop for StoreOpaque`, which
    /// drops `rooted_host_funcs` last).
    pub(crate) fn push_rooted_funcs(&mut self, funcs: Arc<[Definition]>) {
        self.rooted_host_funcs.push(funcs);
    }
1822
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(
        &self,
        pc: usize,
        addr: usize,
    ) -> Option<crate::runtime::vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations the address is so small that it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for instance in self.instances.iter() {
            if let Some(f) = instance.handle.wasm_fault(addr) {
                // At most one instance should claim the faulting address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // The address was not inside any linear memory known to this store:
        // this should be impossible for correctly-generated code, so abort
        // (or fail to compile on targets with no way to abort).
        cfg_if::cfg_if! {
            if #[cfg(any(feature = "std", unix, windows))] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no way to
                // abort the process portably, so flag a compile time error.
                //
                // NB: if this becomes a problem in the future one option would
                // be to extend the `capi.rs` module for no_std platforms, but
                // it remains yet to be seen at this time if this is hit much.
                compile_error!("either `std` or `panic=abort` must be enabled");
                None
            }
        }
    }
1926
    /// Retrieve the store's protection key.
    ///
    /// Returns `None` when no protection key was assigned to this store.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
1932
    /// Borrow all three pieces of component-model resource state at once:
    /// the active call contexts, the host resource table, and the host
    /// resource data. Returning them as a tuple lets callers hold the three
    /// mutable borrows simultaneously.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut crate::runtime::vm::component::CallContexts,
        &mut crate::runtime::vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
1948
    /// Record that a component instance was created within this store. The
    /// count is balanced against the allocator at drop time (see `Drop for
    /// StoreOpaque`).
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
1958
    /// Stub for non-async builds: there is no fiber stack, so the guard range
    /// is the empty `null..null` range.
    #[cfg(not(feature = "async"))]
    pub(crate) fn async_guard_range(&self) -> core::ops::Range<*mut u8> {
        core::ptr::null_mut()..core::ptr::null_mut()
    }
1963
    /// Returns a reference to the executor — interpreter or native — used to
    /// run wasm code in this store.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            // The native variant only exists when a host compiler backend is
            // available for this target.
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
1971
    /// Returns the stack-unwinding implementation matching this store's
    /// executor: Pulley unwinding for the interpreter, host unwinding for
    /// natively-compiled code.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(_) => &crate::runtime::vm::UnwindPulley,
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &crate::runtime::vm::UnwindHost,
        }
    }
1979}
1980
// Bridges the typed `StoreInner<T>` into the type-erased `VMStore` trait used
// by the runtime internals. NOTE(review): `VMStore` is an `unsafe` trait —
// its exact safety contract lives at the trait's declaration site.
unsafe impl<T> crate::runtime::vm::VMStore for StoreInner<T> {
    // Expose this store as the component-model-async store trait object.
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    // Shared access to the type-erased portion of this store.
    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    // Mutable access to the type-erased portion of this store.
    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    // Ask the configured resource limiter (if any) whether a linear memory
    // may grow from `current` to `desired` bytes. With no limiter configured,
    // growth is always approved.
    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                // An async limiter requires an async store: block on the
                // limiter's future from within this context.
                self.inner
                    .async_cx()
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .memory_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            None => Ok(true),
        }
    }

    // Report a failed memory growth to the limiter; with no limiter the error
    // is logged at debug level and discarded.
    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            None => {
                log::debug!("ignoring memory growth failure error: {error:?}");
                Ok(())
            }
        }
    }

    // Table analog of `memory_growing` above.
    fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        // Need to borrow async_cx before the mut borrow of the limiter.
        // self.async_cx() panicks when used with a non-async store, so
        // wrap this in an option.
        #[cfg(feature = "async")]
        let async_cx = if self.async_support()
            && matches!(self.limiter, Some(ResourceLimiterInner::Async(_)))
        {
            Some(self.async_cx().unwrap())
        } else {
            None
        };

        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                async_cx
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(limiter(&mut self.data).table_growing(current, desired, maximum))?
            },
            None => Ok(true),
        }
    }

    // Table analog of `memory_grow_failed` above.
    fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            None => {
                log::debug!("ignoring table growth failure: {error:?}");
                Ok(())
            }
        }
    }

    // Called when wasm exhausts its active fuel: refill from the reserve or
    // trap with `Trap::OutOfFuel`, optionally yielding to the async executor
    // when a fuel yield interval is configured.
    fn out_of_gas(&mut self) -> Result<()> {
        if !self.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if self.fuel_yield_interval.is_some() {
            self.async_yield_impl()?;
        }
        Ok(())
    }

    // Called when wasm hits its epoch deadline: run the configured deadline
    // callback (trapping with `Trap::Interrupt` when none is installed) and
    // return the updated deadline.
    #[cfg(target_has_atomic = "64")]
    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let delta_result = match &mut behavior {
            None => Err(Trap::Interrupt.into()),
            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
                let delta = match update {
                    UpdateDeadline::Continue(delta) => delta,

                    #[cfg(feature = "async")]
                    UpdateDeadline::Yield(delta) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
                        );
                        // Do the async yield. May return a trap if future was
                        // canceled while we're yielded.
                        self.async_yield_impl()?;
                        delta
                    }
                };

                // Set a new deadline and return the new epoch deadline so
                // the Wasm code doesn't have to reload it.
                self.set_epoch_deadline(delta);
                Ok(self.get_epoch_deadline())
            })
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        delta_result
    }

    // GC hook: delegate to the store's (maybe-async) GC entry point.
    #[cfg(feature = "gc")]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        self.inner.maybe_async_gc(root, bytes_needed)
    }

    // Without GC support there is nothing to collect; hand the root back.
    #[cfg(not(feature = "gc"))]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        _bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        Ok(root)
    }

    // Mutable access to the component-model call contexts.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut crate::runtime::vm::component::CallContexts {
        &mut self.component_calls
    }
}
2156
impl<T> StoreInner<T> {
    // Epoch-deadline plumbing shared between the public API and `new_epoch`.

    /// Set the epoch deadline to `delta` ticks past the engine's current
    /// epoch.
    #[cfg(target_has_atomic = "64")]
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Safety: this is safe because the epoch deadline in the
        // `VMStoreContext` is accessed only here and by Wasm guest code
        // running in this store, and we have a `&mut self` here.
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let epoch_deadline = unsafe {
            self.vm_store_context_ptr()
                .as_mut()
                .epoch_deadline
                .get_mut()
        };
        *epoch_deadline = self.engine().current_epoch() + delta;
    }

    /// Clear the deadline behavior; `new_epoch` then traps with
    /// `Trap::Interrupt` at the next deadline.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Install a callback to run whenever the epoch deadline is reached.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }

    /// Read the current epoch deadline out of the `VMStoreContext`.
    fn get_epoch_deadline(&self) -> u64 {
        // Safety: this is safe because, as above, it is only invoked
        // from within `new_epoch` which is called from guest Wasm
        // code, which will have an exclusive borrow on the Store.
        let epoch_deadline = unsafe {
            self.vm_store_context_ptr()
                .as_mut()
                .epoch_deadline
                .get_mut()
        };
        *epoch_deadline
    }
}
2204
2205impl<T: Default> Default for Store<T> {
2206    fn default() -> Store<T> {
2207        Store::new(&Engine::default(), T::default())
2208    }
2209}
2210
2211impl<T: fmt::Debug> fmt::Debug for Store<T> {
2212    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2213        let inner = &**self.inner as *const StoreInner<T>;
2214        f.debug_struct("Store")
2215            .field("inner", &inner)
2216            .field("data", &self.inner.data)
2217            .finish()
2218    }
2219}
2220
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        self.inner.flush_fiber_stack();

        // for documentation on this `unsafe`, see `into_data`.
        unsafe {
            // `data` must go first: `Drop for StoreOpaque` (run by the second
            // drop below) deliberately does not touch `data`.
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2232
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap, and then the memory backing it, to the
            // engine's allocator.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Deallocate every instance owned by this store. Dummy instances
            // go back to the default on-demand allocator rather than the
            // engine's allocator (presumably matching how they were
            // allocated).
            for (idx, instance) in self.instances.iter_mut().enumerate() {
                let id = InstanceId::from_index(idx);
                log::trace!("store {store_id:?} is deallocating {id:?}");
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }

            log::trace!("store {store_id:?} is deallocating its default caller instance");
            ondemand.deallocate_module(&mut self.default_caller);

            // Balance the count increments performed by
            // `push_component_instance`.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }

            // See documentation for these fields on `StoreOpaque` for why they
            // must be dropped in this order.
            ManuallyDrop::drop(&mut self.store_data);
            ManuallyDrop::drop(&mut self.rooted_host_funcs);
        }
    }
}
2280
2281#[cfg(test)]
2282mod tests {
2283    use super::{get_fuel, refuel, set_fuel};
2284    use std::num::NonZeroU64;
2285
    /// Test double mirroring the store's fuel bookkeeping: a consumed-fuel
    /// counter, a reserve, and an optional async yield interval.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }
2291
    impl FuelTank {
        /// An empty tank: no fuel, no reserve, no yield interval.
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        /// Total remaining fuel, via the free `get_fuel` under test.
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        /// Refill the active counter from the reserve, via the free `refuel`
        /// under test.
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        /// Set the total fuel, via the free `set_fuel` under test.
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }
2319
    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        // Without a yield interval all fuel goes into the active (negative)
        // counter and nothing is held in reserve.
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With a yield interval of 10, only one interval's worth is active
        // and the remainder is held in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }
2332
2333    #[test]
2334    fn does_not_lose_precision() {
2335        let mut tank = FuelTank::new();
2336        tank.set_fuel(u64::MAX);
2337        assert_eq!(tank.get_fuel(), u64::MAX);
2338
2339        tank.set_fuel(i64::MAX as u64);
2340        assert_eq!(tank.get_fuel(), i64::MAX as u64);
2341
2342        tank.set_fuel(i64::MAX as u64 + 1);
2343        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
2344    }
2345
2346    #[test]
2347    fn yielding_does_not_lose_precision() {
2348        let mut tank = FuelTank::new();
2349
2350        tank.yield_interval = NonZeroU64::new(10);
2351        tank.set_fuel(u64::MAX);
2352        assert_eq!(tank.get_fuel(), u64::MAX);
2353        assert_eq!(tank.consumed_fuel, -10);
2354        assert_eq!(tank.reserve_fuel, u64::MAX - 10);
2355
2356        tank.yield_interval = NonZeroU64::new(u64::MAX);
2357        tank.set_fuel(u64::MAX);
2358        assert_eq!(tank.get_fuel(), u64::MAX);
2359        assert_eq!(tank.consumed_fuel, -i64::MAX);
2360        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
2361
2362        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
2363        tank.set_fuel(u64::MAX);
2364        assert_eq!(tank.get_fuel(), u64::MAX);
2365        assert_eq!(tank.consumed_fuel, -i64::MAX);
2366        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
2367    }
2368
2369    #[test]
2370    fn refueling() {
2371        // It's possible to fuel to have consumed over the limit as some instructions can consume
2372        // multiple units of fuel at once. Refueling should be strict in it's consumption and not
2373        // add more fuel than there is.
2374        let mut tank = FuelTank::new();
2375
2376        tank.yield_interval = NonZeroU64::new(10);
2377        tank.reserve_fuel = 42;
2378        tank.consumed_fuel = 4;
2379        assert!(tank.refuel());
2380        assert_eq!(tank.reserve_fuel, 28);
2381        assert_eq!(tank.consumed_fuel, -10);
2382
2383        tank.yield_interval = NonZeroU64::new(1);
2384        tank.reserve_fuel = 8;
2385        tank.consumed_fuel = 4;
2386        assert_eq!(tank.get_fuel(), 4);
2387        assert!(tank.refuel());
2388        assert_eq!(tank.reserve_fuel, 3);
2389        assert_eq!(tank.consumed_fuel, -1);
2390        assert_eq!(tank.get_fuel(), 4);
2391
2392        tank.yield_interval = NonZeroU64::new(10);
2393        tank.reserve_fuel = 3;
2394        tank.consumed_fuel = 4;
2395        assert_eq!(tank.get_fuel(), 0);
2396        assert!(!tank.refuel());
2397        assert_eq!(tank.reserve_fuel, 3);
2398        assert_eq!(tank.consumed_fuel, 4);
2399        assert_eq!(tank.get_fuel(), 0);
2400    }
2401}