// wasmtime/runtime/store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//!   Stored inline in the outer type the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::RootSet;
80use crate::module::RegisteredModuleId;
81use crate::prelude::*;
82#[cfg(feature = "gc")]
83use crate::runtime::vm::GcRootsList;
84#[cfg(feature = "stack-switching")]
85use crate::runtime::vm::VMContRef;
86use crate::runtime::vm::mpk::ProtectionKey;
87use crate::runtime::vm::{
88    self, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
89    Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SendSyncPtr,
90    SignalHandler, StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMStoreContext,
91};
92use crate::trampoline::VMHostGlobalContext;
93use crate::{Engine, Module, Trap, Val, ValRaw, module::ModuleRegistry};
94use crate::{Global, Instance, Memory, Table, Uninhabited};
95use alloc::sync::Arc;
96use core::fmt;
97use core::marker;
98use core::mem::{self, ManuallyDrop};
99use core::num::NonZeroU64;
100use core::ops::{Deref, DerefMut};
101use core::pin::Pin;
102use core::ptr::NonNull;
103use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
104
105mod context;
106pub use self::context::*;
107mod data;
108pub use self::data::*;
109mod func_refs;
110use func_refs::FuncRefs;
111#[cfg(feature = "async")]
112mod async_;
113#[cfg(all(feature = "async", feature = "call-hook"))]
114pub use self::async_::CallHookHandler;
115#[cfg(feature = "async")]
116use self::async_::*;
117#[cfg(feature = "gc")]
118mod gc;
119
120/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
121///
122/// All WebAssembly instances and items will be attached to and refer to a
123/// [`Store`]. For example instances, functions, globals, and tables are all
124/// attached to a [`Store`]. Instances are created by instantiating a
125/// [`Module`](crate::Module) within a [`Store`].
126///
127/// A [`Store`] is intended to be a short-lived object in a program. No form
128/// of GC is implemented at this time so once an instance is created within a
129/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
130/// This makes [`Store`] unsuitable for creating an unbounded number of
131/// instances in it because [`Store`] will never release this memory. It's
132/// recommended to have a [`Store`] correspond roughly to the lifetime of a
133/// "main instance" that an embedding is interested in executing.
134///
135/// ## Type parameter `T`
136///
137/// Each [`Store`] has a type parameter `T` associated with it. This `T`
138/// represents state defined by the host. This state will be accessible through
139/// the [`Caller`](crate::Caller) type that host-defined functions get access
140/// to. This `T` is suitable for storing `Store`-specific information which
141/// imported functions may want access to.
142///
143/// The data `T` can be accessed through methods like [`Store::data`] and
144/// [`Store::data_mut`].
145///
146/// ## Stores, contexts, oh my
147///
148/// Most methods in Wasmtime take something of the form
149/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
150/// the first argument. These two traits allow ergonomically passing in the
151/// context you currently have to any method. The primary two sources of
152/// contexts are:
153///
154/// * `Store<T>`
155/// * `Caller<'_, T>`
156///
157/// corresponding to what you create and what you have access to in a host
158/// function. You can also explicitly acquire a [`StoreContext`] or
159/// [`StoreContextMut`] and pass that around as well.
160///
161/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
162/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
163/// form of context you have you can call various methods, create objects, etc.
164///
165/// ## Stores and `Default`
166///
167/// You can create a store with default configuration settings using
168/// `Store::default()`. This will create a brand new [`Engine`] with default
169/// configuration (see [`Config`](crate::Config) for more information).
170///
171/// ## Cross-store usage of items
172///
173/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
174/// [`Store`]. The store they belong to is the one they were created with
175/// (passed in as a parameter) or instantiated with. This store is the only
176/// store that can be used to interact with wasm items after they're created.
177///
178/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
179/// operations is incorrect. In other words it's considered a programmer error
180/// rather than a recoverable error for the wrong [`Store`] to be used when
181/// calling APIs.
pub struct Store<T: 'static> {
    // The sole field, keeping `Store<T>` exactly one pointer in size (a
    // load-bearing detail — see the module docs above). `ManuallyDrop` lets
    // `Store::into_data` move the host `T` out without double-dropping; the
    // full drop protocol is documented in `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
186
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
///
/// Use [`CallHook::entering_host`] / [`CallHook::exiting_host`] to classify a
/// transition by which side (host or wasm) is being entered.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}
200
201impl CallHook {
202    /// Indicates the VM is entering host code (exiting WebAssembly code)
203    pub fn entering_host(&self) -> bool {
204        match self {
205            CallHook::ReturningFromWasm | CallHook::CallingHost => true,
206            _ => false,
207        }
208    }
209    /// Indicates the VM is exiting host code (entering WebAssembly code)
210    pub fn exiting_host(&self) -> bool {
211        match self {
212            CallHook::ReturningFromHost | CallHook::CallingWasm => true,
213            _ => false,
214        }
215    }
216}
217
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Embedder-configured resource limiter accessor, installed via
    /// `Store::limiter` (or its async variant).
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on host<->wasm call transitions; see
    /// `Store::call_hook`.
    call_hook: Option<CallHookInner<T>>,
    /// Callback run when the engine's epoch reaches this store's deadline,
    /// returning how to proceed via `UpdateDeadline`.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // The host-provided data itself. Wrapped in `ManuallyDrop` because
    // `Store::into_data` moves `T` out; see the comments there for the full
    // drop protocol.
    data: ManuallyDrop<T>,
}
235
/// Storage for a store's configured resource limiter.
///
/// Each variant holds an accessor closure projecting the host data `&mut T`
/// to the embedder's limiter implementation, in sync or async flavor.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
241
/// Storage for a store's configured call hook (see `Store::call_hook`).
enum CallHookInner<T: 'static> {
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Never constructed (`Uninhabited` has no values); exists solely so the
    /// type parameter `T` is used even when the hook-related features above
    /// are compiled out.
    #[allow(dead_code)]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
253
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// Returned from the epoch-deadline callback configured on a store.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
278
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`.
//
// This lets internal code holding a `&(mut) StoreInner<T>` call the
// monomorphic `StoreOpaque` APIs directly without naming the `inner` field.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
292
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store was created with and belongs to.
    engine: Engine,
    /// Store state shared with the runtime/VM layer.
    // NOTE(review): exact contents are defined by `VMStoreContext` in the vm
    // module — see its documentation.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated within this store, keyed by `InstanceId`.
    /// Includes internal "dummy" instances (see `StoreInstanceKind`).
    instances: PrimaryMap<InstanceId, StoreInstance>,

    /// Count of component instances, tracked separately from core instances.
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Optional custom signal handler installed for this store.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules associated with this store (see `ModuleRegistry`).
    modules: ModuleRegistry,
    /// Store-owned function reference state; see the `func_refs` submodule.
    func_refs: FuncRefs,
    /// Host-created globals, each backed by a `VMHostGlobalContext`.
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    /// Lazily-created GC heap state: `None` until a GC heap is needed
    /// (initialized as `None` in `Store::new`).
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-referential pointer to this store's own `StoreInner<T>` as a
    /// trait object; initialized in `Store::new` once the box exists (this is
    /// part of the aliasing described in the comment at the top of this
    /// struct).
    traitobj: StorePtr,
    /// The `VMContext` of the dummy "default callee" instance allocated in
    /// `Store::new`; starts as a dangling placeholder and is patched there.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
399
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
enum Executor {
    /// Wasm runs in the Pulley interpreter; this is always the case when no
    /// host compiler backend is available (see `Store::new`).
    Interpreter(Interpreter),
    /// Wasm runs as natively-compiled code.
    #[cfg(has_host_compiler_backend)]
    Native,
}
409
/// A borrowed reference to `Executor` above.
///
/// Mirrors `Executor`'s variants, with the interpreter case holding a
/// borrowed `InterpreterRef` rather than owned interpreter state.
pub(crate) enum ExecutorRef<'a> {
    Interpreter(InterpreterRef<'a>),
    #[cfg(has_host_compiler_backend)]
    Native,
}
416
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is held in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered (false when the `gc`
    /// feature is disabled or no GC heap has been allocated yet); controls
    /// whether `Drop` exits the scope.
    entered: bool,
}
423
424impl<'a> AutoAssertNoGc<'a> {
425    #[inline]
426    pub fn new(store: &'a mut StoreOpaque) -> Self {
427        let entered = if !cfg!(feature = "gc") {
428            false
429        } else if let Some(gc_store) = store.gc_store.as_mut() {
430            gc_store.gc_heap.enter_no_gc_scope();
431            true
432        } else {
433            false
434        };
435
436        AutoAssertNoGc { store, entered }
437    }
438
439    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
440    /// disables checks for no GC happening for the duration of this value.
441    ///
442    /// This is used when it is statically otherwise known that a GC doesn't
443    /// happen for the various types involved.
444    ///
445    /// # Unsafety
446    ///
447    /// This method is `unsafe` as it does not provide the same safety
448    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
449    /// caller that a GC doesn't happen.
450    #[inline]
451    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
452        if cfg!(debug_assertions) {
453            AutoAssertNoGc::new(store)
454        } else {
455            AutoAssertNoGc {
456                store,
457                entered: false,
458            }
459        }
460    }
461}
462
// `AutoAssertNoGc` transparently dereferences to the underlying store so the
// guard can be used anywhere a `&(mut) StoreOpaque` is expected.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // Explicit reborrow of the `&mut` reference as shared.
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
478
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit a scope if `new` actually entered one. `entered == true`
        // implies a GC store existed at construction, so the unwrap here
        // cannot fail.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
487
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// Handle to the underlying runtime instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
496
/// Distinguishes real, user-visible instances from internal dummy instances.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
513
514impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        let pkey = engine.allocator().next_available_pkey();

        // Build the monomorphic portion first. `traitobj` and
        // `default_caller_vmctx` start as placeholders and are patched below
        // once the store has its final heap address.
        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            // GC state is created lazily, only when a GC heap is needed.
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: AsyncState::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // Placeholder; set to the boxed `StoreInner` pointer below.
            traitobj: StorePtr::empty(),
            // Placeholder; set to the dummy instance's vmctx below.
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            // Pick the executor: the Pulley interpreter when targeting
            // Pulley, otherwise native execution (the only choice when no
            // host compiler backend is compiled in).
            #[cfg(has_host_compiler_backend)]
            executor: if cfg!(feature = "pulley") && engine.target().is_pulley() {
                Executor::Interpreter(Interpreter::new(engine))
            } else {
                Executor::Native
            },
            #[cfg(not(has_host_compiler_backend))]
            executor: {
                debug_assert!(engine.target().is_pulley());
                Executor::Interpreter(Interpreter::new(engine))
            },
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // Now that the `StoreInner` is boxed its address is stable, so the
        // self-referential trait-object pointer can be recorded.
        inner.traitobj = StorePtr::new(NonNull::from(&mut *inner));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::default());
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        // NOTE(review): this allocation happens after `traitobj` is
        // initialized above — presumably `allocate_instance` relies on that;
        // confirm against its contract.
        unsafe {
            let id = inner
                .allocate_instance(
                    AllocateInstanceKind::Dummy {
                        allocator: &allocator,
                    },
                    &shim,
                    Default::default(),
                )
                .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
627
    /// Access the underlying data owned by this `Store`.
    ///
    /// This is the same `T` that was provided to [`Store::new`].
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
633
    /// Access the underlying data owned by this `Store`.
    ///
    /// Mutable counterpart of [`Store::data`].
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
639
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    ///
    /// Everything else owned by the store is dropped; only the host-provided
    /// `T` survives and is returned to the caller.
    pub fn into_data(mut self) -> T {
        // Give the store a chance to flush its fiber stack before teardown.
        self.inner.flush_fiber_stack();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }
674
675    /// Configures the [`ResourceLimiter`] used to limit resource creation
676    /// within this [`Store`].
677    ///
678    /// Whenever resources such as linear memory, tables, or instances are
679    /// allocated the `limiter` specified here is invoked with the store's data
680    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
681    /// being allocated. The returned [`ResourceLimiter`] is intended to live
682    /// within the `T` itself, for example by storing a
683    /// [`StoreLimits`](crate::StoreLimits).
684    ///
685    /// Note that this limiter is only used to limit the creation/growth of
686    /// resources in the future, this does not retroactively attempt to apply
687    /// limits to the [`Store`].
688    ///
689    /// # Examples
690    ///
691    /// ```
692    /// use wasmtime::*;
693    ///
694    /// struct MyApplicationState {
695    ///     my_state: u32,
696    ///     limits: StoreLimits,
697    /// }
698    ///
699    /// let engine = Engine::default();
700    /// let my_state = MyApplicationState {
701    ///     my_state: 42,
702    ///     limits: StoreLimitsBuilder::new()
703    ///         .memory_size(1 << 20 /* 1 MB */)
704    ///         .instances(2)
705    ///         .build(),
706    /// };
707    /// let mut store = Store::new(&engine, my_state);
708    /// store.limiter(|state| &mut state.limits);
709    ///
710    /// // Creation of smaller memories is allowed
711    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
712    ///
713    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
714    /// // configured
715    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
716    ///
717    /// // The number of instances in this store is limited to 2, so the third
718    /// // instance here should fail.
719    /// let module = Module::new(&engine, "(module)").unwrap();
720    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
721    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
722    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
723    /// ```
724    ///
725    /// [`ResourceLimiter`]: crate::ResourceLimiter
726    pub fn limiter(
727        &mut self,
728        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
729    ) {
730        // Apply the limits on instances, tables, and memory given by the limiter:
731        let inner = &mut self.inner;
732        let (instance_limit, table_limit, memory_limit) = {
733            let l = limiter(&mut inner.data);
734            (l.instances(), l.tables(), l.memories())
735        };
736        let innermost = &mut inner.inner;
737        innermost.instance_limit = instance_limit;
738        innermost.table_limit = table_limit;
739        innermost.memory_limit = memory_limit;
740
741        // Save the limiter accessor function:
742        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
743    }
744
745    /// Configure a function that runs on calls and returns between WebAssembly
746    /// and host code.
747    ///
748    /// The function is passed a [`CallHook`] argument, which indicates which
749    /// state transition the VM is making.
750    ///
751    /// This function may return a [`Trap`]. If a trap is returned when an
752    /// import was called, it is immediately raised as-if the host import had
753    /// returned the trap. If a trap is returned after wasm returns to the host
754    /// then the wasm function's result is ignored and this trap is returned
755    /// instead.
756    ///
757    /// After this function returns a trap, it may be called for subsequent returns
758    /// to host or wasm code as the trap propagates to the root call.
759    #[cfg(feature = "call-hook")]
760    pub fn call_hook(
761        &mut self,
762        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
763    ) {
764        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
765    }
766
    /// Returns the [`Engine`] that this store is associated with.
    ///
    /// A store holds onto its engine for its entire lifetime; this is a
    /// cheap accessor for that handle.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }
771
772    /// Perform garbage collection.
773    ///
774    /// Note that it is not required to actively call this function. GC will
775    /// automatically happen according to various internal heuristics. This is
776    /// provided if fine-grained control over the GC is desired.
777    ///
778    /// If you are calling this method after an attempted allocation failed, you
779    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
780    /// When you do so, this method will attempt to create enough space in the
781    /// GC heap for that allocation, so that it will succeed on the next
782    /// attempt.
783    ///
784    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // Synchronous GC is only valid for non-async stores; async stores
        // must use the async GC entry point instead.
        assert!(!self.inner.async_support());
        self.inner.gc(why);
    }
790
791    /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must
792    /// be configured via [`Store::set_fuel`].
793    ///
794    /// # Errors
795    ///
796    /// This function will return an error if fuel consumption is not enabled
797    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        // Delegate to the internal store; errors if `Config::consume_fuel`
        // was not enabled (see the docs above).
        self.inner.get_fuel()
    }
801
802    /// Set the fuel to this [`Store`] for wasm to consume while executing.
803    ///
804    /// For this method to work fuel consumption must be enabled via
805    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
806    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
807    /// immediately trap). This function must be called for the store to have
808    /// some fuel to allow WebAssembly to execute.
809    ///
810    /// Most WebAssembly instructions consume 1 unit of fuel. Some
811    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
812    /// units, as any execution cost associated with them involves other
813    /// instructions which do consume fuel.
814    ///
815    /// Note that when fuel is entirely consumed it will cause wasm to trap.
816    ///
817    /// # Errors
818    ///
819    /// This function will return an error if fuel consumption is not enabled via
820    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // Delegate to the internal store; errors if `Config::consume_fuel`
        // was not enabled (see the docs above).
        self.inner.set_fuel(fuel)
    }
824
825    /// Configures a [`Store`] to yield execution of async WebAssembly code
826    /// periodically.
827    ///
828    /// When a [`Store`] is configured to consume fuel with
829    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
830    /// configure WebAssembly to be suspended and control will be yielded back to the
831    /// caller every `interval` units of fuel consumed. This is only suitable with use of
832    /// a store associated with an [async config](crate::Config::async_support) because
833    /// only then are futures used and yields are possible.
834    ///
835    /// The purpose of this behavior is to ensure that futures which represent
836    /// execution of WebAssembly do not execute too long inside their
837    /// `Future::poll` method. This allows for some form of cooperative
838    /// multitasking where WebAssembly will voluntarily yield control
839    /// periodically (based on fuel consumption) back to the running thread.
840    ///
841    /// Note that futures returned by this crate will automatically flag
842    /// themselves to get re-polled if a yield happens. This means that
843    /// WebAssembly will continue to execute, just after giving the host an
844    /// opportunity to do something else.
845    ///
846    /// The `interval` parameter indicates how much fuel should be
847    /// consumed between yields of an async future. When fuel runs out wasm will trap.
848    ///
849    /// # Error
850    ///
851    /// This method will error if it is not called on a store associated with an [async
852    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // `None` disables periodic yielding; per the docs above this errors
        // on stores without async support configured.
        self.inner.fuel_async_yield_interval(interval)
    }
856
857    /// Sets the epoch deadline to a certain number of ticks in the future.
858    ///
859    /// When the Wasm guest code is compiled with epoch-interruption
860    /// instrumentation
861    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
862    /// and when the `Engine`'s epoch is incremented
863    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
864    /// past a deadline, execution can be configured to either trap or
865    /// yield and then continue.
866    ///
867    /// This deadline is always set relative to the current epoch:
868    /// `ticks_beyond_current` ticks in the future. The deadline can
869    /// be set explicitly via this method, or refilled automatically
870    /// on a yield if configured via
871    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
872    /// this method is invoked, the deadline is reached when
873    /// [`Engine::increment_epoch()`] has been invoked at least
874    /// `ticks_beyond_current` times.
875    ///
876    /// By default a store will trap immediately with an epoch deadline of 0
877    /// (which has always "elapsed"). This method is required to be configured
878    /// for stores with epochs enabled to some future epoch deadline.
879    ///
880    /// See documentation on
881    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
882    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // The deadline is relative to the engine's current epoch; see the
        // docs above for how it interacts with `Engine::increment_epoch`.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
887
888    /// Configures epoch-deadline expiration to trap.
889    ///
890    /// When epoch-interruption-instrumented code is executed on this
891    /// store and the epoch deadline is reached before completion,
892    /// with the store configured in this way, execution will
893    /// terminate with a trap as soon as an epoch check in the
894    /// instrumented code is reached.
895    ///
896    /// This behavior is the default if the store is not otherwise
897    /// configured via
898    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
899    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
900    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
901    ///
902    /// This setting is intended to allow for coarse-grained
903    /// interruption, but not a deterministic deadline of a fixed,
904    /// finite interval. For deterministic interruption, see the
905    /// "fuel" mechanism instead.
906    ///
907    /// Note that when this is used it's required to call
908    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
909    /// trap.
910    ///
911    /// See documentation on
912    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
913    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        // Records trap-on-deadline (the default behavior) on the internal
        // store state.
        self.inner.epoch_deadline_trap();
    }
918
919    /// Configures epoch-deadline expiration to invoke a custom callback
920    /// function.
921    ///
922    /// When epoch-interruption-instrumented code is executed on this
923    /// store and the epoch deadline is reached before completion, the
924    /// provided callback function is invoked.
925    ///
926    /// This callback should either return an [`UpdateDeadline`], or
927    /// return an error, which will terminate execution with a trap.
928    ///
929    /// The [`UpdateDeadline`] is a positive number of ticks to
930    /// add to the epoch deadline, as well as indicating what
931    /// to do after the callback returns. If the [`Store`] is
932    /// configured with async support, then the callback may return
933    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
934    /// to yield to the async executor before updating the epoch deadline.
935    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
936    /// update the epoch deadline immediately.
937    ///
938    /// This setting is intended to allow for coarse-grained
939    /// interruption, but not a deterministic deadline of a fixed,
940    /// finite interval. For deterministic interruption, see the
941    /// "fuel" mechanism instead.
942    ///
943    /// See documentation on
944    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
945    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Box the callback and hand it to the internal store state.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
953}
954
955impl<'a, T> StoreContext<'a, T> {
956    pub(crate) fn async_support(&self) -> bool {
957        self.0.async_support()
958    }
959
960    /// Returns the underlying [`Engine`] this store is connected to.
961    pub fn engine(&self) -> &Engine {
962        self.0.engine()
963    }
964
965    /// Access the underlying data owned by this `Store`.
966    ///
967    /// Same as [`Store::data`].
968    pub fn data(&self) -> &'a T {
969        self.0.data()
970    }
971
972    /// Returns the remaining fuel in this store.
973    ///
974    /// For more information see [`Store::get_fuel`].
975    pub fn get_fuel(&self) -> Result<u64> {
976        self.0.get_fuel()
977    }
978}
979
980impl<'a, T> StoreContextMut<'a, T> {
981    /// Access the underlying data owned by this `Store`.
982    ///
983    /// Same as [`Store::data`].
984    pub fn data(&self) -> &T {
985        self.0.data()
986    }
987
988    /// Access the underlying data owned by this `Store`.
989    ///
990    /// Same as [`Store::data_mut`].
991    pub fn data_mut(&mut self) -> &mut T {
992        self.0.data_mut()
993    }
994
995    /// Returns the underlying [`Engine`] this store is connected to.
996    pub fn engine(&self) -> &Engine {
997        self.0.engine()
998    }
999
1000    /// Perform garbage collection of `ExternRef`s.
1001    ///
1002    /// Same as [`Store::gc`].
1003    ///
1004    /// This method is only available when the `gc` Cargo feature is enabled.
1005    #[cfg(feature = "gc")]
1006    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
1007        self.0.gc(why);
1008    }
1009
1010    /// Returns remaining fuel in this store.
1011    ///
1012    /// For more information see [`Store::get_fuel`]
1013    pub fn get_fuel(&self) -> Result<u64> {
1014        self.0.get_fuel()
1015    }
1016
1017    /// Set the amount of fuel in this store.
1018    ///
1019    /// For more information see [`Store::set_fuel`]
1020    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1021        self.0.set_fuel(fuel)
1022    }
1023
1024    /// Configures this `Store` to periodically yield while executing futures.
1025    ///
1026    /// For more information see [`Store::fuel_async_yield_interval`]
1027    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1028        self.0.fuel_async_yield_interval(interval)
1029    }
1030
1031    /// Sets the epoch deadline to a certain number of ticks in the future.
1032    ///
1033    /// For more information see [`Store::set_epoch_deadline`].
1034    #[cfg(target_has_atomic = "64")]
1035    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
1036        self.0.set_epoch_deadline(ticks_beyond_current);
1037    }
1038
1039    /// Configures epoch-deadline expiration to trap.
1040    ///
1041    /// For more information see [`Store::epoch_deadline_trap`].
1042    #[cfg(target_has_atomic = "64")]
1043    pub fn epoch_deadline_trap(&mut self) {
1044        self.0.epoch_deadline_trap();
1045    }
1046}
1047
impl<T> StoreInner<T> {
    /// Shared access to the user-provided `T` stored in this store.
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    /// Mutable access to the user-provided `T` stored in this store.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Invoke the store's configured call hook (if any) for the state
    /// transition `s`, additionally toggling protection-key access when a
    /// pkey is in use.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        // Fast path: neither a pkey nor a hook is configured, so there's
        // nothing to do. Kept small and `#[inline]` since this runs on every
        // host<->wasm transition.
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // When a protection key is active, restrict memory access on entry to
        // wasm and relax it again when control returns to the host.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        #[cfg_attr(not(feature = "call-hook"), allow(unreachable_patterns))]
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            // Put the hook back even when it returned an error so subsequent
            // transitions still observe it.
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatch to whichever hook variant is configured.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => unsafe {
                self.inner
                    .async_cx()
                    .ok_or_else(|| anyhow!("couldn't grab async_cx for call hook"))?
                    .block_on(
                        handler
                            .handle_call_event((&mut *self).as_context_mut(), s)
                            .as_mut(),
                    )?
            },

            // This variant exists only so the `T` type parameter is used; it
            // holds an uninhabited value, so this arm can never execute.
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    /// No-op shim present when the `async` feature is disabled so callers can
    /// invoke this method unconditionally.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1120
/// Compute the total fuel remaining: the banked reserve plus whatever is
/// left of the amount currently injected into the VM.
///
/// `injected_fuel` is the VM's counter, stored negated while fuel remains
/// (see `set_fuel` below), so negating it yields the active fuel available.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // Use `saturating_neg` so `injected_fuel == i64::MIN` cannot overflow
    // the negation (a bare `-` panics in debug builds and wraps in release);
    // the saturating add then clamps the total into `u64` range.
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1124
1125// Add remaining fuel from the reserve into the active fuel if there is any left.
1126fn refuel(
1127    injected_fuel: &mut i64,
1128    fuel_reserve: &mut u64,
1129    yield_interval: Option<NonZeroU64>,
1130) -> bool {
1131    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1132    if fuel > 0 {
1133        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1134        true
1135    } else {
1136        false
1137    }
1138}
1139
/// Distribute `new_fuel_amount` between the VM's active fuel counter and the
/// store-side reserve.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // With periodic yields configured only one interval's worth of fuel is
    // handed to the VM at a time; otherwise there is effectively no cap.
    let per_injection_cap = match yield_interval {
        Some(interval) => interval.get(),
        None => u64::MAX,
    };
    // The VM stores fuel in an i64, so the injected amount must also fit in
    // i64 range.
    let injected = new_fuel_amount.min(per_injection_cap).min(i64::MAX as u64);
    // Whatever wasn't injected is banked in the reserve for later refills.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM counts fuel by incrementing this counter and halts once it goes
    // positive, so store the injected amount negated.
    *injected_fuel = -(injected as i64);
}
1159
1160#[doc(hidden)]
1161impl StoreOpaque {
    /// Returns this store's unique identifier.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1165
1166    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1167        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1168            let new = slot.saturating_add(amt);
1169            if new > max {
1170                bail!(
1171                    "resource limit exceeded: {} count too high at {}",
1172                    desc,
1173                    new
1174                );
1175            }
1176            *slot = new;
1177            Ok(())
1178        }
1179
1180        let module = module.env_module();
1181        let memories = module.num_defined_memories();
1182        let tables = module.num_defined_tables();
1183
1184        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1185        bump(
1186            &mut self.memory_count,
1187            self.memory_limit,
1188            memories,
1189            "memory",
1190        )?;
1191        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1192
1193        Ok(())
1194    }
1195
    /// Whether this store supports async; requires both the compile-time
    /// `async` feature and the runtime `Config::async_support` flag.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }

    /// Returns the [`Engine`] this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }

    /// Mutable access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }

    /// Shared access to the registry of modules in this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }

    /// Mutable access to the registry of modules in this store.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }

    /// Split-borrow helper: mutable access to the store's function
    /// references alongside shared access to its module registry.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }

    /// Shared access to the globals created by the host in this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }

    /// Mutable access to the globals created by the host in this store.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1241
1242    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1243        instance.store_id().assert_belongs_to(self.id());
1244        match self.instances[instance.instance()].kind {
1245            StoreInstanceKind::Dummy => None,
1246            StoreInstanceKind::Real { module_id } => {
1247                let module = self
1248                    .modules()
1249                    .lookup_module_by_id(module_id)
1250                    .expect("should always have a registered module for real instances");
1251                Some(module)
1252            }
1253        }
1254    }
1255
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// NOTE(review): indexing presumably panics on an `id` from a different
    /// store — confirm callers always validate first, per the docs above.
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }

    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1273
1274    /// Get all instances (ignoring dummy instances) within this store.
1275    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1276        let instances = self
1277            .instances
1278            .iter()
1279            .filter_map(|(id, inst)| {
1280                if let StoreInstanceKind::Dummy = inst.kind {
1281                    None
1282                } else {
1283                    Some(id)
1284                }
1285            })
1286            .collect::<Vec<_>>();
1287        instances
1288            .into_iter()
1289            .map(|i| Instance::from_wasmtime(i, self))
1290    }
1291
1292    /// Get all memories (host- or Wasm-defined) within this store.
1293    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
1294        // NB: Host-created memories have dummy instances. Therefore, we can get
1295        // all memories in the store by iterating over all instances (including
1296        // dummy instances) and getting each of their defined memories.
1297        let mems = self
1298            .instances
1299            .iter_mut()
1300            .flat_map(|(_, instance)| instance.handle.get().defined_memories())
1301            .collect::<Vec<_>>();
1302        mems.into_iter()
1303            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
1304    }
1305
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1319
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    ///
    /// Visits host-created globals first, then each instance's defined
    /// globals in instance order.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1337
    /// Install (or clear, with `None`) this store's custom signal handler.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))] // not used on all platforms
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }

    /// Shared access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1347
    /// Allocate this store's GC heap, recording its memory definition in the
    /// `VMStoreContext`.
    ///
    /// Must only be called while no GC heap exists yet (asserted below).
    /// Errors when GC was disabled at configuration or compile time.
    #[inline(never)]
    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Sanity-check that no heap has been allocated and the context still
        // holds the empty/dangling placeholder values.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let vmstore = self.traitobj();
        let gc_store = allocate_gc_store(self.engine(), vmstore, self.get_pkey())?;
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        self.gc_store = Some(gc_store);
        // Explicit `return` so the nested helper `fn` items can follow below.
        return Ok(());

        // Real implementation, compiled only when the `gc` feature is on.
        #[cfg(feature = "gc")]
        fn allocate_gc_store(
            engine: &Engine,
            vmstore: NonNull<dyn vm::VMStore>,
            pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            use wasmtime_environ::packed_option::ReservedValue;

            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(
                    wasmtime_environ::Module::default(),
                )),
                imports: vm::Imports::default(),
                store: StorePtr::new(vmstore),
                wmemcheck: false,
                pkey,
                tunables: engine.tunables(),
            };
            let mem_ty = engine.tunables().gc_heap_memory_type();
            let tunables = engine.tunables();

            // SAFETY: We validated the GC heap's memory type during engine creation.
            let (mem_alloc_index, mem) = unsafe {
                engine
                    .allocator()
                    .allocate_memory(&mut request, &mem_ty, tunables, None)?
            };

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Fallback that always errors when the `gc` feature is compiled out.
        #[cfg(not(feature = "gc"))]
        fn allocate_gc_store(
            _engine: &Engine,
            _vmstore: NonNull<dyn vm::VMStore>,
            _pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1422
    /// Shared access to the GC store; errors if it has not been allocated.
    #[inline]
    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }

    /// Mutable access to the GC store, allocating the GC heap on first use.
    #[inline]
    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
        if self.gc_store.is_none() {
            self.allocate_gc_heap()?;
        }
        Ok(self.unwrap_gc_store_mut())
    }

    /// If this store is configured with a GC heap, return a mutable reference
    /// to it. Otherwise, return `None`.
    #[inline]
    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
        // Without GC compiled in or configured, no heap can ever exist.
        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
            debug_assert!(self.gc_store.is_none());
            None
        } else {
            self.gc_store.as_mut()
        }
    }

    /// If this store is configured with a GC heap, return a shared reference to
    /// it. Otherwise, return `None`.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn optional_gc_store(&self) -> Option<&GcStore> {
        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
            debug_assert!(self.gc_store.is_none());
            None
        } else {
            self.gc_store.as_ref()
        }
    }

    /// Shared access to the GC store; panics if it has not been allocated.
    #[inline]
    #[track_caller]
    #[cfg(feature = "gc")]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Mutable access to the GC store; panics if it has not been allocated.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Shared access to this store's set of GC roots.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }

    /// Mutable access to this store's set of GC roots.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }

    /// Pop LIFO-scoped GC roots down to the scope marker `scope`.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1496
    /// Synchronous garbage-collection entry point; panics if this store has
    /// async support (those must use the async GC path).
    #[cfg(feature = "gc")]
    fn do_gc(&mut self) {
        assert!(
            !self.async_support(),
            "must use `store.gc_async()` instead of `store.gc()` for async stores"
        );

        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots);
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
1524
    /// Populate `gc_roots_list` with every GC root prior to a collection:
    /// Wasm stack slots, (optionally) continuation stacks, vmctx-held roots,
    /// and user-held roots.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "stack-switching")]
        self.trace_wasm_continuation_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1540
    /// Record the live GC references held in a single Wasm stack `frame` as
    /// roots in `gc_roots_list`.
    ///
    /// Frames whose PC has no associated stack map are skipped: there is no
    /// metadata describing live GC slots at that point.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        // The module whose code contains `pc` supplies the stack maps.
        let module_info = self
            .modules()
            .lookup_module_by_pc(pc)
            .expect("should have module info for Wasm frame");

        let stack_map = match module_info.lookup_stack_map(pc) {
            Some(sm) => sm,
            None => {
                log::trace!("No stack map for this Wasm frame");
                return;
            }
        };
        log::trace!(
            "We have a stack map that maps {} bytes in this Wasm frame",
            stack_map.frame_size()
        );

        let sp = unsafe { stack_map.sp(fp) };
        for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
            let raw: u32 = unsafe { core::ptr::read(stack_slot) };
            log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

            // Null references need not (and must not) be rooted.
            let gc_ref = VMGcRef::from_raw_u32(raw);
            if gc_ref.is_some() {
                unsafe {
                    gc_roots_list
                        .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
                }
            }
        }
    }
1590
    /// Walk the active Wasm stack, rooting every live GC reference found in
    /// each frame's stack slots.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1603
    /// Root the live GC references found on the stacks of suspended
    /// continuations in this store.
    ///
    /// Only suspended continuations are traced here; running stacks are
    /// covered by `trace_wasm_stack_roots` and fresh/returned continuations
    /// hold no GC values.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
1642
1643    #[cfg(feature = "gc")]
1644    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1645        log::trace!("Begin trace GC roots :: vmctx");
1646        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
1647        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
1648        log::trace!("End trace GC roots :: vmctx");
1649    }
1650
    /// Trace GC roots held by the embedder, i.e. the references tracked in
    /// this store's `RootSet`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
1657
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    ///
    /// The registration is retained in `gc_host_alloc_types` for the rest of
    /// this store's lifetime.
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        self.gc_host_alloc_types.insert(ty);
    }
1668
    /// Returns the amount of fuel remaining in this store.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in this store's
    /// configuration.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // NOTE(review): `fuel_consumed` is an `UnsafeCell` shared with running
        // wasm; this read presumably relies on the store not executing wasm
        // concurrently — confirm against the `VMStoreContext` contract.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
1677
    /// Attempt to replenish the active fuel counter from the store's reserve,
    /// returning whatever `refuel` (the free function) reports — per its
    /// usage in `out_of_gas`, `false` means execution is out of fuel.
    fn refuel(&mut self) -> bool {
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
1686
    /// Sets the amount of fuel available to this store.
    ///
    /// # Errors
    ///
    /// Returns an error if fuel consumption is not enabled in this store's
    /// configuration.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
1701
1702    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1703        anyhow::ensure!(
1704            self.engine().tunables().consume_fuel,
1705            "fuel is not configured in this store"
1706        );
1707        anyhow::ensure!(
1708            self.engine().config().async_support,
1709            "async support is not configured in this store"
1710        );
1711        anyhow::ensure!(
1712            interval != Some(0),
1713            "fuel_async_yield_interval must not be 0"
1714        );
1715        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1716        // Reset the fuel active + reserve states by resetting the amount.
1717        self.set_fuel(self.get_fuel()?)
1718    }
1719
    /// Returns a raw pointer to this store's custom signal handler, if one is
    /// configured.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        Some(handler)
    }
1725
    /// Returns a non-null pointer to this store's `VMStoreContext`, suitable
    /// for handing to compiled code.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
1730
    /// Returns the vmctx of this store's default caller instance.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
1735
    /// Returns the raw `dyn VMStore` trait-object pointer this store keeps to
    /// itself.
    ///
    /// Panics if the pointer has not been initialized (`as_raw()` is `None`).
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn vm::VMStore> {
        self.traitobj.as_raw().unwrap()
    }
1740
    /// Returns a mutable `dyn VMStore` borrow of this store via its
    /// self-referential trait-object pointer.
    ///
    /// NOTE(review): soundness presumably relies on `&mut self` guaranteeing
    /// exclusive access to the pointee — confirm against `traitobj`'s
    /// initialization invariants.
    #[inline]
    pub fn traitobj_mut(&mut self) -> &mut dyn vm::VMStore {
        unsafe { self.traitobj().as_mut() }
    }
1745
1746    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
1747    /// used as part of calling the host in a `Func::new` method invocation.
1748    #[inline]
1749    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
1750        mem::take(&mut self.hostcall_val_storage)
1751    }
1752
1753    /// Restores the vector previously taken by `take_hostcall_val_storage`
1754    /// above back into the store, allowing it to be used in the future for the
1755    /// next wasm->host call.
1756    #[inline]
1757    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1758        if storage.capacity() > self.hostcall_val_storage.capacity() {
1759            self.hostcall_val_storage = storage;
1760        }
1761    }
1762
1763    /// Same as `take_hostcall_val_storage`, but for the direction of the host
1764    /// calling wasm.
1765    #[inline]
1766    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
1767        mem::take(&mut self.wasm_val_raw_storage)
1768    }
1769
1770    /// Same as `save_hostcall_val_storage`, but for the direction of the host
1771    /// calling wasm.
1772    #[inline]
1773    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1774        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1775            self.wasm_val_raw_storage = storage;
1776        }
1777    }
1778
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // At most one instance should claim the faulting address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No linear memory claimed the address: this fault should have been
        // impossible, so loudly abort rather than continue.
        cfg_if::cfg_if! {
            if #[cfg(any(feature = "std", unix, windows))] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no way to
                // abort the process portably, so flag a compile time error.
                //
                // NB: if this becomes a problem in the future one option would
                // be to extend the `capi.rs` module for no_std platforms, but
                // it remains yet to be seen at this time if this is hit much.
                compile_error!("either `std` or `panic=abort` must be enabled");
                None
            }
        }
    }
1878
    /// Retrieve the store's protection key, or `None` if no protection key is
    /// in use.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
1884
    /// Simultaneous mutable access to the component-model resource state:
    /// the call contexts, the host resource table, and the host resource data.
    ///
    /// Returned as one tuple so all three can be borrowed at the same time.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
1900
    /// Record that a component instance has been added to this store.
    ///
    /// Currently only bumps `num_component_instances`; the instance itself is
    /// not stored.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
1910
    /// Like `component_resource_state`, but additionally borrows the given
    /// component `instance`'s state from this store's data, all in one tuple
    /// so the four borrows can coexist.
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }
1928
    /// Non-async builds have no async stack guard pages, so the guard range is
    /// the empty null..null range.
    #[cfg(not(feature = "async"))]
    pub(crate) fn async_guard_range(&self) -> core::ops::Range<*mut u8> {
        core::ptr::null_mut()..core::ptr::null_mut()
    }
1933
    /// Returns a borrowed view of this store's executor: either the Pulley
    /// interpreter's state or a marker for native execution.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
1941
    /// Returns the unwinder matching this store's executor: the interpreter's
    /// own unwinder, or the host unwinder for natively-compiled code.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
1949
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    ///
    /// The returned raw pointer points into the `Box` pushed onto
    /// `self.continuations`, so its address stays stable even as that vector
    /// grows.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        // The continuation's stack is sized by the engine's configured async
        // stack size.
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
1964
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be use to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The request's associated module, memories, tables, and vmctx must have
    /// already have been validated by `validate_module` for the allocator
    /// configured. This is typically done during module construction for
    /// example.
    pub(crate) unsafe fn allocate_instance(
        &mut self,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id the pushed instance will get; verified against the
        // actual id below.
        let id = self.instances.next_key();

        // Dummy instances use the caller-provided allocator; real modules use
        // the engine's configured allocator.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        let handle = allocator.allocate_module(InstanceAllocationRequest {
            id,
            runtime_info,
            imports,
            store: StorePtr::new(self.traitobj()),
            wmemcheck: self.engine().config().wmemcheck,
            pkey: self.get_pkey(),
            tunables: self.engine().tunables(),
        })?;

        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2037
    /// Returns the `StoreInstanceId` that can be used to re-acquire access to
    /// `vmctx` from a store later on.
    ///
    /// # Safety
    ///
    /// This method is unsafe as it cannot validate that `vmctx` is a valid
    /// allocation that lives within this store.
    pub(crate) unsafe fn vmctx_id(&self, vmctx: NonNull<VMContext>) -> StoreInstanceId {
        // Pair the instance-local id recovered from the vmctx with this
        // store's id to form a store-global identifier.
        let instance_id = vm::Instance::from_vmctx(vmctx, |i| i.id());
        StoreInstanceId::new(self.id(), instance_id)
    }
2049}
2050
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2069
// NOTE(review): `vm::VMStore` is an `unsafe` trait; the exact contract this
// impl must uphold is documented on the trait itself — confirm there.
unsafe impl<T> vm::VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    /// Shared access to the type-erased portion of this store.
    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    /// Exclusive access to the type-erased portion of this store.
    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    /// Ask the configured resource limiter (if any) whether a memory may grow
    /// from `current` to `desired` pages; no limiter means growth is allowed.
    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                // Async limiters must run on an async store; block on the
                // limiter's future from within this fiber.
                self.inner
                    .async_cx()
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(
                        limiter(&mut self.data)
                            .memory_growing(current, desired, maximum)
                            .as_mut(),
                    )?
            },
            None => Ok(true),
        }
    }

    /// Report a failed memory growth to the limiter, or log-and-ignore when no
    /// limiter is configured.
    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            None => {
                log::debug!("ignoring memory growth failure error: {error:?}");
                Ok(())
            }
        }
    }

    /// Ask the configured resource limiter (if any) whether a table may grow
    /// from `current` to `desired` elements; no limiter means growth is
    /// allowed.
    fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        // Need to borrow async_cx before the mut borrow of the limiter.
        // self.async_cx() panics when used with a non-async store, so
        // wrap this in an option.
        #[cfg(feature = "async")]
        let async_cx = if self.async_support()
            && matches!(self.limiter, Some(ResourceLimiterInner::Async(_)))
        {
            Some(self.async_cx().unwrap())
        } else {
            None
        };

        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_growing(current, desired, maximum)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => unsafe {
                async_cx
                    .expect("ResourceLimiterAsync requires async Store")
                    .block_on(limiter(&mut self.data).table_growing(current, desired, maximum))?
            },
            None => Ok(true),
        }
    }

    /// Report a failed table growth to the limiter, or log-and-ignore when no
    /// limiter is configured.
    fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            None => {
                log::debug!("ignoring table growth failure: {error:?}");
                Ok(())
            }
        }
    }

    /// Called when wasm runs out of fuel: attempt to refuel from the reserve,
    /// trapping with `OutOfFuel` if none remains, and optionally yielding to
    /// the async executor when a yield interval is configured.
    fn out_of_gas(&mut self) -> Result<()> {
        if !self.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if self.fuel_yield_interval.is_some() {
            self.async_yield_impl()?;
        }
        Ok(())
    }

    /// Called when wasm crosses the epoch deadline: invoke the configured
    /// deadline behavior (trap, continue, or yield) and return the new
    /// deadline on success.
    #[cfg(target_has_atomic = "64")]
    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let delta_result = match &mut behavior {
            None => Err(Trap::Interrupt.into()),
            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
                let delta = match update {
                    UpdateDeadline::Continue(delta) => delta,
                    #[cfg(feature = "async")]
                    UpdateDeadline::Yield(delta) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
                        );
                        // Do the async yield. May return a trap if future was
                        // canceled while we're yielded.
                        self.async_yield_impl()?;
                        delta
                    }
                    #[cfg(feature = "async")]
                    UpdateDeadline::YieldCustom(delta, future) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::YieldCustom` without enabling async support in the config"
                        );

                        // When control returns, we have a `Result<()>` passed
                        // in from the host fiber. If this finished successfully then
                        // we were resumed normally via a `poll`, so keep going.  If
                        // the future was dropped while we were yielded, then we need
                        // to clean up this fiber. Do so by raising a trap which will
                        // abort all wasm and get caught on the other side to clean
                        // things up.
                        unsafe {
                            self.async_cx()
                                .expect("attempted to pull async context during shutdown")
                                .block_on(future)?
                        }
                        delta
                    }
                };

                // Set a new deadline and return the new epoch deadline so
                // the Wasm code doesn't have to reload it.
                self.set_epoch_deadline(delta);
                Ok(self.get_epoch_deadline())
            })
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        delta_result
    }

    /// GC-enabled builds delegate heap growth/collection to the store's
    /// `maybe_async_gc` implementation.
    #[cfg(feature = "gc")]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        self.inner.maybe_async_gc(root, bytes_needed)
    }

    /// Without GC support there is nothing to collect; the root is passed
    /// through unchanged.
    #[cfg(not(feature = "gc"))]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        _bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        Ok(root)
    }

    /// Exclusive access to the component-model call contexts.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }
}
2265
2266impl<T> StoreInner<T> {
2267    #[cfg(target_has_atomic = "64")]
2268    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2269        // Set a new deadline based on the "epoch deadline delta".
2270        //
2271        // Also, note that when this update is performed while Wasm is
2272        // on the stack, the Wasm will reload the new value once we
2273        // return into it.
2274        let current_epoch = self.engine().current_epoch();
2275        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
2276        *epoch_deadline = current_epoch + delta;
2277    }
2278
2279    #[cfg(target_has_atomic = "64")]
2280    fn epoch_deadline_trap(&mut self) {
2281        self.epoch_deadline_behavior = None;
2282    }
2283
2284    #[cfg(target_has_atomic = "64")]
2285    fn epoch_deadline_callback(
2286        &mut self,
2287        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
2288    ) {
2289        self.epoch_deadline_behavior = Some(callback);
2290    }
2291
2292    fn get_epoch_deadline(&mut self) -> u64 {
2293        *self.vm_store_context.epoch_deadline.get_mut()
2294    }
2295}
2296
2297impl<T: Default> Default for Store<T> {
2298    fn default() -> Store<T> {
2299        Store::new(&Engine::default(), T::default())
2300    }
2301}
2302
2303impl<T: fmt::Debug> fmt::Debug for Store<T> {
2304    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2305        let inner = &**self.inner as *const StoreInner<T>;
2306        f.debug_struct("Store")
2307            .field("inner", &inner)
2308            .field("data", &self.inner.data)
2309            .finish()
2310    }
2311}
2312
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // NOTE(review): presumably releases any cached async fiber stack
        // before teardown; confirm against `flush_fiber_stack`.
        self.inner.flush_fiber_stack();

        // for documentation on this `unsafe`, see `into_data`.
        //
        // Note the ordering here: `data` is dropped before `inner` because
        // `Drop for StoreOpaque` deliberately never touches `self.data` and
        // relies on it having been destroyed here first.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2324
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            // `Dummy` instances are deallocated through a default on-demand
            // allocator rather than the engine's configured one (see the
            // instance loop below).
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Tear down this store's GC heap, if any, returning both the
            // heap and its backing memory to the engine's allocator.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Deallocate every instance that lived in this store, routing
            // each through whichever allocator created it.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }

            // Balance the allocator's component-instance count against the
            // instances this store had created.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2363
#[cfg(test)]
mod tests {
    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Small harness bundling the three pieces of fuel state that the free
    /// functions under test operate on.
    #[derive(Default)]
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            Self::default()
        }

        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }

        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }

        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut t = FuelTank::new();

        // With no yield interval, all fuel becomes "active" consumption.
        t.set_fuel(10);
        assert_eq!(t.consumed_fuel, -10);
        assert_eq!(t.reserve_fuel, 0);

        // With an interval of 10, one interval's worth is active and the
        // remainder is held in reserve.
        t.yield_interval = NonZeroU64::new(10);
        t.set_fuel(25);
        assert_eq!(t.consumed_fuel, -10);
        assert_eq!(t.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        // Round-tripping through set/get must be exact even at the extremes
        // of the `u64`/`i64` ranges.
        let mut t = FuelTank::new();
        for fuel in [u64::MAX, i64::MAX as u64, i64::MAX as u64 + 1] {
            t.set_fuel(fuel);
            assert_eq!(t.get_fuel(), fuel);
        }
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut t = FuelTank::new();

        // Small interval: the reserve absorbs everything past one interval.
        t.yield_interval = NonZeroU64::new(10);
        t.set_fuel(u64::MAX);
        assert_eq!(t.get_fuel(), u64::MAX);
        assert_eq!(t.consumed_fuel, -10);
        assert_eq!(t.reserve_fuel, u64::MAX - 10);

        // Interval larger than `i64::MAX`: active fuel is clamped to what
        // `consumed_fuel` can represent, with the rest kept in reserve.
        t.yield_interval = NonZeroU64::new(u64::MAX);
        t.set_fuel(u64::MAX);
        assert_eq!(t.get_fuel(), u64::MAX);
        assert_eq!(t.consumed_fuel, -i64::MAX);
        assert_eq!(t.reserve_fuel, u64::MAX - (i64::MAX as u64));

        // Just past `i64::MAX` clamps the same way.
        t.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        t.set_fuel(u64::MAX);
        assert_eq!(t.get_fuel(), u64::MAX);
        assert_eq!(t.consumed_fuel, -i64::MAX);
        assert_eq!(t.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // Some instructions consume several units of fuel at once, so
        // consumption can overshoot the limit. Refueling must be strict in
        // its accounting and never add more fuel than is actually available.
        let mut t = FuelTank::new();

        // Overshoot of 4 is paid out of the reserve before refilling.
        t.yield_interval = NonZeroU64::new(10);
        t.reserve_fuel = 42;
        t.consumed_fuel = 4;
        assert!(t.refuel());
        assert_eq!(t.reserve_fuel, 28);
        assert_eq!(t.consumed_fuel, -10);

        // The total observable fuel is unchanged by a refuel.
        t.yield_interval = NonZeroU64::new(1);
        t.reserve_fuel = 8;
        t.consumed_fuel = 4;
        assert_eq!(t.get_fuel(), 4);
        assert!(t.refuel());
        assert_eq!(t.reserve_fuel, 3);
        assert_eq!(t.consumed_fuel, -1);
        assert_eq!(t.get_fuel(), 4);

        // When the overshoot exceeds the reserve there is nothing left to
        // refuel with: the call fails and the state is untouched.
        t.yield_interval = NonZeroU64::new(10);
        t.reserve_fuel = 3;
        t.consumed_fuel = 4;
        assert_eq!(t.get_fuel(), 0);
        assert!(!t.refuel());
        assert_eq!(t.reserve_fuel, 3);
        assert_eq!(t.consumed_fuel, 4);
        assert_eq!(t.get_fuel(), 0);
    }
}