wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
//!   is split out, although sometimes it's useful to have borrows of these
//!   tables which are separate from the rest of the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::RootSet;
80#[cfg(feature = "component-model-async")]
81use crate::component::ComponentStoreData;
82#[cfg(feature = "component-model-async")]
83use crate::component::concurrent;
84#[cfg(feature = "async")]
85use crate::fiber;
86use crate::module::RegisteredModuleId;
87use crate::prelude::*;
88#[cfg(feature = "gc")]
89use crate::runtime::vm::GcRootsList;
90#[cfg(feature = "stack-switching")]
91use crate::runtime::vm::VMContRef;
92use crate::runtime::vm::mpk::ProtectionKey;
93use crate::runtime::vm::{
94    self, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
95    Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SendSyncPtr,
96    SignalHandler, StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMStoreContext,
97};
98use crate::trampoline::VMHostGlobalContext;
99use crate::{Engine, Module, Trap, Val, ValRaw, module::ModuleRegistry};
100use crate::{Global, Instance, Memory, Table, Uninhabited};
101use alloc::sync::Arc;
102use core::fmt;
103use core::marker;
104use core::mem::{self, ManuallyDrop};
105use core::num::NonZeroU64;
106use core::ops::{Deref, DerefMut};
107use core::pin::Pin;
108use core::ptr::NonNull;
109use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
110
111mod context;
112pub use self::context::*;
113mod data;
114pub use self::data::*;
115mod func_refs;
116use func_refs::FuncRefs;
117#[cfg(feature = "async")]
118mod token;
119#[cfg(feature = "async")]
120pub(crate) use token::StoreToken;
121#[cfg(feature = "async")]
122mod async_;
123#[cfg(all(feature = "async", feature = "call-hook"))]
124pub use self::async_::CallHookHandler;
125#[cfg(feature = "gc")]
126mod gc;
127
128/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
129///
130/// All WebAssembly instances and items will be attached to and refer to a
131/// [`Store`]. For example instances, functions, globals, and tables are all
132/// attached to a [`Store`]. Instances are created by instantiating a
133/// [`Module`](crate::Module) within a [`Store`].
134///
135/// A [`Store`] is intended to be a short-lived object in a program. No form
136/// of GC is implemented at this time so once an instance is created within a
137/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
138/// This makes [`Store`] unsuitable for creating an unbounded number of
139/// instances in it because [`Store`] will never release this memory. It's
140/// recommended to have a [`Store`] correspond roughly to the lifetime of a
141/// "main instance" that an embedding is interested in executing.
142///
143/// ## Type parameter `T`
144///
145/// Each [`Store`] has a type parameter `T` associated with it. This `T`
146/// represents state defined by the host. This state will be accessible through
147/// the [`Caller`](crate::Caller) type that host-defined functions get access
148/// to. This `T` is suitable for storing `Store`-specific information which
149/// imported functions may want access to.
150///
151/// The data `T` can be accessed through methods like [`Store::data`] and
152/// [`Store::data_mut`].
153///
154/// ## Stores, contexts, oh my
155///
156/// Most methods in Wasmtime take something of the form
157/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
158/// the first argument. These two traits allow ergonomically passing in the
159/// context you currently have to any method. The primary two sources of
160/// contexts are:
161///
162/// * `Store<T>`
163/// * `Caller<'_, T>`
164///
165/// corresponding to what you create and what you have access to in a host
166/// function. You can also explicitly acquire a [`StoreContext`] or
167/// [`StoreContextMut`] and pass that around as well.
168///
169/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
170/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
171/// form of context you have you can call various methods, create objects, etc.
172///
173/// ## Stores and `Default`
174///
175/// You can create a store with default configuration settings using
176/// `Store::default()`. This will create a brand new [`Engine`] with default
177/// configuration (see [`Config`](crate::Config) for more information).
178///
179/// ## Cross-store usage of items
180///
181/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
182/// [`Store`]. The store they belong to is the one they were created with
183/// (passed in as a parameter) or instantiated with. This store is the only
184/// store that can be used to interact with wasm items after they're created.
185///
186/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
187/// operations is incorrect. In other words it's considered a programmer error
188/// rather than a recoverable error for the wrong [`Store`] to be used when
189/// calling APIs.
pub struct Store<T: 'static> {
    // The sole field: this keeps `Store<T>` exactly one pointer in size, which
    // (per the module docs above) is a load-bearing implementation detail.
    //
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
194
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // `matches!` replaces the manual `match`-to-bool for the two variants
        // that transition control into host code.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
225
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional embedder-configured resource limiter, stored as an accessor
    /// closure that projects `&mut T` to the limiter implementation.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on host<->wasm call transitions (see `CallHook`).
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's deadline,
    /// deciding how (or whether) execution continues.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // for comments about `ManuallyDrop`, see `Store::into_data`
    data: ManuallyDrop<T>,
}
243
/// Accessor closure projecting the store's `T` to the embedder-provided
/// resource limiter, in either its synchronous or asynchronous flavor.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter, configured via `Store::limiter`.
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter, only available with the `async` feature.
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
249
/// Storage for the store's configured call hook, split sync/async just like
/// `ResourceLimiterInner`.
enum CallHookInner<T: 'static> {
    /// Synchronous hook invoked on each host<->wasm transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook, only available with the `async` feature.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninstantiable variant (`Uninhabited`) ensuring `T` is always "used"
    /// even when the other variants are compiled out by `cfg`.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
264
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    ///
    /// Execution resumes immediately, without yielding to any async executor.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
289
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

// Same forwarding as above, but for mutable access.
impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
303
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store is associated with.
    engine: Engine,
    /// Store state shared with (and directly accessed by) compiled wasm code;
    /// see `VMStoreContext` for the exact contents.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All core-wasm instances allocated within this store, keyed by
    /// `InstanceId`.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    /// Number of component instances created within this store.
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Custom signal handler, if one was configured for this store.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules associated with this store.
    modules: ModuleRegistry,
    /// Storage for `VMFuncRef`s belonging to this store; see `FuncRefs`.
    func_refs: FuncRefs,
    /// Contexts for globals created directly by the host (as opposed to
    /// globals defined by an instance).
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    /// The GC heap state for this store, if one has been allocated (`None`
    /// otherwise — see `Store::new` which starts this as `None`).
    gc_store: Option<GcStore>,
    /// Rooted GC references held on behalf of the embedder.
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// Fiber-related state used to implement async execution.
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-referential pointer back to the enclosing `StoreInner<T>` as a
    /// trait object; filled in by `Store::new` once the store is boxed.
    traitobj: StorePtr,
    /// The `VMContext` of the dummy "default callee" instance allocated in
    /// `Store::new`, used as the callee when no real caller exists.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,

    #[cfg(feature = "component-model-async")]
    concurrent_async_state: concurrent::AsyncState,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
413
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Execute wasm through the Pulley interpreter.
    Interpreter(Interpreter),
    /// Execute natively-compiled wasm directly on the host.
    #[cfg(has_host_compiler_backend)]
    Native,
}
423
impl Executor {
    /// Creates the executor appropriate for `engine`'s configured target.
    pub(crate) fn new(engine: &Engine) -> Self {
        // Note: the two `#[cfg]` attributes below select exactly one of the
        // trailing expressions to serve as this function's return value.
        //
        // With a native backend available, Pulley is only used when the
        // engine's target explicitly asks for it.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a host compiler backend the interpreter is the only option,
        // so the target must be Pulley.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
439
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; no interpreter state to borrow.
    #[cfg(has_host_compiler_backend)]
    Native,
}
446
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is placed in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered, and therefore must be
    /// exited when this value is dropped.
    entered: bool,
}
453
454impl<'a> AutoAssertNoGc<'a> {
455    #[inline]
456    pub fn new(store: &'a mut StoreOpaque) -> Self {
457        let entered = if !cfg!(feature = "gc") {
458            false
459        } else if let Some(gc_store) = store.gc_store.as_mut() {
460            gc_store.gc_heap.enter_no_gc_scope();
461            true
462        } else {
463            false
464        };
465
466        AutoAssertNoGc { store, entered }
467    }
468
469    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
470    /// disables checks for no GC happening for the duration of this value.
471    ///
472    /// This is used when it is statically otherwise known that a GC doesn't
473    /// happen for the various types involved.
474    ///
475    /// # Unsafety
476    ///
477    /// This method is `unsafe` as it does not provide the same safety
478    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
479    /// caller that a GC doesn't happen.
480    #[inline]
481    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
482        if cfg!(debug_assertions) {
483            AutoAssertNoGc::new(store)
484        } else {
485            AutoAssertNoGc {
486                store,
487                entered: false,
488            }
489        }
490    }
491}
492
// Transparent access to the underlying store for the duration of the no-GC
// scope.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
508
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the no-GC scope if `new` actually entered one; `disabled`
        // (in release builds) constructs this type without entering a scope.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
517
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The underlying allocated instance.
    handle: InstanceHandle,
    /// Whether this is a real, module-backed instance or an internal dummy;
    /// see `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
526
/// Distinguishes real, module-backed instances from internal dummy instances.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
543
544impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        // Grab a protection key for this store, if the allocator has any
        // available (MPK-based memory protection).
        let pkey = engine.allocator().next_available_pkey();

        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // These two fields are placeholders for now; they're filled in
            // below once the `StoreInner` has a stable heap address.
            traitobj: StorePtr::empty(),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model-async")]
            concurrent_async_state: Default::default(),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // The `Box` above gives the store a stable heap address, so record the
        // store's own self-referential trait-object pointer now.
        inner.traitobj = StorePtr::new(NonNull::from(&mut *inner));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::default());
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        // Allocate the dummy "default callee" instance described above and
        // remember its vmctx pointer for later calls into wasm.
        unsafe {
            let id = inner
                .allocate_instance(
                    AllocateInstanceKind::Dummy {
                        allocator: &allocator,
                    },
                    &shim,
                    Default::default(),
                )
                .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
649
    /// Access the underlying data owned by this `Store`.
    ///
    /// This is the same `data` value originally passed to [`Store::new`].
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Mutable counterpart of [`Store::data`].
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
661
    /// Runs pre-drop cleanup that must happen before the store's internals are
    /// torn down; invoked by [`Store::into_data`].
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        //
        // This will also drop any futures which might use a `&Accessor` fields
        // in their `Drop::drop` implementations, in which case they'll need to
        // be called from with in the context of a `tls::set` closure.
        #[cfg(feature = "component-model-async")]
        ComponentStoreData::drop_fibers_and_futures(&mut self.inner);

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
678
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    ///
    /// Note that this first runs the store's manual pre-drop routines (e.g.
    /// flushing cached fiber stacks) before extracting `T`.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }
713
714    /// Configures the [`ResourceLimiter`] used to limit resource creation
715    /// within this [`Store`].
716    ///
717    /// Whenever resources such as linear memory, tables, or instances are
718    /// allocated the `limiter` specified here is invoked with the store's data
719    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
720    /// being allocated. The returned [`ResourceLimiter`] is intended to live
721    /// within the `T` itself, for example by storing a
722    /// [`StoreLimits`](crate::StoreLimits).
723    ///
724    /// Note that this limiter is only used to limit the creation/growth of
725    /// resources in the future, this does not retroactively attempt to apply
726    /// limits to the [`Store`].
727    ///
728    /// # Examples
729    ///
730    /// ```
731    /// use wasmtime::*;
732    ///
733    /// struct MyApplicationState {
734    ///     my_state: u32,
735    ///     limits: StoreLimits,
736    /// }
737    ///
738    /// let engine = Engine::default();
739    /// let my_state = MyApplicationState {
740    ///     my_state: 42,
741    ///     limits: StoreLimitsBuilder::new()
742    ///         .memory_size(1 << 20 /* 1 MB */)
743    ///         .instances(2)
744    ///         .build(),
745    /// };
746    /// let mut store = Store::new(&engine, my_state);
747    /// store.limiter(|state| &mut state.limits);
748    ///
749    /// // Creation of smaller memories is allowed
750    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
751    ///
752    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
753    /// // configured
754    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
755    ///
756    /// // The number of instances in this store is limited to 2, so the third
757    /// // instance here should fail.
758    /// let module = Module::new(&engine, "(module)").unwrap();
759    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
760    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
761    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
762    /// ```
763    ///
764    /// [`ResourceLimiter`]: crate::ResourceLimiter
765    pub fn limiter(
766        &mut self,
767        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
768    ) {
769        // Apply the limits on instances, tables, and memory given by the limiter:
770        let inner = &mut self.inner;
771        let (instance_limit, table_limit, memory_limit) = {
772            let l = limiter(&mut inner.data);
773            (l.instances(), l.tables(), l.memories())
774        };
775        let innermost = &mut inner.inner;
776        innermost.instance_limit = instance_limit;
777        innermost.table_limit = table_limit;
778        innermost.memory_limit = memory_limit;
779
780        // Save the limiter accessor function:
781        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
782    }
783
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Box and store the hook; it's invoked from `StoreInner::call_hook`
        // on each wasm<->host transition.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
805
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // Delegates to the internal `StoreOpaque` which owns the `Engine`.
        self.inner.engine()
    }
810
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // Synchronous collection is only valid for non-async stores; async
        // stores must use the async GC entrypoint so collection can yield.
        assert!(!self.inner.async_support());
        self.inner.gc(why);
    }
829
    /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must
    /// be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        // Delegates to the internal store's fuel accounting.
        self.inner.get_fuel()
    }
840
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // Delegates to the internal store's fuel accounting.
        self.inner.set_fuel(fuel)
    }
863
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // Delegates; see the `# Error` section above for the failure mode.
        self.inner.fuel_async_yield_interval(interval)
    }
895
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // Delegates to the internal store.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
926
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        // Delegates to the internal store.
        self.inner.epoch_deadline_trap();
    }
957
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Box the callback and hand it to the internal store.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
992}
993
impl<'a, T> StoreContext<'a, T> {
    /// Whether the underlying store was configured with async support.
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned borrow lives for `'a`, the lifetime of the
    /// store borrow itself, rather than being tied to `&self`.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1018
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that unlike [`StoreContext::data`] the borrow returned here is
    /// tied to `&self` rather than to `'a`.
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        self.0.gc(why);
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }
}
1086
impl<T> StoreInner<T> {
    /// Shared access to the user-provided `T` stored in this store.
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    /// Exclusive access to the user-provided `T` stored in this store.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Invoked on each wasm<->host transition `s`.
    ///
    /// The common case of no protection key and no configured hook is kept
    /// inlineable as a cheap no-op; everything else goes to the slow path.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        // With a protection key configured, restrict the allocator to that
        // key while executing wasm and open all keys back up when control
        // returns to the host.
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatches the transition `s` to the configured hook, either calling
    /// it synchronously or blocking on the async handler's future.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                // Async hooks require the ability to block this thread on the
                // handler's future.
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                // This variant is uninhabited, so matching on it proves this
                // arm can never actually execute.
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    /// No-op stand-in for the async fiber-stack flush when the `async`
    /// feature is disabled, so callers can invoke it unconditionally.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1156
/// Computes the total fuel remaining given the VM's active (negated) fuel
/// counter `injected_fuel` and the amount held back in `fuel_reserve`.
///
/// `injected_fuel` is negative while fuel remains (see `set_fuel` below), so
/// negating it yields the active amount left. Saturating arithmetic is used
/// throughout: `saturating_neg` guards against the (theoretical)
/// `injected_fuel == i64::MIN` case which would otherwise overflow on
/// negation, and `saturating_add_signed` clamps the sum to `u64` range.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1160
1161// Add remaining fuel from the reserve into the active fuel if there is any left.
1162fn refuel(
1163    injected_fuel: &mut i64,
1164    fuel_reserve: &mut u64,
1165    yield_interval: Option<NonZeroU64>,
1166) -> bool {
1167    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1168    if fuel > 0 {
1169        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1170        true
1171    } else {
1172        false
1173    }
1174}
1175
/// Splits `new_fuel_amount` between the fuel actively injected into the VM
/// (`injected_fuel`) and the held-back `fuel_reserve`.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // With periodic yields enabled only one interval's worth of fuel is
    // handed to the VM at a time; no interval means everything is eligible.
    // The injected portion is additionally capped to i64 range since the VM
    // tracks fuel in an i64 counter.
    let per_injection_cap = yield_interval.map_or(u64::MAX, NonZeroU64::get);
    let injected = new_fuel_amount.min(per_injection_cap).min(i64::MAX as u64);
    // Everything not injected now is banked for later refueling.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM increments to count fuel and halts once the counter turns
    // positive, so the active amount is stored negated.
    *injected_fuel = -(injected as i64);
}
1195
1196#[doc(hidden)]
1197impl StoreOpaque {
    /// Returns this store's unique identifier, used to check that items
    /// (instances, memories, ...) belong to this store.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1201
1202    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1203        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1204            let new = slot.saturating_add(amt);
1205            if new > max {
1206                bail!(
1207                    "resource limit exceeded: {} count too high at {}",
1208                    desc,
1209                    new
1210                );
1211            }
1212            *slot = new;
1213            Ok(())
1214        }
1215
1216        let module = module.env_module();
1217        let memories = module.num_defined_memories();
1218        let tables = module.num_defined_tables();
1219
1220        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1221        bump(
1222            &mut self.memory_count,
1223            self.memory_limit,
1224            memories,
1225            "memory",
1226        )?;
1227        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1228
1229        Ok(())
1230    }
1231
    /// Whether this store was configured for async support.
    ///
    /// Always `false` when the `async` feature is compiled out, regardless of
    /// the engine's configuration.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }
1236
    /// Returns the [`Engine`] this store is associated with.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1241
    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1246
    /// Exclusive access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1251
    /// Shared access to the registry of modules associated with this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1256
    /// Exclusive access to the registry of modules associated with this store.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }
1261
    /// Split-borrow accessor granting mutable access to the store's
    /// `FuncRefs` alongside shared access to the module registry, for callers
    /// that need both at once.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }
1265
    /// Shared access to the globals created by the host (as opposed to
    /// instance-defined globals) within this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }
1271
    /// Exclusive access to the globals created by the host (as opposed to
    /// instance-defined globals) within this store.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1277
1278    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1279        instance.store_id().assert_belongs_to(self.id());
1280        match self.instances[instance.instance()].kind {
1281            StoreInstanceKind::Dummy => None,
1282            StoreInstanceKind::Real { module_id } => {
1283                let module = self
1284                    .modules()
1285                    .lookup_module_by_id(module_id)
1286                    .expect("should always have a registered module for real instances");
1287                Some(module)
1288            }
1289        }
1290    }
1291
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        // NOTE(review): indexing presumably panics for an `id` not present in
        // this store's instance map — callers must validate `id` per the doc
        // above; confirm against the map's indexing impl.
        self.instances[id].handle.get()
    }
1301
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        // See `instance` above regarding validation of `id`.
        self.instances[id].handle.get_mut()
    }
1311
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` fails on overlapping ids, so this `unwrap` is
        // what produces the panic documented above.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1337
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    ///
    /// Returns the store's GC heap (if one has been allocated) together with
    /// the requested instance, borrowed simultaneously via a split borrow.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1345
1346    /// Get all instances (ignoring dummy instances) within this store.
1347    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1348        let instances = self
1349            .instances
1350            .iter()
1351            .filter_map(|(id, inst)| {
1352                if let StoreInstanceKind::Dummy = inst.kind {
1353                    None
1354                } else {
1355                    Some(id)
1356                }
1357            })
1358            .collect::<Vec<_>>();
1359        instances
1360            .into_iter()
1361            .map(|i| Instance::from_wasmtime(i, self))
1362    }
1363
1364    /// Get all memories (host- or Wasm-defined) within this store.
1365    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = Memory> + 'a {
1366        // NB: Host-created memories have dummy instances. Therefore, we can get
1367        // all memories in the store by iterating over all instances (including
1368        // dummy instances) and getting each of their defined memories.
1369        let id = self.id();
1370        self.instances
1371            .iter()
1372            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
1373    }
1374
1375    /// Iterate over all tables (host- or Wasm-defined) within this store.
1376    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
1377        // NB: Host-created tables have dummy instances. Therefore, we can get
1378        // all tables in the store by iterating over all instances (including
1379        // dummy instances) and getting each of their defined memories.
1380        for id in self.instances.keys() {
1381            let instance = StoreInstanceId::new(self.id(), id);
1382            for table in 0..self.instance(id).env_module().num_defined_tables() {
1383                let table = DefinedTableIndex::new(table);
1384                f(self, Table::from_raw(instance, table));
1385            }
1386        }
1387    }
1388
1389    /// Iterate over all globals (host- or Wasm-defined) within this store.
1390    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
1391        // First enumerate all the host-created globals.
1392        for global in self.host_globals.keys() {
1393            let global = Global::new_host(self, global);
1394            f(self, global);
1395        }
1396
1397        // Then enumerate all instances' defined globals.
1398        for id in self.instances.keys() {
1399            for index in 0..self.instance(id).env_module().num_defined_globals() {
1400                let index = DefinedGlobalIndex::new(index);
1401                let global = Global::new_instance(self, id, index);
1402                f(self, global);
1403            }
1404        }
1405    }
1406
    /// Installs `handler` (or clears it with `None`) as this store's custom
    /// signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1411
    /// Shared access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1416
    /// Exclusive access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1421
    /// Allocates this store's GC heap, recording it in `self.gc_store` and
    /// publishing its memory definition into the `VMStoreContext`.
    ///
    /// # Panics
    ///
    /// Panics if a GC heap has already been allocated for this store.
    ///
    /// # Errors
    ///
    /// Returns an error when GC is disabled, either at compile time (no `gc`
    /// feature) or at configuration time, or if allocation itself fails.
    #[inline(never)]
    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // The store must not already have a GC heap, and the cached heap
        // definition must still be the empty placeholder (dangling base
        // pointer, zero length).
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let vmstore = self.traitobj();
        let gc_store = allocate_gc_store(self.engine(), vmstore, self.get_pkey())?;
        // Publish the heap's memory definition, then record the heap itself.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        self.gc_store = Some(gc_store);
        // Early return: the items below are nested helper functions, not
        // further statements of this method.
        return Ok(());

        // With the `gc` feature enabled: allocate the backing linear memory
        // for the GC heap and then the heap itself.
        #[cfg(feature = "gc")]
        fn allocate_gc_store(
            engine: &Engine,
            vmstore: NonNull<dyn vm::VMStore>,
            pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            use wasmtime_environ::packed_option::ReservedValue;

            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(
                    wasmtime_environ::Module::default(),
                )),
                imports: vm::Imports::default(),
                store: StorePtr::new(vmstore),
                #[cfg(feature = "wmemcheck")]
                wmemcheck: false,
                pkey,
                tunables: engine.tunables(),
            };
            let mem_ty = engine.tunables().gc_heap_memory_type();
            let tunables = engine.tunables();

            let (mem_alloc_index, mem) =
                engine
                    .allocator()
                    .allocate_memory(&mut request, &mem_ty, tunables, None)?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Without the `gc` feature a GC heap can never be allocated.
        #[cfg(not(feature = "gc"))]
        fn allocate_gc_store(
            _engine: &Engine,
            _vmstore: NonNull<dyn vm::VMStore>,
            _pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1495
1496    #[inline]
1497    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
1498        match &self.gc_store {
1499            Some(gc_store) => Ok(gc_store),
1500            None => bail!("GC heap not initialized yet"),
1501        }
1502    }
1503
    /// Exclusive access to this store's GC heap, allocating it on demand if
    /// it has not been created yet.
    ///
    /// # Errors
    ///
    /// Returns an error if allocating the GC heap fails (see
    /// `allocate_gc_heap`).
    #[inline]
    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
        if self.gc_store.is_none() {
            self.allocate_gc_heap()?;
        }
        // Allocation above guarantees the heap is present now, so this
        // unwrap cannot fail.
        Ok(self.unwrap_gc_store_mut())
    }
1511
1512    /// If this store is configured with a GC heap, return a mutable reference
1513    /// to it. Otherwise, return `None`.
1514    #[inline]
1515    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
1516        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
1517            debug_assert!(self.gc_store.is_none());
1518            None
1519        } else {
1520            self.gc_store.as_mut()
1521        }
1522    }
1523
1524    #[inline]
1525    #[track_caller]
1526    #[cfg(feature = "gc")]
1527    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
1528        self.gc_store
1529            .as_ref()
1530            .expect("attempted to access the store's GC heap before it has been allocated")
1531    }
1532
1533    #[inline]
1534    #[track_caller]
1535    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
1536        self.gc_store
1537            .as_mut()
1538            .expect("attempted to access the store's GC heap before it has been allocated")
1539    }
1540
    /// Returns a shared reference to this store's set of embedder-managed GC
    /// roots.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
1545
    /// Returns an exclusive reference to this store's set of embedder-managed
    /// GC roots.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
1551
1552    #[inline]
1553    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
1554        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
1555    }
1556
    /// Performs a synchronous garbage collection of this store's GC heap.
    ///
    /// Panics if this store is configured for async; async stores must use
    /// the async GC entry point instead.
    #[cfg(feature = "gc")]
    fn do_gc(&mut self) {
        assert!(
            !self.async_support(),
            "must use `store.gc_async()` instead of `store.gc()` for async stores"
        );

        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots);
        // SAFETY(review): `roots` is not mutated while the iterator handed to
        // `gc` is live — presumably the contract `GcRootsList::iter` requires;
        // confirm against its `# Safety` docs.
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
1584
    /// Populates `gc_roots_list` with every GC root reachable from this store:
    /// Wasm stack slots, suspended continuations (when enabled), vmctx-managed
    /// globals/tables, and embedder-held roots.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "stack-switching")]
        self.trace_wasm_continuation_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1600
    /// Traces the live GC references in a single Wasm stack frame, adding each
    /// non-null stack slot holding a GC ref to `gc_roots_list`.
    ///
    /// Frames whose PC has no associated stack map are skipped (they hold no
    /// live GC refs at that point).
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        // The module that compiled the code at `pc` owns the stack map we need.
        let module_info = self
            .modules()
            .lookup_module_by_pc(pc)
            .expect("should have module info for Wasm frame");

        let stack_map = match module_info.lookup_stack_map(pc) {
            Some(sm) => sm,
            None => {
                log::trace!("No stack map for this Wasm frame");
                return;
            }
        };
        log::trace!(
            "We have a stack map that maps {} bytes in this Wasm frame",
            stack_map.frame_size()
        );

        // SAFETY(review): `fp` came from a live Wasm frame, so deriving `sp`
        // and reading the map's live slots is presumed in-bounds — confirm
        // against the stack map API's safety requirements.
        let sp = unsafe { stack_map.sp(fp) };
        for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
            let raw: u32 = unsafe { core::ptr::read(stack_slot) };
            log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

            // Null GC refs (raw == 0) are not roots and are skipped.
            let gc_ref = VMGcRef::from_raw_u32(raw);
            if gc_ref.is_some() {
                unsafe {
                    gc_roots_list
                        .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
                }
            }
        }
    }
1650
    /// Walks the currently-active Wasm stack via a backtrace and traces the GC
    /// roots of every frame found.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            // Always keep walking; we want roots from every frame.
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1663
    /// Traces GC roots held on the stacks of this store's continuations.
    ///
    /// Only suspended continuations need explicit tracing here; the other
    /// states are covered elsewhere or hold no GC values (see the match arms).
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
1702
1703    #[cfg(feature = "gc")]
1704    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1705        log::trace!("Begin trace GC roots :: vmctx");
1706        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
1707        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
1708        log::trace!("End trace GC roots :: vmctx");
1709    }
1710
1711    #[cfg(feature = "gc")]
1712    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
1713        log::trace!("Begin trace GC roots :: user");
1714        self.gc_roots.trace_roots(gc_roots_list);
1715        log::trace!("End trace GC roots :: user");
1716    }
1717
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    ///
    /// The store keeps `ty` registered for its whole lifetime.
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        self.gc_host_alloc_types.insert(ty);
    }
1728
1729    pub fn get_fuel(&self) -> Result<u64> {
1730        anyhow::ensure!(
1731            self.engine().tunables().consume_fuel,
1732            "fuel is not configured in this store"
1733        );
1734        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
1735        Ok(get_fuel(injected_fuel, self.fuel_reserve))
1736    }
1737
1738    fn refuel(&mut self) -> bool {
1739        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
1740        refuel(
1741            injected_fuel,
1742            &mut self.fuel_reserve,
1743            self.fuel_yield_interval,
1744        )
1745    }
1746
1747    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1748        anyhow::ensure!(
1749            self.engine().tunables().consume_fuel,
1750            "fuel is not configured in this store"
1751        );
1752        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
1753        set_fuel(
1754            injected_fuel,
1755            &mut self.fuel_reserve,
1756            self.fuel_yield_interval,
1757            fuel,
1758        );
1759        Ok(())
1760    }
1761
1762    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1763        anyhow::ensure!(
1764            self.engine().tunables().consume_fuel,
1765            "fuel is not configured in this store"
1766        );
1767        anyhow::ensure!(
1768            self.engine().config().async_support,
1769            "async support is not configured in this store"
1770        );
1771        anyhow::ensure!(
1772            interval != Some(0),
1773            "fuel_async_yield_interval must not be 0"
1774        );
1775        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1776        // Reset the fuel active + reserve states by resetting the amount.
1777        self.set_fuel(self.get_fuel()?)
1778    }
1779
    /// Returns the custom signal handler configured for this store, if any, as
    /// a raw pointer.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        // NOTE(review): the reference coerces to a raw pointer here; callers
        // presumably must not outlive the store's ownership of the handler —
        // confirm at use sites.
        Some(handler)
    }
1785
1786    #[inline]
1787    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
1788        NonNull::from(&self.vm_store_context)
1789    }
1790
1791    #[inline]
1792    pub fn default_caller(&self) -> NonNull<VMContext> {
1793        self.default_caller_vmctx.as_non_null()
1794    }
1795
    /// Returns the raw `dyn vm::VMStore` trait-object pointer for this store.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn vm::VMStore> {
        // The trait-object pointer is expected to have been initialized
        // before this is called; `unwrap` enforces that invariant.
        self.traitobj.as_raw().unwrap()
    }
1800
    /// Returns this store as an exclusive `dyn vm::VMStore` borrow.
    #[inline]
    pub fn traitobj_mut(&mut self) -> &mut dyn vm::VMStore {
        // SAFETY(review): `traitobj()` points back into this store, so given
        // `&mut self` this re-borrow is presumed sound — confirm against the
        // `traitobj` field's invariants.
        unsafe { self.traitobj().as_mut() }
    }
1805
1806    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
1807    /// used as part of calling the host in a `Func::new` method invocation.
1808    #[inline]
1809    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
1810        mem::take(&mut self.hostcall_val_storage)
1811    }
1812
1813    /// Restores the vector previously taken by `take_hostcall_val_storage`
1814    /// above back into the store, allowing it to be used in the future for the
1815    /// next wasm->host call.
1816    #[inline]
1817    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1818        if storage.capacity() > self.hostcall_val_storage.capacity() {
1819            self.hostcall_val_storage = storage;
1820        }
1821    }
1822
1823    /// Same as `take_hostcall_val_storage`, but for the direction of the host
1824    /// calling wasm.
1825    #[inline]
1826    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
1827        mem::take(&mut self.wasm_val_raw_storage)
1828    }
1829
1830    /// Same as `save_hostcall_val_storage`, but for the direction of the host
1831    /// calling wasm.
1832    #[inline]
1833    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1834        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1835            self.wasm_val_raw_storage = storage;
1836        }
1837    }
1838
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // At most one instance should claim the faulting address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No instance claimed the address: this is a fault Wasmtime cannot
        // attribute to wasm, so abort the process (see the message below).
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
1947
    /// Retrieve the store's protection key, if one was configured.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
1953
    /// Returns simultaneous exclusive borrows of this store's
    /// component-model resource state: active call contexts, the host-owned
    /// resource table, and host resource bookkeeping data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
1969
    /// Records that a component instance has been added to this store by
    /// bumping the instance count.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
1979
    /// Like `component_resource_state`, but additionally returns an exclusive
    /// pinned borrow of the given component instance's runtime state.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }
1998
    /// Returns an exclusive borrow of this store's fiber-based async state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2003
    /// Returns an exclusive borrow of this store's concurrent (component-model
    /// async) state.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_async_state_mut(&mut self) -> &mut concurrent::AsyncState {
        &mut self.concurrent_async_state
    }
2008
2009    #[cfg(feature = "async")]
2010    pub(crate) fn has_pkey(&self) -> bool {
2011        self.pkey.is_some()
2012    }
2013
    /// Returns a borrowed view of this store's executor, either the Pulley
    /// interpreter or the native backend (when compiled in).
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2021
    /// Swaps this store's executor with `executor`, leaving the previous one
    /// in the argument.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2026
    /// Returns the stack unwinder matching this store's executor.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2034
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    ///
    /// Returns a raw pointer to the boxed continuation; the store retains
    /// ownership via `self.continuations`, so the pointer stays valid for the
    /// store's lifetime.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        // The continuation's stack is sized by the engine's async stack size
        // configuration.
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        // Take the raw pointer before moving the box into the store's list.
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
2049
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) unsafe fn allocate_instance(
        &mut self,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id the new instance will receive; verified below.
        let id = self.instances.next_key();

        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator.allocate_module(InstanceAllocationRequest {
                id,
                runtime_info,
                imports,
                store: StorePtr::new(self.traitobj()),
                #[cfg(feature = "wmemcheck")]
                wmemcheck: self.engine().config().wmemcheck,
                pkey: self.get_pkey(),
                tunables: self.engine().tunables(),
            })?
        };

        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2125}
2126
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2145
2146unsafe impl<T> vm::VMStore for StoreInner<T> {
2147    #[cfg(feature = "component-model-async")]
2148    fn component_async_store(
2149        &mut self,
2150    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
2151        self
2152    }
2153
2154    fn store_opaque(&self) -> &StoreOpaque {
2155        &self.inner
2156    }
2157
2158    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
2159        &mut self.inner
2160    }
2161
2162    fn memory_growing(
2163        &mut self,
2164        current: usize,
2165        desired: usize,
2166        maximum: Option<usize>,
2167    ) -> Result<bool, anyhow::Error> {
2168        match self.limiter {
2169            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2170                limiter(&mut self.data).memory_growing(current, desired, maximum)
2171            }
2172            #[cfg(feature = "async")]
2173            Some(ResourceLimiterInner::Async(_)) => self.block_on(|store| {
2174                let limiter = match &mut store.0.limiter {
2175                    Some(ResourceLimiterInner::Async(limiter)) => limiter,
2176                    _ => unreachable!(),
2177                };
2178                limiter(&mut store.0.data).memory_growing(current, desired, maximum)
2179            })?,
2180            None => Ok(true),
2181        }
2182    }
2183
2184    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
2185        match self.limiter {
2186            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2187                limiter(&mut self.data).memory_grow_failed(error)
2188            }
2189            #[cfg(feature = "async")]
2190            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
2191                limiter(&mut self.data).memory_grow_failed(error)
2192            }
2193            None => {
2194                log::debug!("ignoring memory growth failure error: {error:?}");
2195                Ok(())
2196            }
2197        }
2198    }
2199
2200    fn table_growing(
2201        &mut self,
2202        current: usize,
2203        desired: usize,
2204        maximum: Option<usize>,
2205    ) -> Result<bool, anyhow::Error> {
2206        match self.limiter {
2207            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2208                limiter(&mut self.data).table_growing(current, desired, maximum)
2209            }
2210            #[cfg(feature = "async")]
2211            Some(ResourceLimiterInner::Async(_)) => self.block_on(|store| {
2212                let limiter = match &mut store.0.limiter {
2213                    Some(ResourceLimiterInner::Async(limiter)) => limiter,
2214                    _ => unreachable!(),
2215                };
2216                limiter(&mut store.0.data).table_growing(current, desired, maximum)
2217            })?,
2218            None => Ok(true),
2219        }
2220    }
2221
2222    fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
2223        match self.limiter {
2224            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
2225                limiter(&mut self.data).table_grow_failed(error)
2226            }
2227            #[cfg(feature = "async")]
2228            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
2229                limiter(&mut self.data).table_grow_failed(error)
2230            }
2231            None => {
2232                log::debug!("ignoring table growth failure: {error:?}");
2233                Ok(())
2234            }
2235        }
2236    }
2237
2238    fn out_of_gas(&mut self) -> Result<()> {
2239        if !self.refuel() {
2240            return Err(Trap::OutOfFuel.into());
2241        }
2242        #[cfg(feature = "async")]
2243        if self.fuel_yield_interval.is_some() {
2244            self.async_yield_impl()?;
2245        }
2246        Ok(())
2247    }
2248
2249    #[cfg(target_has_atomic = "64")]
2250    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
2251        // Temporarily take the configured behavior to avoid mutably borrowing
2252        // multiple times.
2253        let mut behavior = self.epoch_deadline_behavior.take();
2254        let delta_result = match &mut behavior {
2255            None => Err(Trap::Interrupt.into()),
2256            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
2257                let delta = match update {
2258                    UpdateDeadline::Continue(delta) => delta,
2259                    #[cfg(feature = "async")]
2260                    UpdateDeadline::Yield(delta) => {
2261                        assert!(
2262                            self.async_support(),
2263                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
2264                        );
2265                        // Do the async yield. May return a trap if future was
2266                        // canceled while we're yielded.
2267                        self.async_yield_impl()?;
2268                        delta
2269                    }
2270                    #[cfg(feature = "async")]
2271                    UpdateDeadline::YieldCustom(delta, future) => {
2272                        assert!(
2273                            self.async_support(),
2274                            "cannot use `UpdateDeadline::YieldCustom` without enabling async support in the config"
2275                        );
2276
2277                        // When control returns, we have a `Result<()>` passed
2278                        // in from the host fiber. If this finished successfully then
2279                        // we were resumed normally via a `poll`, so keep going.  If
2280                        // the future was dropped while we were yielded, then we need
2281                        // to clean up this fiber. Do so by raising a trap which will
2282                        // abort all wasm and get caught on the other side to clean
2283                        // things up.
2284                        self.block_on(|_| future)?;
2285                        delta
2286                    }
2287                };
2288
2289                // Set a new deadline and return the new epoch deadline so
2290                // the Wasm code doesn't have to reload it.
2291                self.set_epoch_deadline(delta);
2292                Ok(self.get_epoch_deadline())
2293            })
2294        };
2295
2296        // Put back the original behavior which was replaced by `take`.
2297        self.epoch_deadline_behavior = behavior;
2298        delta_result
2299    }
2300
    /// Grows or collects the GC heap as needed, keeping `root` (if any)
    /// rooted across the operation; simply forwards to the inner store's
    /// `maybe_async_gc`.
    ///
    /// # Safety
    ///
    /// Same contract as `StoreOpaque::maybe_async_gc` — NOTE(review): the
    /// precise caller obligations are documented on that method; confirm
    /// there.
    #[cfg(feature = "gc")]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        // SAFETY: contract is forwarded verbatim; the caller upholds
        // `maybe_async_gc`'s requirements.
        unsafe { self.inner.maybe_async_gc(root, bytes_needed) }
    }
2309
    /// Stub used when the `gc` feature is compiled out: there is no GC heap
    /// to grow or collect, so the root is returned unchanged and the
    /// requested byte count is ignored.
    #[cfg(not(feature = "gc"))]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        _bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        Ok(root)
    }
2318
    /// Gives mutable access to this store's component-model call contexts.
    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }
2323}
2324
2325impl<T> StoreInner<T> {
2326    #[cfg(target_has_atomic = "64")]
2327    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2328        // Set a new deadline based on the "epoch deadline delta".
2329        //
2330        // Also, note that when this update is performed while Wasm is
2331        // on the stack, the Wasm will reload the new value once we
2332        // return into it.
2333        let current_epoch = self.engine().current_epoch();
2334        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
2335        *epoch_deadline = current_epoch + delta;
2336    }
2337
2338    #[cfg(target_has_atomic = "64")]
2339    fn epoch_deadline_trap(&mut self) {
2340        self.epoch_deadline_behavior = None;
2341    }
2342
2343    #[cfg(target_has_atomic = "64")]
2344    fn epoch_deadline_callback(
2345        &mut self,
2346        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
2347    ) {
2348        self.epoch_deadline_behavior = Some(callback);
2349    }
2350
2351    fn get_epoch_deadline(&mut self) -> u64 {
2352        *self.vm_store_context.epoch_deadline.get_mut()
2353    }
2354}
2355
2356impl<T: Default> Default for Store<T> {
2357    fn default() -> Store<T> {
2358        Store::new(&Engine::default(), T::default())
2359    }
2360}
2361
2362impl<T: fmt::Debug> fmt::Debug for Store<T> {
2363    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2364        let inner = &**self.inner as *const StoreInner<T>;
2365        f.debug_struct("Store")
2366            .field("inner", &inner)
2367            .field("data", &self.inner.data)
2368            .finish()
2369    }
2370}
2371
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run any manual teardown steps first, while both `data` and the
        // store internals are still fully alive.
        self.run_manual_drop_routines();

        // for documentation on this `unsafe`, see `into_data`.
        //
        // The order here is deliberate: the user's `data` is dropped first,
        // then the rest of the store. `Drop for StoreOpaque` below relies on
        // `data` having already been deallocated by this point.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2383
impl Drop for StoreOpaque {
    /// Returns all instance/GC-heap resources owned by this store to the
    /// engine's instance allocator.
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            // Dummy instances are allocated via an on-demand allocator
            // rather than the engine's configured allocator, so they must be
            // deallocated through the same path (see the loop below).
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // If a GC heap was created for this store, return it to the
            // allocator: first the heap itself, then its backing memory.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Deallocate every instance owned by this store, routing each
            // one back to whichever allocator created it.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }

            // Balance the per-component-instance count increments that were
            // made while this store was live.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2422
#[cfg(test)]
mod tests {
    //! Unit tests for the free-function fuel-accounting helpers
    //! (`get_fuel`, `refuel`, `set_fuel`) used by the store.
    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Test harness bundling the state the fuel helpers operate on.
    ///
    /// As the assertions below demonstrate: `consumed_fuel` tracks fuel
    /// used so far (a negative value means fuel is still available in the
    /// active window), `reserve_fuel` is held back for later refuels, and
    /// `yield_interval`, when set, caps how much fuel is made active at a
    /// time.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        /// An empty tank: nothing consumed, nothing reserved, no interval.
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        /// Total fuel remaining (active window plus reserve).
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        /// Moves reserve fuel into the active window; returns whether there
        /// was fuel left to continue with.
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        /// Sets the total fuel, splitting it between the active window and
        /// the reserve according to `yield_interval`.
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        // Without a yield interval, all fuel goes into the active window
        // (negative `consumed_fuel`) and nothing is reserved.
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With an interval of 10, only 10 units become active; the
        // remaining 15 are held in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        // Totals near and past `i64::MAX` must round-trip exactly through
        // the split `consumed_fuel`/`reserve_fuel` representation.
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // Intervals at or above `i64::MAX` clamp the active window to
        // `i64::MAX` (the most a negative `consumed_fuel` can express);
        // no total fuel is lost either way.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit, as
        // some instructions can consume multiple units of fuel at once.
        // Refueling should be strict in its accounting and not add more
        // fuel than there is.
        let mut tank = FuelTank::new();

        // Positive `consumed_fuel` (over-consumption) is paid back out of
        // the reserve before the next interval's worth is made active.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        // Total fuel (4 units) is preserved across the refuel even though
        // the interval is smaller than the reserve.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // When over-consumption exceeds the reserve there is nothing left:
        // refuel fails and the state is left untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }
}