// wasmtime/runtime/store.rs
1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
//! reexported to the outside world and usable from outside this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//! Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//! intended to be consumed by the outside world. Note that the "just a
39//! pointer large" is a load-bearing implementation detail in Wasmtime. This
40//! enables it to store a pointer to its own trait object which doesn't need
41//! to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//! stored inside the `Box`. This is the general Rust pattern when one struct
45//! is a layer over another. The surprising part, though, is that this is
46//! further subdivided. This structure only contains things which actually
47//! need `T` itself. The downside of this structure is that it's always
48//! generic and means that code is monomorphized into consumer crates. We
49//! strive to have things be as monomorphic as possible in `wasmtime` so this
50//! type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//! Stored inline in the outer type the "opaque" here means that it's a
54//! "store" but it doesn't have access to the `T`. This is the primary
55//! "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//! internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//! All references of Wasm items into a `Store` are actually indices into a
60//! table in this structure, and the `StoreData` being separate makes it a bit
61//! easier to manage/define/work with. There's no real fundamental reason this
62//! is split out, although sometimes it's useful to have separate borrows into
63//! these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(all(feature = "gc", feature = "debug"))]
80use crate::OwnedRooted;
81use crate::RootSet;
82#[cfg(feature = "gc")]
83use crate::ThrownException;
84#[cfg(feature = "component-model-async")]
85use crate::component::ComponentStoreData;
86#[cfg(feature = "component-model")]
87use crate::component::concurrent;
88#[cfg(feature = "async")]
89use crate::fiber;
90use crate::module::RegisteredModuleId;
91use crate::prelude::*;
92#[cfg(feature = "gc")]
93use crate::runtime::vm::GcRootsList;
94#[cfg(feature = "stack-switching")]
95use crate::runtime::vm::VMContRef;
96use crate::runtime::vm::mpk::ProtectionKey;
97use crate::runtime::vm::{
98 self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
99 InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
100 SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
101 VMStoreContext,
102};
103use crate::trampoline::VMHostGlobalContext;
104#[cfg(feature = "debug")]
105use crate::{BreakpointState, DebugHandler};
106use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
107#[cfg(feature = "gc")]
108use crate::{ExnRef, Rooted};
109use crate::{Global, Instance, Table};
110use alloc::sync::Arc;
111use core::convert::Infallible;
112use core::fmt;
113use core::marker;
114use core::mem::{self, ManuallyDrop, MaybeUninit};
115use core::num::NonZeroU64;
116use core::ops::{Deref, DerefMut};
117use core::pin::Pin;
118use core::ptr::NonNull;
119use wasmtime_environ::StaticModuleIndex;
120use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
121
122mod context;
123pub use self::context::*;
124mod data;
125pub use self::data::*;
126mod func_refs;
127use func_refs::FuncRefs;
128#[cfg(feature = "component-model-async")]
129mod token;
130#[cfg(feature = "component-model-async")]
131pub(crate) use token::StoreToken;
132#[cfg(feature = "async")]
133mod async_;
134#[cfg(all(feature = "async", feature = "call-hook"))]
135pub use self::async_::CallHookHandler;
136
137#[cfg(feature = "gc")]
138use super::vm::VMExnRef;
139#[cfg(feature = "gc")]
140mod gc;
141
142/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
143///
144/// All WebAssembly instances and items will be attached to and refer to a
145/// [`Store`]. For example instances, functions, globals, and tables are all
146/// attached to a [`Store`]. Instances are created by instantiating a
147/// [`Module`](crate::Module) within a [`Store`].
148///
149/// A [`Store`] is intended to be a short-lived object in a program. No form
150/// of GC is implemented at this time so once an instance is created within a
151/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
152/// This makes [`Store`] unsuitable for creating an unbounded number of
153/// instances in it because [`Store`] will never release this memory. It's
154/// recommended to have a [`Store`] correspond roughly to the lifetime of a
155/// "main instance" that an embedding is interested in executing.
156///
157/// ## Type parameter `T`
158///
159/// Each [`Store`] has a type parameter `T` associated with it. This `T`
160/// represents state defined by the host. This state will be accessible through
161/// the [`Caller`](crate::Caller) type that host-defined functions get access
162/// to. This `T` is suitable for storing `Store`-specific information which
163/// imported functions may want access to.
164///
165/// The data `T` can be accessed through methods like [`Store::data`] and
166/// [`Store::data_mut`].
167///
168/// ## Stores, contexts, oh my
169///
170/// Most methods in Wasmtime take something of the form
171/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
172/// the first argument. These two traits allow ergonomically passing in the
173/// context you currently have to any method. The primary two sources of
174/// contexts are:
175///
176/// * `Store<T>`
177/// * `Caller<'_, T>`
178///
179/// corresponding to what you create and what you have access to in a host
180/// function. You can also explicitly acquire a [`StoreContext`] or
181/// [`StoreContextMut`] and pass that around as well.
182///
183/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
184/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
185/// form of context you have you can call various methods, create objects, etc.
186///
187/// ## Stores and `Default`
188///
189/// You can create a store with default configuration settings using
190/// `Store::default()`. This will create a brand new [`Engine`] with default
191/// configuration (see [`Config`](crate::Config) for more information).
192///
193/// ## Cross-store usage of items
194///
195/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
196/// [`Store`]. The store they belong to is the one they were created with
197/// (passed in as a parameter) or instantiated with. This store is the only
198/// store that can be used to interact with wasm items after they're created.
199///
200/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
201/// operations is incorrect. In other words it's considered a programmer error
202/// rather than a recoverable error for the wrong [`Store`] to be used when
203/// calling APIs.
pub struct Store<T: 'static> {
    /// The sole contents of a `Store<T>`: a pointer-sized handle to the
    /// heap-allocated `StoreInner<T>`.
    ///
    /// This is `ManuallyDrop` so that ownership of the user's `T` can be
    /// taken out without running this destructor; for details see the
    /// comments in `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
208
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
///
/// Each variant describes one direction of a host/wasm boundary crossing; see
/// [`CallHook::entering_host`] and [`CallHook::exiting_host`] for the two
/// groupings of these variants.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}
222
223impl CallHook {
224 /// Indicates the VM is entering host code (exiting WebAssembly code)
225 pub fn entering_host(&self) -> bool {
226 match self {
227 CallHook::ReturningFromWasm | CallHook::CallingHost => true,
228 _ => false,
229 }
230 }
231 /// Indicates the VM is exiting host code (entering WebAssembly code)
232 pub fn exiting_host(&self) -> bool {
233 match self {
234 CallHook::ReturningFromHost | CallHook::CallingWasm => true,
235 _ => false,
236 }
237 }
238}
239
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional user-registered resource limiter; the closure inside projects
    /// the limiter out of the user's `T`.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional user-registered hook invoked on host/wasm transitions.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's
    /// deadline; returns how to proceed via `UpdateDeadline`.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// This is stored as a `Box<dyn StoreDebugHandler<T>>` which is taken out
    /// of the store while the handler runs: the handler itself needs `&self`
    /// and also the whole Store mutably (via `StoreContextMut`), so
    /// `StoreDebugHandler::handle` consumes the box, clones the underlying
    /// handler, and re-installs the box before running.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
279
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Invoke the handler for `event`, consuming the boxed handler that was
    /// temporarily removed from the store. Implementations are expected to
    /// re-install the handler into `store` before returning the future.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
290
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it. Note that this
        // must happen before `store` is moved into the returned future.
        store.0.debug_handler = Some(self);
        // The returned future borrows `store` for `'a` and drives the
        // user's (cloned) handler to completion.
        Box::new(async move { handler.handle(store, event).await })
    }
}
313
/// Internal storage for a store's registered resource limiter.
///
/// Each variant holds a closure that projects a `&mut dyn
/// ResourceLimiter{,Async}` out of the user's `T`, so the limiter can live
/// inside the store's own data.
enum ResourceLimiterInner<T> {
    /// Synchronous limiter, usable on any store.
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    /// Asynchronous limiter; see `StoreResourceLimiter::Async`.
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
319
/// Representation of a configured resource limiter for a store.
///
/// This is acquired with `resource_limiter_and_store_opaque` for example and is
/// threaded through to growth operations on tables/memories. Note that this is
/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
/// efficient to pass around (nullable pointer) and it's also notably passed
/// around as an `Option` to represent how this is optionally specified within a
/// store.
pub enum StoreResourceLimiter<'a> {
    /// Borrow of a synchronous limiter.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// Borrow of an asynchronous limiter; its growth callbacks are `await`ed.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
333
334impl StoreResourceLimiter<'_> {
335 pub(crate) async fn memory_growing(
336 &mut self,
337 current: usize,
338 desired: usize,
339 maximum: Option<usize>,
340 ) -> Result<bool, Error> {
341 match self {
342 Self::Sync(s) => s.memory_growing(current, desired, maximum),
343 #[cfg(feature = "async")]
344 Self::Async(s) => s.memory_growing(current, desired, maximum).await,
345 }
346 }
347
348 pub(crate) fn memory_grow_failed(&mut self, error: crate::Error) -> Result<()> {
349 match self {
350 Self::Sync(s) => s.memory_grow_failed(error),
351 #[cfg(feature = "async")]
352 Self::Async(s) => s.memory_grow_failed(error),
353 }
354 }
355
356 pub(crate) async fn table_growing(
357 &mut self,
358 current: usize,
359 desired: usize,
360 maximum: Option<usize>,
361 ) -> Result<bool, Error> {
362 match self {
363 Self::Sync(s) => s.table_growing(current, desired, maximum),
364 #[cfg(feature = "async")]
365 Self::Async(s) => s.table_growing(current, desired, maximum).await,
366 }
367 }
368
369 pub(crate) fn table_grow_failed(&mut self, error: crate::Error) -> Result<()> {
370 match self {
371 Self::Sync(s) => s.table_grow_failed(error),
372 #[cfg(feature = "async")]
373 Self::Async(s) => s.table_grow_failed(error),
374 }
375 }
376}
377
/// Internal storage for a store's registered call hook (see
/// [`Store::call_hook`] and [`CallHook`]).
enum CallHookInner<T: 'static> {
    /// Synchronous hook invoked on each host/wasm transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// Asynchronous hook; see `CallHookHandler`.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Uninhabited variant ensuring `T` is always used regardless of which
    /// feature combination is enabled; never constructed.
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}
392
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the return value of the callback registered via
/// `epoch_deadline_behavior` on a store.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
420
421// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`
422impl<T> Deref for StoreInner<T> {
423 type Target = StoreOpaque;
424 fn deref(&self) -> &Self::Target {
425 &self.inner
426 }
427}
428
429impl<T> DerefMut for StoreInner<T> {
430 fn deref_mut(&mut self) -> &mut Self::Target {
431 &mut self.inner
432 }
433}
434
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store belongs to; cloned from the `Engine` passed to
    /// `Store::new`.
    engine: Engine,
    /// VM-visible state for this store; also holds the raw pointer to the
    /// user's `T` data, initialized in `Store::new`.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated in this store, including the dummy "default
    /// callee" instance created in `Store::new`.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    /// Count of component instances in this store (tracked separately from
    /// core wasm `instances` above).
    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// Custom signal handler for this store, if one was configured.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules instantiated in this store; `StoreInstanceKind::
    /// Real` holds ids into this registry.
    modules: ModuleRegistry,
    /// Storage for `VMFuncRef`s owned by this store.
    func_refs: FuncRefs,
    /// Host-created globals' backing storage.
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// Fiber/async execution state for this store.
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-pointer back to the enclosing `StoreInner<T>` as a `dyn VMStore`;
    /// see `StorePtr` for more.
    traitobj: StorePtr,
    /// The vmctx of the dummy "default callee" instance allocated in
    /// `Store::new`; dangling until that allocation completes.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::HandleTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,
    #[cfg(feature = "component-model")]
    concurrent_state: concurrent::ConcurrentState,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,

    /// The debug breakpoint state for this store.
    ///
    /// When guest debugging is enabled, a given store may have a set
    /// of breakpoints defined, denoted by module and Wasm PC within
    /// that module. Or alternately, it may be in "single-step" mode,
    /// where every possible breakpoint is logically enabled.
    ///
    /// When execution of any instance in this store hits any defined
    /// breakpoint, a `Breakpoint` debug event is emitted and the
    /// handler defined above, if any, has a chance to perform some
    /// logic before returning to allow execution to resume.
    #[cfg(feature = "debug")]
    breakpoints: BreakpointState,
}
568
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// `None` only transiently during `Store::new` before the pointer is filled
/// in; see the initialization of `traitobj` there.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);
575
// We can't make `VMStore: Send + Sync` because that requires making all of
// Wasmtime's internals generic over the `Store`'s `T`. So instead, we take care
// in the whole VM layer to only use the `VMStore` in ways that are `Send`- and
// `Sync`-safe and we have to have these unsafe impls.
//
// SAFETY: relies on the VM-layer discipline described above; the raw pointer
// itself is never used in a thread-unsafe manner.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
582
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm in this store runs on the Pulley interpreter.
    Interpreter(Interpreter),
    /// Wasm in this store runs as natively-compiled code; only available when
    /// a host compiler backend exists for this target.
    #[cfg(has_host_compiler_backend)]
    Native,
}
592
impl Executor {
    /// Selects the executor for `engine`: the Pulley interpreter when the
    /// engine targets Pulley, otherwise native execution.
    ///
    /// Note the `cfg` structure below: exactly one of the two `#[cfg]`'d
    /// expressions is compiled in, and it is the tail expression of this
    /// function.
    pub(crate) fn new(engine: &Engine) -> Self {
        // With a host compiler backend available, interpret only when the
        // `pulley` feature is on and the engine's target is Pulley.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a host compiler backend the target must be Pulley, so the
        // interpreter is the only option.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
608
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrow of the store's Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; carries no state.
    #[cfg(has_host_compiler_backend)]
    Native,
}
615
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is held in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered; if `true` the scope is
    /// exited on drop.
    entered: bool,
}
622
623impl<'a> AutoAssertNoGc<'a> {
624 #[inline]
625 pub fn new(store: &'a mut StoreOpaque) -> Self {
626 let entered = if !cfg!(feature = "gc") {
627 false
628 } else if let Some(gc_store) = store.gc_store.as_mut() {
629 gc_store.gc_heap.enter_no_gc_scope();
630 true
631 } else {
632 false
633 };
634
635 AutoAssertNoGc { store, entered }
636 }
637
638 /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
639 /// disables checks for no GC happening for the duration of this value.
640 ///
641 /// This is used when it is statically otherwise known that a GC doesn't
642 /// happen for the various types involved.
643 ///
644 /// # Unsafety
645 ///
646 /// This method is `unsafe` as it does not provide the same safety
647 /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
648 /// caller that a GC doesn't happen.
649 #[inline]
650 pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
651 if cfg!(debug_assertions) {
652 AutoAssertNoGc::new(store)
653 } else {
654 AutoAssertNoGc {
655 store,
656 entered: false,
657 }
658 }
659 }
660}
661
// An `AutoAssertNoGc` transparently dereferences to the wrapped
// `StoreOpaque`.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // Immutably reborrow the wrapped `&mut StoreOpaque`.
        &*self.store
    }
}
670
// Mutable counterpart of the `Deref` impl for `AutoAssertNoGc`.
impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // Reborrow the wrapped `&mut StoreOpaque`.
        &mut *self.store
    }
}
677
678impl Drop for AutoAssertNoGc<'_> {
679 #[inline]
680 fn drop(&mut self) {
681 if self.entered {
682 self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
683 }
684 }
685}
686
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// Handle to the underlying runtime instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
695
/// Discriminates real instances from internal dummy instances within a store.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
712
713impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        let pkey = engine.allocator().next_available_pkey();

        // Build the monomorphic portion of the store first. Note that
        // `traitobj` and `default_caller_vmctx` are placeholders here and are
        // filled in below once the heap allocation exists.
        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            traitobj: StorePtr(None),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
            #[cfg(feature = "component-model")]
            concurrent_state: Default::default(),
            #[cfg(feature = "debug")]
            breakpoints: Default::default(),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        });

        // Record a raw pointer to the user's `T` so VM code can access it
        // without going through `&StoreInner<T>` (preserving provenance; see
        // the docs on `data_no_provenance`).
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Now that the store is boxed (its address is stable) the
        // self-referential trait-object pointer can be filled in.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::new(StaticModuleIndex::from_u32(
            0,
        )));
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let id = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                &shim,
                Default::default(),
            ))
            .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
834
835 /// Access the underlying `T` data owned by this `Store`.
836 #[inline]
837 pub fn data(&self) -> &T {
838 self.inner.data()
839 }
840
841 /// Access the underlying `T` data owned by this `Store`.
842 #[inline]
843 pub fn data_mut(&mut self) -> &mut T {
844 self.inner.data_mut()
845 }
846
    /// Pre-drop cleanup shared by `Drop for Store<T>` and
    /// [`Store::into_data`]: tears down state that must be unwound in a
    /// specific order before the store's fields are dropped.
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        //
        // This will also drop any futures which might use a `&Accessor` fields
        // in their `Drop::drop` implementations, in which case they'll need to
        // be called from within the context of a `tls::set` closure.
        #[cfg(feature = "component-model-async")]
        ComponentStoreData::drop_fibers_and_futures(&mut **self.inner);

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
863
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            // Pull the box out so it's dropped at the end of this scope,
            // running `Drop for StoreInner<T>` but (per the `ManuallyDrop`
            // wrapper) not `T`'s destructor.
            let mut inner = ManuallyDrop::take(&mut self.inner);
            // Skip `Drop for Store<T>`; `inner` above is now the sole owner.
            core::mem::forget(self);
            // Move `T` out before the box's destructor runs below.
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
898
899 /// Configures the [`ResourceLimiter`] used to limit resource creation
900 /// within this [`Store`].
901 ///
902 /// Whenever resources such as linear memory, tables, or instances are
903 /// allocated the `limiter` specified here is invoked with the store's data
904 /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
905 /// being allocated. The returned [`ResourceLimiter`] is intended to live
906 /// within the `T` itself, for example by storing a
907 /// [`StoreLimits`](crate::StoreLimits).
908 ///
909 /// Note that this limiter is only used to limit the creation/growth of
910 /// resources in the future, this does not retroactively attempt to apply
911 /// limits to the [`Store`].
912 ///
913 /// # Examples
914 ///
915 /// ```
916 /// use wasmtime::*;
917 ///
918 /// struct MyApplicationState {
919 /// my_state: u32,
920 /// limits: StoreLimits,
921 /// }
922 ///
923 /// let engine = Engine::default();
924 /// let my_state = MyApplicationState {
925 /// my_state: 42,
926 /// limits: StoreLimitsBuilder::new()
927 /// .memory_size(1 << 20 /* 1 MB */)
928 /// .instances(2)
929 /// .build(),
930 /// };
931 /// let mut store = Store::new(&engine, my_state);
932 /// store.limiter(|state| &mut state.limits);
933 ///
934 /// // Creation of smaller memories is allowed
935 /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
936 ///
937 /// // Creation of a larger memory, however, will exceed the 1MB limit we've
938 /// // configured
939 /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
940 ///
941 /// // The number of instances in this store is limited to 2, so the third
942 /// // instance here should fail.
943 /// let module = Module::new(&engine, "(module)").unwrap();
944 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
945 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
946 /// assert!(Instance::new(&mut store, &module, &[]).is_err());
947 /// ```
948 ///
949 /// [`ResourceLimiter`]: crate::ResourceLimiter
950 pub fn limiter(
951 &mut self,
952 mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
953 ) {
954 // Apply the limits on instances, tables, and memory given by the limiter:
955 let inner = &mut self.inner;
956 let (instance_limit, table_limit, memory_limit) = {
957 let l = limiter(inner.data_mut());
958 (l.instances(), l.tables(), l.memories())
959 };
960 let innermost = &mut inner.inner;
961 innermost.instance_limit = instance_limit;
962 innermost.table_limit = table_limit;
963 innermost.memory_limit = memory_limit;
964
965 // Save the limiter accessor function:
966 inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
967 }
968
969 /// Configure a function that runs on calls and returns between WebAssembly
970 /// and host code.
971 ///
972 /// The function is passed a [`CallHook`] argument, which indicates which
973 /// state transition the VM is making.
974 ///
975 /// This function may return a [`Trap`]. If a trap is returned when an
976 /// import was called, it is immediately raised as-if the host import had
977 /// returned the trap. If a trap is returned after wasm returns to the host
978 /// then the wasm function's result is ignored and this trap is returned
979 /// instead.
980 ///
981 /// After this function returns a trap, it may be called for subsequent returns
982 /// to host or wasm code as the trap propagates to the root call.
983 #[cfg(feature = "call-hook")]
984 pub fn call_hook(
985 &mut self,
986 hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
987 ) {
988 self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
989 }
990
991 /// Returns the [`Engine`] that this store is associated with.
992 pub fn engine(&self) -> &Engine {
993 self.inner.engine()
994 }
995
996 /// Perform garbage collection.
997 ///
998 /// Note that it is not required to actively call this function. GC will
999 /// automatically happen according to various internal heuristics. This is
1000 /// provided if fine-grained control over the GC is desired.
1001 ///
1002 /// If you are calling this method after an attempted allocation failed, you
1003 /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
1004 /// When you do so, this method will attempt to create enough space in the
1005 /// GC heap for that allocation, so that it will succeed on the next
1006 /// attempt.
1007 ///
1008 /// This method is only available when the `gc` Cargo feature is enabled.
1009 #[cfg(feature = "gc")]
1010 pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
1011 StoreContextMut(&mut self.inner).gc(why)
1012 }
1013
1014 /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must
1015 /// be configured via [`Store::set_fuel`].
1016 ///
1017 /// # Errors
1018 ///
1019 /// This function will return an error if fuel consumption is not enabled
1020 /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
1021 pub fn get_fuel(&self) -> Result<u64> {
1022 self.inner.get_fuel()
1023 }
1024
1025 /// Set the fuel to this [`Store`] for wasm to consume while executing.
1026 ///
1027 /// For this method to work fuel consumption must be enabled via
1028 /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
1029 /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
1030 /// immediately trap). This function must be called for the store to have
1031 /// some fuel to allow WebAssembly to execute.
1032 ///
1033 /// Most WebAssembly instructions consume 1 unit of fuel. Some
1034 /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
1035 /// units, as any execution cost associated with them involves other
1036 /// instructions which do consume fuel.
1037 ///
1038 /// Note that when fuel is entirely consumed it will cause wasm to trap.
1039 ///
1040 /// # Errors
1041 ///
1042 /// This function will return an error if fuel consumption is not enabled via
1043 /// [`Config::consume_fuel`](crate::Config::consume_fuel).
1044 pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1045 self.inner.set_fuel(fuel)
1046 }
1047
1048 /// Configures a [`Store`] to yield execution of async WebAssembly code
1049 /// periodically.
1050 ///
1051 /// When a [`Store`] is configured to consume fuel with
1052 /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
1053 /// configure WebAssembly to be suspended and control will be yielded back to the
1054 /// caller every `interval` units of fuel consumed. This is only suitable with use of
1055 /// a store associated with an [async config](crate::Config::async_support) because
1056 /// only then are futures used and yields are possible.
1057 ///
1058 /// The purpose of this behavior is to ensure that futures which represent
1059 /// execution of WebAssembly do not execute too long inside their
1060 /// `Future::poll` method. This allows for some form of cooperative
1061 /// multitasking where WebAssembly will voluntarily yield control
1062 /// periodically (based on fuel consumption) back to the running thread.
1063 ///
1064 /// Note that futures returned by this crate will automatically flag
1065 /// themselves to get re-polled if a yield happens. This means that
1066 /// WebAssembly will continue to execute, just after giving the host an
1067 /// opportunity to do something else.
1068 ///
1069 /// The `interval` parameter indicates how much fuel should be
1070 /// consumed between yields of an async future. When fuel runs out wasm will trap.
1071 ///
1072 /// # Error
1073 ///
1074 /// This method will error if it is not called on a store associated with an [async
1075 /// config](crate::Config::async_support).
1076 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1077 self.inner.fuel_async_yield_interval(interval)
1078 }
1079
1080 /// Sets the epoch deadline to a certain number of ticks in the future.
1081 ///
1082 /// When the Wasm guest code is compiled with epoch-interruption
1083 /// instrumentation
1084 /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
1085 /// and when the `Engine`'s epoch is incremented
1086 /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
1087 /// past a deadline, execution can be configured to either trap or
1088 /// yield and then continue.
1089 ///
1090 /// This deadline is always set relative to the current epoch:
1091 /// `ticks_beyond_current` ticks in the future. The deadline can
1092 /// be set explicitly via this method, or refilled automatically
1093 /// on a yield if configured via
1094 /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
1095 /// this method is invoked, the deadline is reached when
1096 /// [`Engine::increment_epoch()`] has been invoked at least
1097 /// `ticks_beyond_current` times.
1098 ///
1099 /// By default a store will trap immediately with an epoch deadline of 0
1100 /// (which has always "elapsed"). This method is required to be configured
1101 /// for stores with epochs enabled to some future epoch deadline.
1102 ///
1103 /// See documentation on
1104 /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
1105 /// for an introduction to epoch-based interruption.
1106 #[cfg(target_has_atomic = "64")]
1107 pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
1108 self.inner.set_epoch_deadline(ticks_beyond_current);
1109 }
1110
1111 /// Configures epoch-deadline expiration to trap.
1112 ///
1113 /// When epoch-interruption-instrumented code is executed on this
1114 /// store and the epoch deadline is reached before completion,
1115 /// with the store configured in this way, execution will
1116 /// terminate with a trap as soon as an epoch check in the
1117 /// instrumented code is reached.
1118 ///
1119 /// This behavior is the default if the store is not otherwise
1120 /// configured via
1121 /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
1122 /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
1123 /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
1124 ///
1125 /// This setting is intended to allow for coarse-grained
1126 /// interruption, but not a deterministic deadline of a fixed,
1127 /// finite interval. For deterministic interruption, see the
1128 /// "fuel" mechanism instead.
1129 ///
1130 /// Note that when this is used it's required to call
1131 /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
1132 /// trap.
1133 ///
1134 /// See documentation on
1135 /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
1136 /// for an introduction to epoch-based interruption.
1137 #[cfg(target_has_atomic = "64")]
1138 pub fn epoch_deadline_trap(&mut self) {
1139 self.inner.epoch_deadline_trap();
1140 }
1141
1142 /// Configures epoch-deadline expiration to invoke a custom callback
1143 /// function.
1144 ///
1145 /// When epoch-interruption-instrumented code is executed on this
1146 /// store and the epoch deadline is reached before completion, the
1147 /// provided callback function is invoked.
1148 ///
1149 /// This callback should either return an [`UpdateDeadline`], or
1150 /// return an error, which will terminate execution with a trap.
1151 ///
1152 /// The [`UpdateDeadline`] is a positive number of ticks to
1153 /// add to the epoch deadline, as well as indicating what
1154 /// to do after the callback returns. If the [`Store`] is
1155 /// configured with async support, then the callback may return
1156 /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
1157 /// to yield to the async executor before updating the epoch deadline.
1158 /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
1159 /// update the epoch deadline immediately.
1160 ///
1161 /// This setting is intended to allow for coarse-grained
1162 /// interruption, but not a deterministic deadline of a fixed,
1163 /// finite interval. For deterministic interruption, see the
1164 /// "fuel" mechanism instead.
1165 ///
1166 /// See documentation on
1167 /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
1168 /// for an introduction to epoch-based interruption.
1169 #[cfg(target_has_atomic = "64")]
1170 pub fn epoch_deadline_callback(
1171 &mut self,
1172 callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
1173 ) {
1174 self.inner.epoch_deadline_callback(Box::new(callback));
1175 }
1176
1177 /// Set an exception as the currently pending exception, and
1178 /// return an error that propagates the throw.
1179 ///
1180 /// This method takes an exception object and stores it in the
1181 /// `Store` as the currently pending exception. This is a special
1182 /// rooted slot that holds the exception as long as it is
1183 /// propagating. This method then returns a `ThrownException`
1184 /// error, which is a special type that indicates a pending
1185 /// exception exists. When this type propagates as an error
1186 /// returned from a Wasm-to-host call, the pending exception is
1187 /// thrown within the Wasm context, and either caught or
1188 /// propagated further to the host-to-Wasm call boundary. If an
1189 /// exception is thrown out of Wasm (or across Wasm from a
1190 /// hostcall) back to the host-to-Wasm call boundary, *that*
1191 /// invocation returns a `ThrownException`, and the pending
1192 /// exception slot is again set. In other words, the
1193 /// `ThrownException` error type should propagate upward exactly
1194 /// and only when a pending exception is set.
1195 ///
1196 /// To inspect or take the pending exception, use
1197 /// [`peek_pending_exception`] and [`take_pending_exception`]. For
1198 /// a convenient wrapper that invokes a closure and provides any
1199 /// caught exception from the closure to a separate handler
1200 /// closure, see [`StoreContextMut::catch`].
1201 ///
1202 /// This method is parameterized over `R` for convenience, but
1203 /// will always return an `Err`.
1204 ///
1205 /// # Panics
1206 ///
1207 /// - Will panic if `exception` has been unrooted.
1208 /// - Will panic if `exception` is a null reference.
1209 /// - Will panic if a pending exception has already been set.
1210 #[cfg(feature = "gc")]
1211 pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
1212 self.inner.throw_impl(exception);
1213 Err(ThrownException)
1214 }
1215
1216 /// Take the currently pending exception, if any, and return it,
1217 /// removing it from the "pending exception" slot.
1218 ///
1219 /// If there is no pending exception, returns `None`.
1220 ///
1221 /// Note: the returned exception is a LIFO root (see
1222 /// [`crate::Rooted`]), rooted in the current handle scope. Take
1223 /// care to ensure that it is re-rooted or otherwise does not
1224 /// escape this scope! It is usually best to allow an exception
1225 /// object to be rooted in the store's "pending exception" slot
1226 /// until the final consumer has taken it, rather than root it and
1227 /// pass it up the callstack in some other way.
1228 ///
1229 /// This method is useful to implement ad-hoc exception plumbing
1230 /// in various ways, but for the most idiomatic handling, see
1231 /// [`StoreContextMut::catch`].
1232 #[cfg(feature = "gc")]
1233 pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
1234 self.inner.take_pending_exception_rooted()
1235 }
1236
1237 /// Tests whether there is a pending exception.
1238 ///
1239 /// Ordinarily, a pending exception will be set on a store if and
1240 /// only if a host-side callstack is propagating a
1241 /// [`crate::ThrownException`] error. The final consumer that
1242 /// catches the exception takes it; it may re-place it to re-throw
1243 /// (using [`throw`]) if it chooses not to actually handle the
1244 /// exception.
1245 ///
1246 /// This method is useful to tell whether a store is in this
1247 /// state, but should not be used as part of the ordinary
1248 /// exception-handling flow. For the most idiomatic handling, see
1249 /// [`StoreContextMut::catch`].
1250 #[cfg(feature = "gc")]
1251 pub fn has_pending_exception(&self) -> bool {
1252 self.inner.pending_exception.is_some()
1253 }
1254
    /// Provide an object that views Wasm stack state, including Wasm
    /// VM-level values (locals and operand stack), when debugging is
    /// enabled.
    ///
    /// Returns `None` if debug instrumentation is not enabled for
    /// the engine containing this store.
    #[cfg(feature = "debug")]
    pub fn debug_frames(&mut self) -> Option<crate::DebugFrameCursor<'_, T>> {
        // Delegate to the context-level implementation, which owns the actual
        // frame-walking logic.
        self.as_context_mut().debug_frames()
    }
1265
    /// Start an edit session to update breakpoints.
    ///
    /// Returns `None` when debug instrumentation is not enabled (mirrors
    /// the context-level implementation this delegates to).
    #[cfg(feature = "debug")]
    pub fn edit_breakpoints(&mut self) -> Option<crate::BreakpointEdit<'_>> {
        self.as_context_mut().edit_breakpoints()
    }
1271
    /// Return all breakpoints.
    ///
    /// Delegates to the context-level implementation; `None` when debug
    /// instrumentation is not enabled.
    #[cfg(feature = "debug")]
    pub fn breakpoints(&self) -> Option<impl Iterator<Item = crate::Breakpoint> + '_> {
        self.as_context().breakpoints()
    }
1277
    /// Indicate whether single-step mode is enabled.
    #[cfg(feature = "debug")]
    pub fn is_single_step(&self) -> bool {
        self.as_context().is_single_step()
    }
1283
1284 /// Set the debug callback on this store.
1285 ///
1286 /// See [`crate::DebugHandler`] for more documentation.
1287 ///
1288 /// # Panics
1289 ///
1290 /// - Will panic if this store is not configured for async
1291 /// support.
1292 /// - Will panic if guest-debug support was not enabled via
1293 /// [`crate::Config::guest_debug`].
1294 #[cfg(feature = "debug")]
1295 pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
1296 where
1297 // We require `Send` here because the debug handler becomes
1298 // referenced from a future: when `DebugHandler::handle` is
1299 // invoked, its `self` references the `handler` with the
1300 // user's state. Note that we are careful to keep this bound
1301 // constrained to debug-handler-related code only and not
1302 // propagate it outward to the store in general. The presence
1303 // of the trait implementation serves as a witness that `T:
1304 // Send`. This is required in particular because we will have
1305 // a `&mut dyn VMStore` on the stack when we pause a fiber
1306 // with `block_on` to run a debugger hook; that `VMStore` must
1307 // be a `Store<T> where T: Send`.
1308 T: Send,
1309 {
1310 assert!(
1311 self.inner.async_support(),
1312 "debug hooks rely on async support"
1313 );
1314 assert!(
1315 self.engine().tunables().debug_guest,
1316 "debug hooks require guest debugging to be enabled"
1317 );
1318 self.inner.debug_handler = Some(Box::new(handler));
1319 }
1320
1321 /// Clear the debug handler on this store. If any existed, it will
1322 /// be dropped.
1323 #[cfg(feature = "debug")]
1324 pub fn clear_debug_handler(&mut self) {
1325 self.inner.debug_handler = None;
1326 }
1327}
1328
1329impl<'a, T> StoreContext<'a, T> {
1330 pub(crate) fn async_support(&self) -> bool {
1331 self.0.async_support()
1332 }
1333
1334 /// Returns the underlying [`Engine`] this store is connected to.
1335 pub fn engine(&self) -> &Engine {
1336 self.0.engine()
1337 }
1338
1339 /// Access the underlying data owned by this `Store`.
1340 ///
1341 /// Same as [`Store::data`].
1342 pub fn data(&self) -> &'a T {
1343 self.0.data()
1344 }
1345
1346 /// Returns the remaining fuel in this store.
1347 ///
1348 /// For more information see [`Store::get_fuel`].
1349 pub fn get_fuel(&self) -> Result<u64> {
1350 self.0.get_fuel()
1351 }
1352}
1353
1354impl<'a, T> StoreContextMut<'a, T> {
1355 /// Access the underlying data owned by this `Store`.
1356 ///
1357 /// Same as [`Store::data`].
1358 pub fn data(&self) -> &T {
1359 self.0.data()
1360 }
1361
1362 /// Access the underlying data owned by this `Store`.
1363 ///
1364 /// Same as [`Store::data_mut`].
1365 pub fn data_mut(&mut self) -> &mut T {
1366 self.0.data_mut()
1367 }
1368
1369 /// Returns the underlying [`Engine`] this store is connected to.
1370 pub fn engine(&self) -> &Engine {
1371 self.0.engine()
1372 }
1373
1374 /// Perform garbage collection of `ExternRef`s.
1375 ///
1376 /// Same as [`Store::gc`].
1377 ///
1378 /// This method is only available when the `gc` Cargo feature is enabled.
1379 #[cfg(feature = "gc")]
1380 pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
1381 assert!(!self.0.async_support());
1382 let (mut limiter, store) = self.0.resource_limiter_and_store_opaque();
1383 vm::assert_ready(store.gc(limiter.as_mut(), None, why.map(|e| e.bytes_needed())));
1384 }
1385
1386 /// Returns remaining fuel in this store.
1387 ///
1388 /// For more information see [`Store::get_fuel`]
1389 pub fn get_fuel(&self) -> Result<u64> {
1390 self.0.get_fuel()
1391 }
1392
1393 /// Set the amount of fuel in this store.
1394 ///
1395 /// For more information see [`Store::set_fuel`]
1396 pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
1397 self.0.set_fuel(fuel)
1398 }
1399
1400 /// Configures this `Store` to periodically yield while executing futures.
1401 ///
1402 /// For more information see [`Store::fuel_async_yield_interval`]
1403 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1404 self.0.fuel_async_yield_interval(interval)
1405 }
1406
1407 /// Sets the epoch deadline to a certain number of ticks in the future.
1408 ///
1409 /// For more information see [`Store::set_epoch_deadline`].
1410 #[cfg(target_has_atomic = "64")]
1411 pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
1412 self.0.set_epoch_deadline(ticks_beyond_current);
1413 }
1414
1415 /// Configures epoch-deadline expiration to trap.
1416 ///
1417 /// For more information see [`Store::epoch_deadline_trap`].
1418 #[cfg(target_has_atomic = "64")]
1419 pub fn epoch_deadline_trap(&mut self) {
1420 self.0.epoch_deadline_trap();
1421 }
1422
1423 /// Set an exception as the currently pending exception, and
1424 /// return an error that propagates the throw.
1425 ///
1426 /// See [`Store::throw`] for more details.
1427 #[cfg(feature = "gc")]
1428 pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
1429 self.0.inner.throw_impl(exception);
1430 Err(ThrownException)
1431 }
1432
1433 /// Take the currently pending exception, if any, and return it,
1434 /// removing it from the "pending exception" slot.
1435 ///
1436 /// See [`Store::take_pending_exception`] for more details.
1437 #[cfg(feature = "gc")]
1438 pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
1439 self.0.inner.take_pending_exception_rooted()
1440 }
1441
1442 /// Tests whether there is a pending exception.
1443 ///
1444 /// See [`Store::has_pending_exception`] for more details.
1445 #[cfg(feature = "gc")]
1446 pub fn has_pending_exception(&self) -> bool {
1447 self.0.inner.pending_exception.is_some()
1448 }
1449}
1450
impl<T> StoreInner<T> {
    /// Shared access to the user's `T`, performed through the provenance of
    /// the `VMStoreContext::store_data` pointer (see comment below for why).
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }

    /// Split-borrow returning mutable access to the user's `T`, the optional
    /// resource limiter, and the `StoreOpaque` simultaneously.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }

    /// Mutable access to the user's `T`; provenance handling is delegated to
    /// `data_limiter_and_opaque`.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }

    /// Run any configured call hook and/or pkey transition for the state
    /// change `s`; the common no-hook, no-pkey case is a cheap inline check.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    /// Out-of-line portion of `call_hook`: applies pkey restrictions for the
    /// transition and then invokes the user's hook, if any.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatch `s` to the configured hook, sync or async as appropriate.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1557
/// Compute the total fuel remaining: the banked reserve plus whatever portion
/// of the injected VM counter has not yet been consumed.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // The VM counter starts negative and counts up toward zero, so the
    // unconsumed portion is its negation. Saturate at the bounds rather than
    // wrapping if the VM has run the counter past zero.
    let active = -injected_fuel;
    fuel_reserve.saturating_add_signed(active)
}
1561
1562// Add remaining fuel from the reserve into the active fuel if there is any left.
1563fn refuel(
1564 injected_fuel: &mut i64,
1565 fuel_reserve: &mut u64,
1566 yield_interval: Option<NonZeroU64>,
1567) -> bool {
1568 let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1569 if fuel > 0 {
1570 set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1571 true
1572 } else {
1573 false
1574 }
1575}
1576
/// Distribute `new_fuel_amount` between the VM's active counter and the
/// store's reserve, honoring the optional async yield interval.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // With periodic yields enabled only one interval's worth of fuel is
    // handed to the VM at a time; otherwise there is no interval cap.
    let interval = match yield_interval {
        Some(i) => i.get(),
        None => u64::MAX,
    };
    // The VM stores fuel as an i64, so additionally clamp the injected
    // amount to the i64 range.
    let injected = new_fuel_amount.min(interval).min(i64::MAX as u64);
    // Whatever didn't fit is banked in the reserve for later refills.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM increments this counter and halts once it becomes positive, so
    // inject the negated amount.
    *injected_fuel = -(injected as i64);
}
1596
1597#[doc(hidden)]
1598impl StoreOpaque {
    /// Returns this store's unique identifier, taken from its `StoreData`.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1602
1603 pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1604 fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1605 let new = slot.saturating_add(amt);
1606 if new > max {
1607 bail!("resource limit exceeded: {desc} count too high at {new}");
1608 }
1609 *slot = new;
1610 Ok(())
1611 }
1612
1613 let module = module.env_module();
1614 let memories = module.num_defined_memories();
1615 let tables = module.num_defined_tables();
1616
1617 bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1618 bump(
1619 &mut self.memory_count,
1620 self.memory_limit,
1621 memories,
1622 "memory",
1623 )?;
1624 bump(&mut self.table_count, self.table_limit, tables, "table")?;
1625
1626 Ok(())
1627 }
1628
    /// Whether this store supports asynchronous execution: requires both the
    /// `async` compile-time feature and `async_support` in the engine config.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }

    /// Returns the engine this store is associated with.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }

    /// Mutable access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }

    /// Split borrow: `StoreData` mutably plus the module registry immutably
    /// (disjoint fields, so both may be held at once).
    pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
        (&mut self.store_data, &self.modules)
    }
1652
    /// Split mutable borrow of the breakpoint state and the module registry
    /// (debugging support only).
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry_mut(
        &mut self,
    ) -> (&mut BreakpointState, &mut ModuleRegistry) {
        (&mut self.breakpoints, &mut self.modules)
    }

    /// Shared-borrow counterpart of `breakpoints_and_registry_mut`.
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry(&self) -> (&BreakpointState, &ModuleRegistry) {
        (&self.breakpoints, &self.modules)
    }
1664
    /// Returns the registry of modules known to this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }

    /// Registers `module` with this store's registry, returning its id.
    pub(crate) fn register_module(&mut self, module: &Module) -> Result<RegisteredModuleId> {
        self.modules.register_module(module, &self.engine)
    }

    /// Registers `component` with this store's module registry.
    #[cfg(feature = "component-model")]
    pub(crate) fn register_component(
        &mut self,
        component: &crate::component::Component,
    ) -> Result<()> {
        self.modules.register_component(component, &self.engine)
    }

    /// Split borrow of this store's `FuncRefs` (mutable) and module registry
    /// (shared).
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }

    /// Shared access to the host-created globals owned by this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }

    /// Mutable access to the host-created globals owned by this store.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1697
    /// Looks up the `Module` that `instance` was instantiated from, returning
    /// `None` for dummy instances (which have no associated module).
    ///
    /// # Panics
    ///
    /// Panics if `instance` does not belong to this store.
    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
        instance.store_id().assert_belongs_to(self.id());
        match self.instances[instance.instance()].kind {
            StoreInstanceKind::Dummy => None,
            StoreInstanceKind::Real { module_id } => {
                let module = self
                    .modules()
                    .module_by_id(module_id)
                    .expect("should always have a registered module for real instances");
                Some(module)
            }
        }
    }
1711
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Indexing panics if `id` is not a live instance in this store.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }

    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Indexing panics if `id` is not a live instance in this store.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }

    /// Accessor from `InstanceId` to both `Pin<&mut vm::Instance>`
    /// and `&ModuleRegistry`, borrowed simultaneously from disjoint fields.
    #[inline]
    pub fn instance_and_module_registry_mut(
        &mut self,
        id: InstanceId,
    ) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
        (self.instances[id].handle.get_mut(), &self.modules)
    }
1741
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // Overlapping ids cause `get_disjoint_mut` to fail; the `unwrap` turns
        // that into the panic documented above.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }

    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`,
    /// borrowed simultaneously.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }

    /// Tuple of `Self::optional_gc_store_mut`, `Self::modules`, and
    /// `Self::instance_mut`, borrowed simultaneously.
    pub fn optional_gc_store_and_registry_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (
        Option<&mut GcStore>,
        &ModuleRegistry,
        Pin<&mut vm::Instance>,
    ) {
        (
            self.gc_store.as_mut(),
            &self.modules,
            self.instances[id].handle.get_mut(),
        )
    }
1792
    /// Get all instances (ignoring dummy instances) within this store.
    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
        // Collect the ids into a `Vec` first so the closure below can capture
        // `self` without conflicting with the iteration's borrow of
        // `self.instances`.
        let instances = self
            .instances
            .iter()
            .filter_map(|(id, inst)| {
                if let StoreInstanceKind::Dummy = inst.kind {
                    None
                } else {
                    Some(id)
                }
            })
            .collect::<Vec<_>>();
        instances
            .into_iter()
            .map(|i| Instance::from_wasmtime(i, self))
    }

    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1821
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1835
    /// Iterate over all globals (host- or Wasm-defined) within this store,
    /// invoking `f` once per global.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1853
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }

    /// Shared access to the raw `VMStoreContext` of this store.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }

    /// Mutable access to the raw `VMStoreContext` of this store.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1868
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // Fast path: the GC store already exists.
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1886
    /// Allocates this store's `GcStore` (GC heap plus its backing linear
    /// memory) and installs it into the `VMStoreContext`.
    ///
    /// Kept out-of-line from `ensure_gc_store`; callers must have already
    /// checked that no `GcStore` exists yet.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Sanity-check that no GC heap was previously installed in the
        // `VMStoreContext`.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // Feature-gated helper: the real allocation path when `gc` is enabled.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::{StaticModuleIndex, packed_option::ReservedValue};

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(wasmtime_environ::Module::new(
                    StaticModuleIndex::from_u32(0),
                ))),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Fallback when the `gc` feature is compiled out: always an error.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1956
    /// Helper method to require that a `GcStore` was previously allocated for
    /// this store, failing if it has not yet been allocated.
    ///
    /// Note that this should only be used in a context where allocation of a
    /// `GcStore` is sure to have already happened prior, otherwise this may
    /// return a confusing error to embedders which is a bug in Wasmtime.
    ///
    /// Some situations where it's safe to call this method:
    ///
    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
    ///   this shows proof that the `GcStore` was previously allocated.
    /// * During instantiation and instance's `needs_gc_heap` flag will be
    ///   handled and instantiation will automatically create a GC store.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }

    /// Same as [`Self::require_gc_store`], but mutable.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
        match &mut self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }

    /// Attempts to access the GC store that has been previously allocated.
    ///
    /// This method will return `Some` if the GC store was previously allocated.
    /// A `None` return value means either that the GC heap hasn't yet been
    /// allocated or that it does not need to be allocated for this store. Note
    /// that to require a GC store in a particular situation it's recommended to
    /// use [`Self::require_gc_store_mut`] instead.
    #[inline]
    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
            // Without GC support a heap can never have been allocated.
            debug_assert!(self.gc_store.is_none());
            None
        } else {
            self.gc_store.as_mut()
        }
    }
2005
    /// Helper to assert that a GC store was previously allocated and is
    /// present.
    ///
    /// # Panics
    ///
    /// This method will panic if the GC store has not yet been allocated. This
    /// should only be used in a context where there's an existing GC reference,
    /// for example, or if `ensure_gc_store` has already been called.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Same as [`Self::unwrap_gc_store`], but mutable.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Shared access to the set of embedder-rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }

    /// Mutable access to the set of embedder-rooted GC references.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }

    /// Exits the LIFO rooting scope identified by `scope` (delegates to
    /// `RootSet::exit_lifo_scope` with the optional GC store).
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
2046
    /// Performs a garbage collection of this store's GC heap, if one has been
    /// allocated.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots).await;
        // Cooperative yielding during collection happens only with async
        // support enabled.
        let async_yield = self.async_support();
        self.unwrap_gc_store_mut()
            .gc(async_yield, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
2072
    /// Discovers all GC roots — Wasm stacks, suspended continuations, vmctx
    /// state, user roots, and any pending exception — appending them to
    /// `gc_roots_list`, yielding between phases when async is enabled.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            #[cfg(feature = "async")]
            if self.async_support() {
                vm::Yield::new().await;
            }
        }
        self.trace_vmctx_roots(gc_roots_list);
        #[cfg(feature = "async")]
        if self.async_support() {
            vm::Yield::new().await;
        }
        self.trace_user_roots(gc_roots_list);
        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2103
    /// Records the live GC references in a single Wasm stack `frame` into
    /// `gc_roots_list`, using the owning module's stack maps (and, with the
    /// `debug` feature, its frame tables as well).
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        let (module_with_code, _offset) = self
            .modules()
            .module_and_code_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // With debugging enabled, frame tables may describe additional GC
        // ref slots beyond what the stack map covers.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_with_code.module().frame_table() {
            let relpc = module_with_code
                .text_offset(pc)
                .expect("PC should be within module");
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2150
    /// Reads the raw GC reference bits at `stack_slot` and, when they encode a
    /// non-null reference, roots the slot itself in `gc_roots_list`.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be a valid, readable pointer to a `u32` GC-reference
    /// slot that stays live while the roots list is in use.
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2167
    /// Walks the current Wasm stack via `Backtrace` and roots every live GC
    /// reference found in its frames.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2180
    /// Roots the GC references found on the stacks of this store's suspended
    /// stack-switching continuations.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2219
    /// Roots the GC references reachable from vmctx-owned state: every global
    /// and every table in this store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }

    /// Roots the embedder-held (user) GC references tracked in `gc_roots`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }

    /// Roots the currently pending exception, if one is set on this store.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2246
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        self.gc_host_alloc_types.insert(ty);
    }

    /// Helper function execute a `init_gc_ref` when placing `gc_ref` in `dest`.
    ///
    /// This avoids allocating `GcStore` where possible.
    pub(crate) fn init_gc_ref(
        &mut self,
        dest: &mut MaybeUninit<Option<VMGcRef>>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if GcStore::needs_init_barrier(gc_ref) {
            // Barrier required: route through the GC store (must exist if a
            // barrier-needing ref exists).
            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
        } else {
            // No barrier required; copy the value directly (`copy_i31` is a
            // plain copy for refs that don't need the GC store).
            dest.write(gc_ref.map(|r| r.copy_i31()));
        }
    }

    /// Helper function execute a write barrier when placing `gc_ref` in `dest`.
    ///
    /// This avoids allocating `GcStore` where possible.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }

    /// Helper function to clone `gc_ref` notably avoiding allocating a
    /// `GcStore` where possible.
    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
        if gc_ref.is_i31() {
            // i31 refs are unmanaged and can be copied without the GC store.
            gc_ref.copy_i31()
        } else {
            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
        }
    }
2289
    /// Returns the total fuel (active counter plus reserve) remaining in this
    /// store.
    ///
    /// # Errors
    ///
    /// Fails if fuel consumption was not enabled in this store's config.
    pub fn get_fuel(&self) -> Result<u64> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // NOTE(review): raw read of the VM's shared fuel counter cell; assumed
        // not concurrently mutated while we hold `&self` — confirm.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }

    /// Moves reserve fuel into the active counter, returning whether any fuel
    /// remained to continue execution with.
    pub(crate) fn refuel(&mut self) -> bool {
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }

    /// Sets the store's total fuel to `fuel`, split between the active counter
    /// and the reserve according to the configured yield interval.
    ///
    /// # Errors
    ///
    /// Fails if fuel consumption was not enabled in this store's config.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2322
2323 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2324 crate::ensure!(
2325 self.engine().tunables().consume_fuel,
2326 "fuel is not configured in this store"
2327 );
2328 crate::ensure!(
2329 self.engine().config().async_support,
2330 "async support is not configured in this store"
2331 );
2332 crate::ensure!(
2333 interval != Some(0),
2334 "fuel_async_yield_interval must not be 0"
2335 );
2336 self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2337 // Reset the fuel active + reserve states by resetting the amount.
2338 self.set_fuel(self.get_fuel()?)
2339 }
2340
    /// Returns a raw pointer to the custom signal handler, if one is
    /// installed.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        Some(handler)
    }

    /// Raw pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }

    /// This store's default caller `VMContext`.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }

    /// This store's own `dyn VMStore` trait-object pointer.
    ///
    /// # Panics
    ///
    /// Panics if the trait-object pointer has not been initialized yet.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2361
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }

    /// Restores the vector previously taken by `take_hostcall_val_storage`
    /// above back into the store, allowing it to be used in the future for the
    /// next wasm->host call.
    #[inline]
    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
        // Only keep the returned buffer when it would grow the cached
        // capacity; otherwise drop it and retain the current one.
        if storage.capacity() > self.hostcall_val_storage.capacity() {
            self.hostcall_val_storage = storage;
        }
    }

    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }

    /// Same as `save_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
2394
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // A single address should belong to at most one instance's
                // memory; a second match would itself be a bug.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc: 0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2503
2504 /// Retrieve the store's protection key.
2505 #[inline]
2506 #[cfg(feature = "pooling-allocator")]
2507 pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
2508 self.pkey
2509 }
2510
2511 #[inline]
2512 #[cfg(feature = "component-model")]
2513 pub(crate) fn component_resource_state(
2514 &mut self,
2515 ) -> (
2516 &mut vm::component::CallContexts,
2517 &mut vm::component::HandleTable,
2518 &mut crate::component::HostResourceData,
2519 ) {
2520 (
2521 &mut self.component_calls,
2522 &mut self.component_host_table,
2523 &mut self.host_resource_data,
2524 )
2525 }
2526
    /// Records that one more component instance now lives in this store.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        // Only a running count is tracked for now; it's balanced against the
        // allocator in `Drop for StoreOpaque`.
        self.num_component_instances += 1;
    }
2536
2537 #[inline]
2538 #[cfg(feature = "component-model")]
2539 pub(crate) fn component_resource_state_with_instance(
2540 &mut self,
2541 instance: crate::component::Instance,
2542 ) -> (
2543 &mut vm::component::CallContexts,
2544 &mut vm::component::HandleTable,
2545 &mut crate::component::HostResourceData,
2546 Pin<&mut vm::component::ComponentInstance>,
2547 ) {
2548 (
2549 &mut self.component_calls,
2550 &mut self.component_host_table,
2551 &mut self.host_resource_data,
2552 instance.id().from_data_get_mut(&mut self.store_data),
2553 )
2554 }
2555
2556 #[cfg(feature = "component-model")]
2557 pub(crate) fn component_resource_state_with_instance_and_concurrent_state(
2558 &mut self,
2559 instance: crate::component::Instance,
2560 ) -> (
2561 &mut vm::component::CallContexts,
2562 &mut vm::component::HandleTable,
2563 &mut crate::component::HostResourceData,
2564 Pin<&mut vm::component::ComponentInstance>,
2565 &mut concurrent::ConcurrentState,
2566 ) {
2567 (
2568 &mut self.component_calls,
2569 &mut self.component_host_table,
2570 &mut self.host_resource_data,
2571 instance.id().from_data_get_mut(&mut self.store_data),
2572 &mut self.concurrent_state,
2573 )
2574 }
2575
    /// Mutable access to this store's `fiber::AsyncState`.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2580
    /// Mutable access to this store's component-model-async
    /// `ConcurrentState`.
    #[cfg(feature = "component-model-async")]
    pub(crate) fn concurrent_state_mut(&mut self) -> &mut concurrent::ConcurrentState {
        &mut self.concurrent_state
    }
2585
    /// Whether a protection key was assigned to this store.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2590
    /// Returns a borrowed view of this store's executor, i.e. whether wasm
    /// runs through the interpreter or natively.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            // Only present when a native compiler backend exists for the host.
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2598
    /// Exchanges this store's `Executor` with the one behind `executor`.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2603
    /// Returns the stack-unwinding implementation matching this store's
    /// executor: the interpreter supplies its own `Unwind` while native
    /// execution uses `vm::UnwindHost`.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2611
2612 /// Allocates a new continuation. Note that we currently don't support
2613 /// deallocating them. Instead, all continuations remain allocated
2614 /// throughout the store's lifetime.
2615 #[cfg(feature = "stack-switching")]
2616 pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
2617 // FIXME(frank-emrich) Do we need to pin this?
2618 let mut continuation = Box::new(VMContRef::empty());
2619 let stack_size = self.engine.config().async_stack_size;
2620 let stack = crate::vm::VMContinuationStack::new(stack_size)?;
2621 continuation.stack = stack;
2622 let ptr = continuation.deref_mut() as *mut VMContRef;
2623 self.continuations.push(continuation);
2624 Ok(ptr)
2625 }
2626
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id the instance will receive when pushed below; this is
        // double-checked against the actual id at the end of this function.
        let id = self.instances.next_key();

        // Dummy instances are allocated with the caller-provided allocator
        // rather than the engine's configured one.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        // Record the freshly-allocated instance in the store, tagging it as
        // either a real module instance or a dummy.
        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2702
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    ///
    /// Note that any previously-pending exception is overwritten here.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }
2711
    /// Take a pending exception, if any, leaving `None` in its place.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2717
    /// Tests whether there is a pending exception.
    ///
    /// Does not consume or otherwise disturb the pending exception slot.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2723
2724 #[cfg(feature = "gc")]
2725 fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
2726 let vmexnref = self.take_pending_exception()?;
2727 let mut nogc = AutoAssertNoGc::new(self);
2728 Some(Rooted::new(&mut nogc, vmexnref.into()))
2729 }
2730
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    ///
    /// The pending slot is briefly emptied and refilled: the `VMExnRef` is
    /// taken out, a clone of the GC reference is put back into the slot, and
    /// the original reference is rooted and returned. This all runs under
    /// `AutoAssertNoGc`, which asserts no collection occurs in between.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(&mut self) -> Option<OwnedRooted<ExnRef>> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception.take().map(|vmexnref| {
            // Clone so the store retains one copy while the caller receives
            // the other (rooted) copy.
            let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
            nogc.pending_exception = Some(cloned.into_exnref_unchecked());
            OwnedRooted::new(&mut nogc, vmexnref.into())
        })
    }
2742
2743 #[cfg(feature = "gc")]
2744 fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
2745 let mut nogc = AutoAssertNoGc::new(self);
2746 let exnref = exception._to_raw(&mut nogc).unwrap();
2747 let exnref = VMGcRef::from_raw_u32(exnref)
2748 .expect("exception cannot be null")
2749 .into_exnref_unchecked();
2750 nogc.set_pending_exception(exnref);
2751 }
2752
2753 #[cfg(target_has_atomic = "64")]
2754 pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2755 // Set a new deadline based on the "epoch deadline delta".
2756 //
2757 // Also, note that when this update is performed while Wasm is
2758 // on the stack, the Wasm will reload the new value once we
2759 // return into it.
2760 let current_epoch = self.engine().current_epoch();
2761 let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
2762 *epoch_deadline = current_epoch + delta;
2763 }
2764
    /// Reads the currently-configured epoch deadline.
    ///
    /// `&mut self` is required to use the non-atomic `get_mut` accessor on
    /// the underlying cell.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2768}
2769
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2788
// Bridges the typed `StoreInner<T>` to the type-erased `VMStore` interface
// used throughout the runtime.
unsafe impl<T> VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    // Splits the store into its resource limiter (connected to the embedder's
    // `T` data via the configured closure) and the opaque internals.
    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        // The configured limiter closure, if any, extracts the actual
        // limiter from the embedder's data.
        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    // Invokes the configured epoch-deadline callback, defaulting to
    // `UpdateDeadline::Interrupt` when none is installed.
    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }

    // Raises `event` to the embedder's registered debug handler, blocking on
    // the handler's future.
    //
    // NOTE(review): the handler is `take()`n here and not visibly restored in
    // this method — presumably it's re-armed elsewhere (or the `handle` call
    // does so); confirm that subsequent events aren't silently dropped.
    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> crate::Result<()> {
        if let Some(handler) = self.debug_handler.take() {
            if !self.can_block() {
                bail!("could not invoke debug handler without async context");
            }
            log::trace!("about to raise debug event {event:?}");
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
2854
impl<T> StoreInner<T> {
    /// Configures epoch-deadline expiry to interrupt: with no callback
    /// installed, `new_epoch_updated_deadline` above yields
    /// `UpdateDeadline::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs a callback invoked when the epoch deadline is reached to
    /// decide how execution should proceed.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2869
2870impl<T: Default> Default for Store<T> {
2871 fn default() -> Store<T> {
2872 Store::new(&Engine::default(), T::default())
2873 }
2874}
2875
2876impl<T: fmt::Debug> fmt::Debug for Store<T> {
2877 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2878 let inner = &**self.inner as *const StoreInner<T>;
2879 f.debug_struct("Store")
2880 .field("inner", &inner)
2881 .field("data", self.inner.data())
2882 .finish()
2883 }
2884}
2885
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run any manual drop routines first, while the store is still fully
        // alive.
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        //
        // Order matters here: the embedder's `T` data is dropped before the
        // rest of the store internals.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2897
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            // Dummy instances were allocated by an on-demand-flavored
            // allocator (see `AllocateInstanceKind::Dummy`), so they are
            // returned the same way below rather than through the engine's
            // configured allocator.
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap (if one was ever created) and its backing
            // memory to the engine's allocator.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Deallocate every instance owned by this store, routing dummy
            // instances through the on-demand allocator.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            // Balance the allocator's component-instance accounting against
            // the increments made by `push_component_instance`.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2936
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
/// Internal conversion trait: anything that can yield `&mut StoreOpaque`.
///
/// Implemented below for the various store wrapper types so helpers can be
/// generic over "some mutable handle to a store".
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2946
// `StoreOpaque` trivially yields itself.
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2952
// NOTE(review): returning `self` here relies on a coercion from
// `&mut dyn VMStore` to `&mut StoreOpaque` (presumably a `DerefMut` impl
// defined outside this view).
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2958
// Reaches through the `Store<T>`'s inner box to the opaque internals.
impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}
2964
// NOTE(review): `self` coerces from `&mut StoreInner<T>` to
// `&mut StoreOpaque` (presumably via a `DerefMut` impl defined outside this
// view).
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2970
// Blanket impl so `&mut T` works wherever `T: AsStoreOpaque` does.
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
2976
#[cfg(test)]
mod tests {
    use super::*;

    // Small harness around the parent module's free-function fuel helpers
    // (`get_fuel`, `refuel`, `set_fuel`) so they can be exercised without
    // constructing a full `Store`.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With a yield interval of 10, only the interval's worth of fuel is
        // made immediately consumable; the remainder is held in reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        // Fuel values at and above `i64::MAX` must round-trip exactly through
        // the signed `consumed_fuel` representation.
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // Intervals larger than `i64::MAX` are capped at `i64::MAX` worth of
        // immediately-consumable fuel.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel consumption to have gone over the limit, as
        // some instructions can consume multiple units of fuel at once.
        // Refueling should be strict in its consumption and not add more fuel
        // than there is.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // Out of fuel: refueling reports failure and leaves state untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }

    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        // Simulates a wasm-side mutation by writing through the raw pointer
        // that the store exposes in its `VMStoreContext`.
        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        // Interleave Rust-side mutations with the raw-pointer mutation to
        // check that both views observe each other's writes.
        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}