// wasmtime/runtime/store.rs
1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
//! reexported to the outside world to be usable outside of this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//! Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//! intended to be consumed by the outside world. Note that the "just a
39//! pointer large" is a load-bearing implementation detail in Wasmtime. This
40//! enables it to store a pointer to its own trait object which doesn't need
41//! to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//! stored inside the `Box`. This is the general Rust pattern when one struct
45//! is a layer over another. The surprising part, though, is that this is
46//! further subdivided. This structure only contains things which actually
47//! need `T` itself. The downside of this structure is that it's always
48//! generic and means that code is monomorphized into consumer crates. We
49//! strive to have things be as monomorphic as possible in `wasmtime` so this
50//! type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//! Stored inline in the outer type, the "opaque" here means that it's a
54//! "store" but it doesn't have access to the `T`. This is the primary
55//! "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//! internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//! All references of Wasm items into a `Store` are actually indices into a
60//! table in this structure, and the `StoreData` being separate makes it a bit
61//! easier to manage/define/work with. There's no real fundamental reason this
62//! is split out, although sometimes it's useful to have separate borrows into
63//! these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(all(feature = "gc", feature = "debug"))]
80use crate::OwnedRooted;
81use crate::RootSet;
82#[cfg(feature = "gc")]
83use crate::ThrownException;
84use crate::error::OutOfMemory;
85#[cfg(feature = "async")]
86use crate::fiber;
87use crate::module::{RegisterBreakpointState, RegisteredModuleId};
88use crate::prelude::*;
89#[cfg(feature = "gc")]
90use crate::runtime::vm::GcRootsList;
91#[cfg(feature = "stack-switching")]
92use crate::runtime::vm::VMContRef;
93use crate::runtime::vm::mpk::ProtectionKey;
94use crate::runtime::vm::{
95 self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
96 InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
97 SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
98 VMStoreContext,
99};
100use crate::trampoline::VMHostGlobalContext;
101#[cfg(feature = "debug")]
102use crate::{BreakpointState, DebugHandler, FrameDataCache};
103use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
104#[cfg(feature = "gc")]
105use crate::{ExnRef, Rooted};
106use crate::{Global, Instance, Table};
107use core::convert::Infallible;
108use core::fmt;
109use core::marker;
110use core::mem::{self, ManuallyDrop, MaybeUninit};
111use core::num::NonZeroU64;
112use core::ops::{Deref, DerefMut};
113use core::pin::Pin;
114use core::ptr::NonNull;
115use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, TripleExt};
116
117mod context;
118pub use self::context::*;
119mod data;
120pub use self::data::*;
121mod func_refs;
122use func_refs::FuncRefs;
123#[cfg(feature = "component-model-async")]
124mod token;
125#[cfg(feature = "component-model-async")]
126pub(crate) use token::StoreToken;
127#[cfg(feature = "async")]
128mod async_;
129#[cfg(all(feature = "async", feature = "call-hook"))]
130pub use self::async_::CallHookHandler;
131
132#[cfg(feature = "gc")]
133use super::vm::VMExnRef;
134#[cfg(feature = "gc")]
135mod gc;
136
137/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
138///
139/// All WebAssembly instances and items will be attached to and refer to a
140/// [`Store`]. For example instances, functions, globals, and tables are all
141/// attached to a [`Store`]. Instances are created by instantiating a
142/// [`Module`](crate::Module) within a [`Store`].
143///
144/// A [`Store`] is intended to be a short-lived object in a program. No form
145/// of GC is implemented at this time so once an instance is created within a
146/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
147/// This makes [`Store`] unsuitable for creating an unbounded number of
148/// instances in it because [`Store`] will never release this memory. It's
149/// recommended to have a [`Store`] correspond roughly to the lifetime of a
150/// "main instance" that an embedding is interested in executing.
151///
152/// ## Type parameter `T`
153///
154/// Each [`Store`] has a type parameter `T` associated with it. This `T`
155/// represents state defined by the host. This state will be accessible through
156/// the [`Caller`](crate::Caller) type that host-defined functions get access
157/// to. This `T` is suitable for storing `Store`-specific information which
158/// imported functions may want access to.
159///
160/// The data `T` can be accessed through methods like [`Store::data`] and
161/// [`Store::data_mut`].
162///
163/// ## Stores, contexts, oh my
164///
165/// Most methods in Wasmtime take something of the form
166/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
167/// the first argument. These two traits allow ergonomically passing in the
168/// context you currently have to any method. The primary two sources of
169/// contexts are:
170///
171/// * `Store<T>`
172/// * `Caller<'_, T>`
173///
174/// corresponding to what you create and what you have access to in a host
175/// function. You can also explicitly acquire a [`StoreContext`] or
176/// [`StoreContextMut`] and pass that around as well.
177///
178/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
179/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
180/// form of context you have you can call various methods, create objects, etc.
181///
182/// ## Stores and `Default`
183///
184/// You can create a store with default configuration settings using
185/// `Store::default()`. This will create a brand new [`Engine`] with default
186/// configuration (see [`Config`](crate::Config) for more information).
187///
188/// ## Cross-store usage of items
189///
190/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
191/// [`Store`]. The store they belong to is the one they were created with
192/// (passed in as a parameter) or instantiated with. This store is the only
193/// store that can be used to interact with wasm items after they're created.
194///
195/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
196/// operations is incorrect. In other words it's considered a programmer error
197/// rather than a recoverable error for the wrong [`Store`] to be used when
198/// calling APIs.
199///
200/// [`Memory`]: crate::Memory
pub struct Store<T: 'static> {
    /// The single heap allocation holding all store state; keeping `Store<T>`
    /// exactly one pointer large is a load-bearing detail (see the module
    /// docs on self-referential trait-object pointers).
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
205
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
///
/// Transitions can be classified with [`CallHook::entering_host`] and
/// [`CallHook::exiting_host`].
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}
219
220impl CallHook {
221 /// Indicates the VM is entering host code (exiting WebAssembly code)
222 pub fn entering_host(&self) -> bool {
223 match self {
224 CallHook::ReturningFromWasm | CallHook::CallingHost => true,
225 _ => false,
226 }
227 }
228 /// Indicates the VM is exiting host code (entering WebAssembly code)
229 pub fn exiting_host(&self) -> bool {
230 match self {
231 CallHook::ReturningFromHost | CallHook::CallingWasm => true,
232 _ => false,
233 }
234 }
235}
236
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional resource limiter (see [`Store::limiter`]); the stored closure
    /// projects the user's `T` to the configured limiter trait object.
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional hook invoked on host<->wasm call transitions (see
    /// [`Store::call_hook`]).
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine epoch reaches this store's deadline,
    /// returning an `UpdateDeadline` that decides how execution proceeds.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// We need this to be an `Arc` because the handler itself takes
    /// `&self` and also the whole Store mutably (via
    /// `StoreContextMut`); so we need to hold a separate reference to
    /// it while invoking it.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
276
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Handles a debug `event`, consuming the boxed handler and returning a
    /// future which performs the user's handling logic.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
287
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    // Dispatches `event` to the user's `DebugHandler`, re-installing the
    // (cloned-from) boxed handler back onto the store before returning the
    // future that runs the user's logic.
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it.
        store.0.debug_handler = Some(self);
        Box::new(async move { handler.handle(store, event).await })
    }
}
310
/// Type-erased resource-limiter callback stored in `StoreInner`.
///
/// The closure projects the user's `T` to the limiter trait object configured
/// for this store; sync and async limiters are distinguished by variant.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
316
/// Representation of a configured resource limiter for a store.
///
/// This is acquired with `resource_limiter_and_store_opaque` for example and is
/// threaded through to growth operations on tables/memories. Note that this is
/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
/// efficient to pass around (nullable pointer) and it's also notably passed
/// around as an `Option` to represent how this is optionally specified within a
/// store.
pub enum StoreResourceLimiter<'a> {
    /// A synchronous limiter; queries return immediately.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// An asynchronous limiter; growth queries must be `.await`-ed.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
330
impl StoreResourceLimiter<'_> {
    /// Asks the limiter whether a linear memory may grow from `current` to
    /// `desired` (with optional `maximum`), awaiting the async limiter when
    /// configured.
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a memory growth operation failed with `error`.
    pub(crate) fn memory_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Asks the limiter whether a table may grow from `current` to `desired`
    /// (with optional `maximum`), awaiting the async limiter when configured.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a table growth operation failed with `error`.
    pub(crate) fn table_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
374
/// Type-erased call-hook callback stored in `StoreInner`, invoked on
/// host<->wasm transitions with a `CallHook` describing the transition.
enum CallHookInner<T: 'static> {
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}
389
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the return value of the epoch-deadline callback configured on a
/// store.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
423
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    // Borrow the monomorphic `StoreOpaque` portion of this store.
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
431
// Mutable counterpart of the `Deref` forwarding above.
impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
437
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store belongs to.
    engine: Engine,
    /// Context shared with the `vm` layer; `Store::try_new` publishes a raw
    /// pointer to the user's `T` into this structure.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated within this store, including the internal
    /// "dummy" default-callee instance created in `Store::try_new`.
    instances: TryPrimaryMap<InstanceId, StoreInstance>,

    /// Custom signal handler for this store, if any.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules used by instances of this store.
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: TryPrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-referential pointer to this store's `dyn VMStore` trait object;
    /// see `StorePtr`.
    traitobj: StorePtr,
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimize wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: TryVec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,

    /// The debug breakpoint state for this store.
    ///
    /// When guest debugging is enabled, a given store may have a set
    /// of breakpoints defined, denoted by module and Wasm PC within
    /// that module. Or alternately, it may be in "single-step" mode,
    /// where every possible breakpoint is logically enabled.
    ///
    /// When execution of any instance in this store hits any defined
    /// breakpoint, a `Breakpoint` debug event is emitted and the
    /// handler defined above, if any, has a chance to perform some
    /// logic before returning to allow execution to resume.
    #[cfg(feature = "debug")]
    breakpoints: BreakpointState,

    /// The debug PC-to-FrameData cache for this store.
    ///
    /// When guest debugging is enabled, we parse compiler metadata
    /// and pass out `FrameHandle`s that represent Wasm guest
    /// frames. These handles represent a specific frame within a
    /// frozen stack and are invalidated upon further execution. In
    /// order to keep these handles lightweight, and to avoid
    /// redundant work when passing out *new* handles after further
    /// execution, we cache the mapping from store-specific PCs to
    /// parsed frame data. (This cache needs to be store-specific
    /// rather than e.g. engine-specific because each store has its
    /// own privately mapped copy of guest code when debugging is
    /// enabled, so the key-space is unique for each store.)
    #[cfg(feature = "debug")]
    frame_data_cache: FrameDataCache,
}
573
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// Holds `None` until `Store::try_new` back-fills the pointer once the
/// `StoreInner<T>` allocation has a stable heap address.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);
580
// We can't make `VMStore: Send + Sync` because that requires making all of
// Wasmtime's internals generic over the `Store`'s `T`. So instead, we take care
// in the whole VM layer to only use the `VMStore` in ways that are `Send`- and
// `Sync`-safe and we have to have these unsafe impls.
//
// SAFETY: upheld by the VM layer as described above; the raw pointer stored
// here is only used in `Send`-/`Sync`-safe ways.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
587
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Execute wasm through the Pulley interpreter.
    Interpreter(Interpreter),
    /// Execute natively-compiled code directly on the host.
    #[cfg(has_host_compiler_backend)]
    Native,
}
597
impl Executor {
    /// Creates the executor for `engine`, choosing between native execution
    /// and the Pulley interpreter based on compile-time and engine
    /// configuration. Returns an error if interpreter allocation fails.
    pub(crate) fn new(engine: &Engine) -> Result<Self, OutOfMemory> {
        // With a host compiler backend available, the interpreter is only
        // used when the engine explicitly targets Pulley.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Ok(Executor::Interpreter(Interpreter::new(engine)?))
        } else {
            Ok(Executor::Native)
        }
        // Without a host compiler backend the interpreter is the only option,
        // and the engine's target is expected to always be Pulley.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Ok(Executor::Interpreter(Interpreter::new(engine)?))
        }
    }
}
613
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// See `Executor::Interpreter`.
    Interpreter(InterpreterRef<'a>),
    /// See `Executor::Native`.
    #[cfg(has_host_compiler_backend)]
    Native,
}
620
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap (if any) is held in a no-GC scope.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered and must be exited on drop;
    /// `false` when the `gc` feature is disabled or no GC heap exists yet.
    entered: bool,
}
627
628impl<'a> AutoAssertNoGc<'a> {
629 #[inline]
630 pub fn new(store: &'a mut StoreOpaque) -> Self {
631 let entered = if !cfg!(feature = "gc") {
632 false
633 } else if let Some(gc_store) = store.gc_store.as_mut() {
634 gc_store.gc_heap.enter_no_gc_scope();
635 true
636 } else {
637 false
638 };
639
640 AutoAssertNoGc { store, entered }
641 }
642
643 /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
644 /// disables checks for no GC happening for the duration of this value.
645 ///
646 /// This is used when it is statically otherwise known that a GC doesn't
647 /// happen for the various types involved.
648 ///
649 /// # Unsafety
650 ///
651 /// This method is `unsafe` as it does not provide the same safety
652 /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
653 /// caller that a GC doesn't happen.
654 #[inline]
655 pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
656 if cfg!(debug_assertions) {
657 AutoAssertNoGc::new(store)
658 } else {
659 AutoAssertNoGc {
660 store,
661 entered: false,
662 }
663 }
664 }
665}
666
// Transparently expose the underlying store for shared access.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}
675
// Transparently expose the underlying store for mutable access.
impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
682
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        if self.entered {
            // `entered` is only set to `true` in `new` when a GC store was
            // present, so unwrapping the GC store here is expected to succeed.
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
691
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The underlying allocated instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal "dummy"; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
700
/// Discriminates real instances from internal dummy instances; stored in
/// `StoreInstance::kind`.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
717
718impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    ///
    /// # Panics
    ///
    /// Panics on allocation failure while creating the store; use
    /// [`Store::try_new`] to handle such failures as errors instead.
    pub fn new(engine: &Engine, data: T) -> Self {
        Self::try_new(engine, data).expect(
            "allocation failure during `Store::new` (use `Store::try_new` to handle such errors)",
        )
    }
733
    /// Like `Store::new` but returns an error on allocation failure.
    pub fn try_new(engine: &Engine, data: T) -> Result<Self> {
        let store_data = StoreData::new(engine);
        log::trace!("creating new store {:?}", store_data.id());

        let pkey = engine.allocator().next_available_pkey();

        // First build up the monomorphic `StoreOpaque` portion of the store
        // with default/empty state; self-referential pointers are back-filled
        // below once the heap allocation exists.
        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: TryPrimaryMap::new(),
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: TryPrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            traitobj: StorePtr(None),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: TryVec::new(),
            pkey,
            executor: Executor::new(engine)?,
            #[cfg(feature = "debug")]
            breakpoints: Default::default(),
            #[cfg(feature = "debug")]
            frame_data_cache: FrameDataCache::new(),
        };
        let mut inner = try_new::<Box<_>>(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        })?;

        // Publish a type-erased raw pointer to the user's `T` into the
        // `VMStoreContext` now that the `StoreInner` allocation has a stable
        // address.
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Likewise back-fill the self-referential `dyn VMStore` pointer.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let allocator = OnDemandInstanceAllocator::default();
        let info = engine.empty_module_runtime_info();
        allocator
            .validate_module(info.env_module(), info.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let result = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                info,
                Default::default(),
            ));
            let id = match result {
                Ok(id) => id,
                Err(e) => {
                    // Out-of-memory is a recoverable error for `try_new`;
                    // anything else here indicates a bug.
                    if e.is::<OutOfMemory>() {
                        return Err(e);
                    }
                    panic!("instance allocator failed to allocate default callee")
                }
            };
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Ok(Self {
            inner: ManuallyDrop::new(inner),
        })
    }
842
    /// Access the underlying `T` data owned by this `Store`.
    #[inline]
    pub fn data(&self) -> &T {
        // Delegates to `StoreInner::data`, which performs a provenance-aware
        // read of the `T` stored inline in the store.
        self.inner.data()
    }
848
    /// Access the underlying `T` data owned by this `Store`, mutably.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        // Mutable counterpart of `data`; same provenance-aware access via
        // `StoreInner::data_mut`.
        self.inner.data_mut()
    }
854
    /// Runs cleanup that must happen while the store is still fully alive
    /// (e.g. before `into_data` extracts the `T`): per-`StoreData` manual
    /// drop routines, plus flushing cached fiber stacks back to the instance
    /// allocator.
    fn run_manual_drop_routines(&mut self) {
        StoreData::run_manual_drop_routines(StoreContextMut(&mut self.inner));

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
862
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        // Cleanup must run while `self` is still intact; see
        // `run_manual_drop_routines` for what this flushes.
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
897
898 /// Configures the [`ResourceLimiter`] used to limit resource creation
899 /// within this [`Store`].
900 ///
901 /// Whenever resources such as linear memory, tables, or instances are
902 /// allocated the `limiter` specified here is invoked with the store's data
903 /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
904 /// being allocated. The returned [`ResourceLimiter`] is intended to live
905 /// within the `T` itself, for example by storing a
906 /// [`StoreLimits`](crate::StoreLimits).
907 ///
908 /// Note that this limiter is only used to limit the creation/growth of
909 /// resources in the future, this does not retroactively attempt to apply
910 /// limits to the [`Store`].
911 ///
912 /// # Examples
913 ///
914 /// ```
915 /// use wasmtime::*;
916 ///
917 /// struct MyApplicationState {
918 /// my_state: u32,
919 /// limits: StoreLimits,
920 /// }
921 ///
922 /// let engine = Engine::default();
923 /// let my_state = MyApplicationState {
924 /// my_state: 42,
925 /// limits: StoreLimitsBuilder::new()
926 /// .memory_size(1 << 20 /* 1 MB */)
927 /// .instances(2)
928 /// .build(),
929 /// };
930 /// let mut store = Store::new(&engine, my_state);
931 /// store.limiter(|state| &mut state.limits);
932 ///
933 /// // Creation of smaller memories is allowed
934 /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
935 ///
936 /// // Creation of a larger memory, however, will exceed the 1MB limit we've
937 /// // configured
938 /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
939 ///
940 /// // The number of instances in this store is limited to 2, so the third
941 /// // instance here should fail.
942 /// let module = Module::new(&engine, "(module)").unwrap();
943 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
944 /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
945 /// assert!(Instance::new(&mut store, &module, &[]).is_err());
946 /// ```
947 ///
948 /// [`ResourceLimiter`]: crate::ResourceLimiter
949 pub fn limiter(
950 &mut self,
951 mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
952 ) {
953 // Apply the limits on instances, tables, and memory given by the limiter:
954 let inner = &mut self.inner;
955 let (instance_limit, table_limit, memory_limit) = {
956 let l = limiter(inner.data_mut());
957 (l.instances(), l.tables(), l.memories())
958 };
959 let innermost = &mut inner.inner;
960 innermost.instance_limit = instance_limit;
961 innermost.table_limit = table_limit;
962 innermost.memory_limit = memory_limit;
963
964 // Save the limiter accessor function:
965 inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
966 }
967
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    ///
    /// [`Trap`]: crate::Trap
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Store the hook; `StoreInner::call_hook` consults this on each
        // host/wasm transition.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
991
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // The engine handle lives in the inner store; the borrow is tied to
        // `&self`.
        self.inner.engine()
    }
996
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// # Errors
    ///
    /// This method will fail if an [async limiter is
    /// configured](Store::limiter_async) in which case [`Store::gc_async`] must
    /// be used instead.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        // Wrap the inner store in a temporary `StoreContextMut` and run the
        // synchronous GC entrypoint.
        StoreContextMut(&mut self.inner).gc(why)
    }
1018
    /// Returns the amount of fuel in this [`Store`]. When fuel is enabled, it
    /// must be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        // Pure delegation; fuel bookkeeping lives in the inner store.
        self.inner.get_fuel()
    }
1029
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // Pure delegation; fuel bookkeeping lives in the inner store.
        self.inner.set_fuel(fuel)
    }
1052
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back
    /// to the caller every `interval` units of fuel consumed. When using this
    /// method it requires further invocations of WebAssembly to use `*_async`
    /// entrypoints.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if fuel is not enabled or `interval` is
    /// `Some(0)`.
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // Pure delegation; validation of `interval` happens in the inner store.
        self.inner.fuel_async_yield_interval(interval)
    }
1085
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // Pure delegation; the deadline is stored relative to the engine's
        // current epoch by the inner store.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
1116
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        // Pure delegation; replaces any previously-configured deadline behavior.
        self.inner.epoch_deadline_trap();
    }
1147
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // Box and store the callback; invoked by the inner store when the
        // epoch deadline is reached.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1182
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To take the pending exception, use [`Self::take_pending_exception`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Park the exception in the store's pending-exception slot...
        self.inner.throw_impl(exception);
        // ...and return the marker error that signals a pending exception to
        // propagating callers.
        Err(ThrownException)
    }
1217
    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        // Moves the exception out of the pending slot, re-rooting it in the
        // current scope (see the note above about LIFO roots).
        self.inner.take_pending_exception_rooted()
    }
1238
    /// Tests whether there is a pending exception.
    ///
    /// Ordinarily, a pending exception will be set on a store if and
    /// only if a host-side callstack is propagating a
    /// [`crate::ThrownException`] error. The final consumer that
    /// catches the exception takes it; it may re-place it to re-throw
    /// (using [`Self::throw`]) if it chooses not to actually handle the
    /// exception.
    ///
    /// This method is useful to tell whether a store is in this
    /// state, but should not be used as part of the ordinary
    /// exception-handling flow. For the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        // Non-destructive check; does not take or re-root the exception.
        self.inner.pending_exception.is_some()
    }
1256
    /// Return all breakpoints.
    #[cfg(feature = "debug")]
    pub fn breakpoints(&self) -> Option<impl Iterator<Item = crate::Breakpoint> + '_> {
        // Read-only view; delegates to the shared `StoreContext` accessor.
        self.as_context().breakpoints()
    }
1262
    /// Indicate whether single-step mode is enabled.
    #[cfg(feature = "debug")]
    pub fn is_single_step(&self) -> bool {
        // Read-only view; delegates to the shared `StoreContext` accessor.
        self.as_context().is_single_step()
    }
1268
    /// Set the debug callback on this store.
    ///
    /// See [`crate::DebugHandler`] for more documentation.
    ///
    /// # Panics
    ///
    /// - Will panic if guest-debug support was not enabled via
    ///   [`crate::Config::guest_debug`].
    #[cfg(feature = "debug")]
    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
    where
        // We require `Send` here because the debug handler becomes
        // referenced from a future: when `DebugHandler::handle` is
        // invoked, its `self` references the `handler` with the
        // user's state. Note that we are careful to keep this bound
        // constrained to debug-handler-related code only and not
        // propagate it outward to the store in general. The presence
        // of the trait implementation serves as a witness that `T:
        // Send`. This is required in particular because we will have
        // a `&mut dyn VMStore` on the stack when we pause a fiber
        // with `block_on` to run a debugger hook; that `VMStore` must
        // be a `Store<T> where T: Send`.
        T: Send,
    {
        // Debug hooks rely on async support, so async entrypoints are required.
        self.inner.set_async_required(Asyncness::Yes);

        // Panic (per the docs above) if guest debugging wasn't enabled in the
        // engine's config.
        assert!(
            self.engine().tunables().debug_guest,
            "debug hooks require guest debugging to be enabled"
        );
        self.inner.debug_handler = Some(Box::new(handler));
    }
1302
    /// Clear the debug handler on this store. If any existed, it will
    /// be dropped.
    #[cfg(feature = "debug")]
    pub fn clear_debug_handler(&mut self) {
        // Overwriting the `Option` drops any previously-installed boxed handler.
        self.inner.debug_handler = None;
    }
1309
    /// Register a [`Module`] with this store's module registry for
    /// debugging, without instantiating it.
    ///
    /// This makes the module visible to debuggers (via
    /// `debug_all_modules`) before the module is actually
    /// instantiated. This is useful for guest-debug workflows where
    /// the debugger needs to see modules to set breakpoints before
    /// the first Wasm instruction executes.
    #[cfg(feature = "debug")]
    pub fn debug_register_module(&mut self, module: &crate::Module) -> crate::Result<()> {
        // Split borrows: registration needs the module registry, the engine,
        // and the breakpoint set all at once.
        let (modules, engine, breakpoints) = self.inner.modules_and_engine_and_breakpoints_mut();
        modules.register_module(module, engine, breakpoints)?;
        Ok(())
    }
1324
1325 /// Register all inner modules of a [`Component`](crate::component::Component)
1326 /// with this store's module registry for debugging, without instantiating
1327 /// the component.
1328 #[cfg(all(feature = "debug", feature = "component-model"))]
1329 pub fn debug_register_component(
1330 &mut self,
1331 component: &crate::component::Component,
1332 ) -> crate::Result<()> {
1333 for module in component.static_modules() {
1334 self.debug_register_module(module)?;
1335 }
1336 Ok(())
1337 }
1338}
1339
impl<'a, T> StoreContext<'a, T> {
    /// Returns the underlying [`Engine`] this store is connected to.
    ///
    /// Same as [`Store::engine`].
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &'a T {
        // Note the `'a` lifetime on the return value: the borrow is tied to
        // the store itself rather than to this `&self` borrow.
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1360
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        // Sync entrypoint: this errors out if an async resource limiter is
        // configured (the `*_async` variant must be used in that case).
        let (mut limiter, store) = self.0.validate_sync_resource_limiter_and_store_opaque()?;
        // With `Asyncness::No` the returned GC future should complete without
        // suspending, hence the `assert_ready`.
        vm::assert_ready(store.gc(
            limiter.as_mut(),
            None,
            why.map(|e| e.bytes_needed()),
            Asyncness::No,
        ));
        Ok(())
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Same flow as `Store::throw`: park the exception in the pending slot
        // and return the marker error.
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
1461
impl<T> StoreInner<T> {
    /// Shared access to the `T` data, read through the provenance of the
    /// `VMStoreContext::store_data` pointer.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }

    /// Splits `self` into three disjoint mutable borrows: the `T` data, the
    /// optionally-configured resource limiter, and the `StoreOpaque`.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }

    /// Mutable access to the `T` data; see `data_limiter_and_opaque` for the
    /// provenance-careful borrow splitting.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }

    /// Runs per-transition work (pkey switching and/or the configured call
    /// hook) for the transition `s`, with a fast no-op path when neither is
    /// configured.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    /// Out-of-line portion of `call_hook`: applies pkey restrictions for the
    /// transition, then invokes the user's call hook if one is configured.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatches to the sync or async variant of the configured call hook.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }

    /// Splits this `StoreInner<T>` into a `limiter`/`StoreOpaque` borrow while
    /// validating that an async limiter is not configured.
    ///
    /// This is used for sync entrypoints which need to fail if an async limiter
    /// is configured as otherwise the async entrypoint must be used instead.
    pub(crate) fn validate_sync_resource_limiter_and_store_opaque(
        &mut self,
    ) -> Result<(Option<StoreResourceLimiter<'_>>, &mut StoreOpaque)> {
        let (limiter, store) = self.resource_limiter_and_store_opaque();
        if !matches!(limiter, None | Some(StoreResourceLimiter::Sync(_))) {
            bail!(
                "when using an async resource limiter `*_async` functions must \
                 be used instead"
            );
        }
        Ok((limiter, store))
    }
}
1586
/// Computes the total fuel remaining: the banked reserve plus whatever of the
/// injected fuel the VM has not yet consumed, saturating at zero.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // The VM counts fuel by incrementing `injected_fuel` upward from a
    // negative value, so the unconsumed portion is its negation.
    let unconsumed = -injected_fuel;
    fuel_reserve.saturating_add_signed(unconsumed)
}
1590
1591// Add remaining fuel from the reserve into the active fuel if there is any left.
1592fn refuel(
1593 injected_fuel: &mut i64,
1594 fuel_reserve: &mut u64,
1595 yield_interval: Option<NonZeroU64>,
1596) -> bool {
1597 let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1598 if fuel > 0 {
1599 set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1600 true
1601 } else {
1602 false
1603 }
1604}
1605
/// Distributes `new_fuel_amount` between the fuel counter handed to the VM
/// ("injected") and the store-side reserve used for later refills.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // The amount injected at once is capped by two things: the periodic-yield
    // interval, when one is configured, and the i64 range of the VM's fuel
    // counter.
    let interval_cap = yield_interval.map_or(u64::MAX, NonZeroU64::get);
    let cap = interval_cap.min(i64::MAX as u64);
    let injected = new_fuel_amount.min(cap);

    // Whatever isn't injected now is banked in the reserve for later use.
    *fuel_reserve = new_fuel_amount - injected;

    // The VM increments the counter to track consumption, so the injected
    // amount is stored negated; the VM halts once the counter turns positive.
    *injected_fuel = -(injected as i64);
}
1625
1626#[doc(hidden)]
1627impl StoreOpaque {
    /// Returns this store's unique identifier, used to tag store-owned items
    /// so cross-store usage can be detected.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1631
1632 pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1633 fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1634 let new = slot.saturating_add(amt);
1635 if new > max {
1636 bail!("resource limit exceeded: {desc} count too high at {new}");
1637 }
1638 *slot = new;
1639 Ok(())
1640 }
1641
1642 let module = module.env_module();
1643 let memories = module.num_defined_memories();
1644 let tables = module.num_defined_tables();
1645
1646 bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1647 bump(
1648 &mut self.memory_count,
1649 self.memory_limit,
1650 memories,
1651 "memory",
1652 )?;
1653 bump(&mut self.table_count, self.table_limit, tables, "table")?;
1654
1655 Ok(())
1656 }
1657
    /// Returns the [`Engine`] this store is associated with.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1662
    /// Shared access to this store's item storage (`StoreData`).
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1667
    /// Mutable access to this store's item storage (`StoreData`).
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1672
    /// Simultaneous mutable access to `StoreData` and shared access to the
    /// module registry, splitting the borrow of `self`.
    pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
        (&mut self.store_data, &self.modules)
    }
1676
    /// Simultaneous mutable access to the breakpoint state and module
    /// registry, splitting the borrow of `self`.
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry_mut(
        &mut self,
    ) -> (&mut BreakpointState, &mut ModuleRegistry) {
        (&mut self.breakpoints, &mut self.modules)
    }
1683
    /// Shared-borrow counterpart to [`Self::breakpoints_and_registry_mut`].
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry(&self) -> (&BreakpointState, &ModuleRegistry) {
        (&self.breakpoints, &self.modules)
    }
1688
    /// Simultaneous mutable access to the frame-data cache and shared access
    /// to the module registry, splitting the borrow of `self`.
    #[cfg(feature = "debug")]
    pub(crate) fn frame_data_cache_mut_and_registry(
        &mut self,
    ) -> (&mut FrameDataCache, &ModuleRegistry) {
        (&mut self.frame_data_cache, &self.modules)
    }
1695
    /// Returns the registry of modules known to this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1700
    /// Splits `self` into the module registry (mutable), the engine, and the
    /// breakpoint-registration state.
    ///
    /// Without the `debug` feature the breakpoint state is a zero-sized
    /// placeholder so that callers compile identically either way.
    #[inline]
    pub(crate) fn modules_and_engine_and_breakpoints_mut(
        &mut self,
    ) -> (&mut ModuleRegistry, &Engine, RegisterBreakpointState<'_>) {
        #[cfg(feature = "debug")]
        let breakpoints = RegisterBreakpointState(&self.breakpoints);
        #[cfg(not(feature = "debug"))]
        let breakpoints = RegisterBreakpointState(core::marker::PhantomData);

        (&mut self.modules, &self.engine, breakpoints)
    }
1712
    /// Simultaneous mutable access to `FuncRefs` and shared access to the
    /// module registry, splitting the borrow of `self`.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }
1716
    /// Shared access to the globals created directly by the host (as opposed
    /// to globals defined by instantiated modules).
    pub(crate) fn host_globals(
        &self,
    ) -> &TryPrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }
1722
    /// Mutable counterpart to [`Self::host_globals`].
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut TryPrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1728
    /// Looks up the [`Module`] that `instance` was instantiated from, or
    /// `None` for dummy instances which have no backing module.
    ///
    /// # Panics
    ///
    /// Panics if `instance` belongs to a different store than `self`.
    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
        instance.store_id().assert_belongs_to(self.id());
        match self.instances[instance.instance()].kind {
            StoreInstanceKind::Dummy => None,
            StoreInstanceKind::Real { module_id } => {
                let module = self
                    .modules()
                    .module_by_id(module_id)
                    .expect("should always have a registered module for real instances");
                Some(module)
            }
        }
    }
1742
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics if `id` does not index an instance in this store.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }
1752
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Panics if `id` does not index an instance in this store.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1762
    /// Accessor from `InstanceId` to both `Pin<&mut vm::Instance>`
    /// and `&ModuleRegistry`, splitting the borrow of `self` so callers can
    /// hold both at once.
    #[inline]
    pub fn instance_and_module_registry_mut(
        &mut self,
        id: InstanceId,
    ) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
        (self.instances[id].handle.get_mut(), &self.modules)
    }
1772
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` returns an error on overlapping indices; the
        // `unwrap` here is the source of the documented panic.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1798
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`,
    /// splitting the borrow of `self` so both can be held simultaneously.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1806
    /// Tuple of `Self::optional_gc_store_mut`, `Self::modules`, and
    /// `Self::instance_mut`, splitting the borrow of `self` so all three can
    /// be held simultaneously.
    pub fn optional_gc_store_and_registry_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (
        Option<&mut GcStore>,
        &ModuleRegistry,
        Pin<&mut vm::Instance>,
    ) {
        (
            self.gc_store.as_mut(),
            &self.modules,
            self.instances[id].handle.get_mut(),
        )
    }
1823
    /// Get all instances (ignoring dummy instances) within this store.
    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
        // Collect the ids first: `Instance::from_wasmtime` below needs
        // `self`, so the borrow of `self.instances` must end before mapping.
        let instances = self
            .instances
            .iter()
            .filter_map(|(id, inst)| {
                if let StoreInstanceKind::Dummy = inst.kind {
                    None
                } else {
                    Some(id)
                }
            })
            .collect::<Vec<_>>();
        instances
            .into_iter()
            .map(|i| Instance::from_wasmtime(i, self))
    }
1841
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        //
        // `id` is captured by value (`move`) so the closure doesn't borrow
        // `self` beyond the iterator itself.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1852
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1866
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1884
    /// Installs (or clears, with `None`) this store's custom signal handler.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1889
    /// Shared access to the `VMStoreContext` shared with compiled code.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1894
    /// Mutable counterpart to [`Self::vm_store_context`].
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1899
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // Fast path: already allocated. The `is_some`-then-`unwrap` shape
        // (rather than `if let Some(..)`) keeps the borrow checker happy
        // about returning a borrow from `self` here.
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1917
    /// Slow path of [`Self::ensure_gc_store`]: allocates a fresh GC heap and
    /// installs it into this store, returning a borrow of it.
    ///
    /// Kept out-of-line (`#[inline(never)]`) so the common "already
    /// allocated" path in `ensure_gc_store` stays small.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Sanity-check no GC heap was installed before: the store's GC heap
        // pointer/length must still be in their initial dangling/empty state.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        // Publish the heap's memory definition so compiled code can see it.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // Helper defined after the `return` above; the two `cfg` variants
        // keep the caller identical whether or not `gc` is enabled.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::packed_option::ReservedValue;

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: engine.empty_module_runtime_info(),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(&mut request, &mem_ty, None)
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1985
    /// Helper method to require that a `GcStore` was previously allocated for
    /// this store, failing if it has not yet been allocated.
    ///
    /// Note that this should only be used in a context where allocation of a
    /// `GcStore` is sure to have already happened prior, otherwise this may
    /// return a confusing error to embedders which is a bug in Wasmtime.
    ///
    /// Some situations where it's safe to call this method:
    ///
    /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
    ///   this shows proof that the `GcStore` was previously allocated.
    /// * During instantiation and instance's `needs_gc_heap` flag will be
    ///   handled and instantiation will automatically create a GC store.
    ///
    /// # Errors
    ///
    /// Returns an error if the GC heap has not been allocated yet.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }
2007
    /// Same as [`Self::require_gc_store`], but mutable.
    ///
    /// # Errors
    ///
    /// Returns an error if the GC heap has not been allocated yet.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
        match &mut self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }
2017
    /// Attempts to access the GC store that has been previously allocated.
    ///
    /// This method will return `Some` if the GC store was previously allocated.
    /// A `None` return value means either that the GC heap hasn't yet been
    /// allocated or that it does not need to be allocated for this store. Note
    /// that to require a GC store in a particular situation it's recommended to
    /// use [`Self::require_gc_store_mut`] instead.
    #[inline]
    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
        // Fast path: with GC compiled out or disabled in the engine's
        // features there can be no GC store at all.
        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
            debug_assert!(self.gc_store.is_none());
            None
        } else {
            self.gc_store.as_mut()
        }
    }
2034
    /// Helper to assert that a GC store was previously allocated and is
    /// present, returning a shared borrow of it.
    ///
    /// # Panics
    ///
    /// This method will panic if the GC store has not yet been allocated. This
    /// should only be used in a context where there's an existing GC reference,
    /// for example, or if `ensure_gc_store` has already been called.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
2050
    /// Same as [`Self::unwrap_gc_store`], but mutable.
    ///
    /// # Panics
    ///
    /// Panics if the GC store has not yet been allocated.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
2059
    /// Shared access to the set of embedder-rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
2064
    /// Mutable counterpart to [`Self::gc_roots`].
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
2070
    /// Pops LIFO-rooted GC references down to the given `scope` marker.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
2075
    /// Runs a garbage collection over this store's GC heap: traces all roots
    /// and then collects, reusing the store's root-list allocation across
    /// collections.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self, asyncness: Asyncness) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots, asyncness).await;
        self.unwrap_gc_store_mut()
            .gc(asyncness, unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
2100
    /// Populates `gc_roots_list` with every kind of GC root known to this
    /// store (Wasm stacks, continuations, vmctx items, instances, user roots,
    /// and any pending exception), yielding to the async executor between
    /// phases when running asynchronously.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList, asyncness: Asyncness) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }

        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            if asyncness != Asyncness::No {
                vm::Yield::new().await;
            }
        }

        self.trace_vmctx_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }

        self.trace_instance_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }

        self.trace_user_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            vm::Yield::new().await;
        }

        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2140
    /// Records all live GC references in a single Wasm stack `frame` into
    /// `gc_roots_list`, consulting the frame's stack map and, with the
    /// `debug` feature, its frame table as well.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        // The PC identifies which module (and code image) this frame
        // belongs to; frames from unregistered code would be a bug.
        let (module_with_code, _offset) = self
            .modules()
            .module_and_code_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_with_code.module().frame_table() {
            let relpc = module_with_code
                .text_offset(pc)
                .expect("PC should be within module");
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2187
    /// Reads one on-stack GC-ref slot and, if it holds a non-null reference,
    /// adds the slot's address to `gc_roots_list`.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be a valid, readable pointer to a `u32` GC-ref slot
    /// that remains live for the duration of the GC cycle (the slot address
    /// itself is recorded as a root).
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        // Null (zero) slots are skipped; only actual references are rooted.
        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2204
    /// Walks the currently-active Wasm stack via a backtrace and records the
    /// GC roots of every frame.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2217
    /// Records GC roots living on the stacks of suspended continuations;
    /// running/parent/fresh/returned continuations need no work here (see the
    /// per-state comments below).
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2256
    /// Records GC roots held in globals and tables (both host- and
    /// Wasm-defined) across all instances of this store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
2264
    /// Records GC roots owned directly by each instance in this store.
    #[cfg(feature = "gc")]
    fn trace_instance_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: instance");
        for (_id, instance) in &mut self.instances {
            // SAFETY: the instance's GC roots will remain valid for the
            // duration of this GC cycle.
            unsafe {
                instance.handle.get_mut().trace_roots(gc_roots_list);
            }
        }
        log::trace!("End trace GC roots :: instance");
    }
2277
    /// Records GC roots created by the embedder (the store's `RootSet`).
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
2284
    /// Records the store's pending exception, if any, as a GC root so it
    /// survives collection while it awaits delivery.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2296
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // Holding the `RegisteredType` in this set keeps its registration
        // alive for the lifetime of the store.
        self.gc_host_alloc_types.insert(ty);
    }
2307
    /// Helper function to execute an init barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating `GcStore` where possible: only references that
    /// actually require an init barrier go through the GC store; i31 and null
    /// references are written directly.
    pub(crate) fn init_gc_ref(
        &mut self,
        dest: &mut MaybeUninit<Option<VMGcRef>>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if GcStore::needs_init_barrier(gc_ref) {
            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
        } else {
            dest.write(gc_ref.map(|r| r.copy_i31()));
        }
    }
2322
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating `GcStore` where possible.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2329
    /// Helper function to clone `gc_ref` notably avoiding allocating a
    /// `GcStore` where possible.
    ///
    /// i31 references are unboxed values and can be copied freely; all other
    /// references must go through the GC store's clone barrier.
    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
        if gc_ref.is_i31() {
            gc_ref.copy_i31()
        } else {
            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
        }
    }
2339
    /// Returns the total fuel (active injection plus reserve) remaining in
    /// this store.
    ///
    /// # Errors
    ///
    /// Fails if fuel consumption was not enabled in this store's config.
    pub fn get_fuel(&self) -> Result<u64> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // The fuel counter lives in an `UnsafeCell` shared with compiled
        // code; reading it here races with nothing since `&self` proves no
        // wasm is concurrently mutating this store.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
2348
    /// Re-injects fuel from the reserve into the VM's active counter,
    /// returning whether any fuel remained to inject.
    pub(crate) fn refuel(&mut self) -> bool {
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
2357
    /// Sets the total amount of fuel in this store, splitting it between the
    /// VM's active counter and the reserve per the yield interval.
    ///
    /// # Errors
    ///
    /// Fails if fuel consumption was not enabled in this store's config.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2372
2373 #[cfg(feature = "async")]
2374 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2375 crate::ensure!(
2376 self.engine().tunables().consume_fuel,
2377 "fuel is not configured in this store"
2378 );
2379 crate::ensure!(
2380 interval != Some(0),
2381 "fuel_async_yield_interval must not be 0"
2382 );
2383
2384 // All future entrypoints must be async to handle the case that fuel
2385 // runs out and an async yield is needed.
2386 self.set_async_required(Asyncness::Yes);
2387
2388 self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2389 // Reset the fuel active + reserve states by resetting the amount.
2390 self.set_fuel(self.get_fuel()?)
2391 }
2392
2393 #[inline]
2394 pub fn signal_handler(&self) -> Option<*const SignalHandler> {
2395 let handler = self.signal_handler.as_ref()?;
2396 Some(handler)
2397 }
2398
    /// Raw pointer to this store's `VMStoreContext`, handed to compiled code.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
2403
    /// Returns the vmctx of this store's default caller instance, used when
    /// no other caller context is available.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
2408
    /// Returns this store's self-referential `dyn VMStore` trait-object
    /// pointer.
    ///
    /// Panics if the pointer has not been initialized yet (presumably set
    /// during store construction — NOTE(review): confirm against `Store::new`).
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2413
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    ///
    /// `mem::take` leaves an empty (non-allocating) `Vec` behind.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }
2420
2421 /// Restores the vector previously taken by `take_hostcall_val_storage`
2422 /// above back into the store, allowing it to be used in the future for the
2423 /// next wasm->host call.
2424 #[inline]
2425 pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
2426 if storage.capacity() > self.hostcall_val_storage.capacity() {
2427 self.hostcall_val_storage = storage;
2428 }
2429 }
2430
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> TryVec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
2437
    /// Same as `save_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    ///
    /// The buffer is only kept if it has more capacity than the cached one.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: TryVec<ValRaw>) {
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
2446
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // A given address should belong to at most one instance's
                // memory; keep scanning all instances to assert that.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2555
    /// Retrieve the store's protection key, if one was configured.
    ///
    /// Only available with the pooling allocator; `None` means no pkey is in
    /// use for this store.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2562
    /// Returns a mutable borrow of this store's fiber-related async state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2567
    /// Returns whether a protection key has been configured for this store.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2572
    /// Returns a borrowed view of this store's executor, dispatching between
    /// the interpreter-based executor and native execution.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            // Native execution only exists when a host compiler backend is
            // compiled in.
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2580
    /// Exchanges this store's executor with the one provided in `executor`.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2585
    /// Returns the unwind implementation matching this store's executor:
    /// the interpreter's own unwinder, or the host's native unwinder.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2593
2594 /// Allocates a new continuation. Note that we currently don't support
2595 /// deallocating them. Instead, all continuations remain allocated
2596 /// throughout the store's lifetime.
2597 #[cfg(feature = "stack-switching")]
2598 pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
2599 // FIXME(frank-emrich) Do we need to pin this?
2600 let mut continuation = Box::new(VMContRef::empty());
2601 let stack_size = self.engine.config().async_stack_size;
2602 let stack = crate::vm::VMContinuationStack::new(stack_size)?;
2603 continuation.stack = stack;
2604 let ptr = continuation.deref_mut() as *mut VMContRef;
2605 self.continuations.push(continuation);
2606 Ok(ptr)
2607 }
2608
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id this instance will receive when pushed below; it's
        // double-checked against the actual id at the end of this function.
        let id = self.instances.next_key();

        // Real modules use the engine's configured allocator; dummy
        // instances use the allocator the caller supplied.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        // Record the allocated instance in the store, tagging it with its
        // originating module (if any) for later lookups.
        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })?
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })?
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2684
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    ///
    /// Any previously pending exception is overwritten.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }
2693
    /// Take a pending exception, if any, leaving `None` in its place.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2699
    /// Tests whether there is a pending exception on this store.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2705
    /// Takes the pending exception, if any, and returns it rooted for use
    /// from Rust code.
    #[cfg(feature = "gc")]
    fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
        let vmexnref = self.take_pending_exception()?;
        // `AutoAssertNoGc` guards the window where the raw reference is
        // converted into a rooted handle.
        let mut nogc = AutoAssertNoGc::new(self);
        Some(Rooted::new(&mut nogc, vmexnref.into()))
    }
2712
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    ///
    /// Returns `Ok(None)` when no exception is pending.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(
        &mut self,
    ) -> Result<Option<OwnedRooted<ExnRef>>, crate::error::OutOfMemory> {
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception
            .take()
            .map(|vmexnref| {
                // Clone the underlying GC reference and re-install the clone
                // as the pending exception so the store still holds one,
                // then root the original for the caller.
                let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
                nogc.pending_exception = Some(cloned.into_exnref_unchecked());
                OwnedRooted::new(&mut nogc, vmexnref.into())
            })
            .transpose()
    }
2729
    /// Installs `exception` as this store's pending exception.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        let mut nogc = AutoAssertNoGc::new(self);
        // Convert the rooted handle back into a raw GC reference. The
        // `unwrap`/`expect` encode the invariants that the handle is rooted
        // in this store and that an exception is never null.
        let exnref = exception._to_raw(&mut nogc).unwrap();
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2739
2740 #[cfg(target_has_atomic = "64")]
2741 pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2742 // Set a new deadline based on the "epoch deadline delta".
2743 //
2744 // Also, note that when this update is performed while Wasm is
2745 // on the stack, the Wasm will reload the new value once we
2746 // return into it.
2747 let current_epoch = self.engine().current_epoch();
2748 let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
2749 *epoch_deadline = current_epoch + delta;
2750 }
2751
    /// Returns the currently configured epoch deadline.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        // `&mut self` is needed for `get_mut` on the underlying cell.
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2755
    /// Validates that a synchronous API entrypoint is allowed for this
    /// store, returning an error if the store's configuration requires the
    /// `*_async` variants instead.
    #[inline]
    pub(crate) fn validate_sync_call(&self) -> Result<()> {
        #[cfg(feature = "async")]
        if self.async_state.async_required {
            bail!("store configuration requires that `*_async` functions are used instead");
        }
        Ok(())
    }
2764
    /// Returns whether this store is presently on a fiber and is allowed to
    /// block via `block_on` with fibers.
    pub(crate) fn can_block(&mut self) -> bool {
        // The `if true` exists so the trailing `false` below isn't flagged
        // as unreachable when the `async` feature is enabled.
        #[cfg(feature = "async")]
        if true {
            return self.fiber_async_state_mut().can_block();
        }

        false
    }
2775
    /// No-op acknowledgement of `asyncness` in non-async builds.
    ///
    /// The empty match is deliberately exhaustive: without the `async`
    /// feature `Asyncness` only has the `No` variant, so any future variant
    /// would be a compile error here rather than silently ignored.
    #[cfg(not(feature = "async"))]
    pub(crate) fn set_async_required(&mut self, asyncness: Asyncness) {
        match asyncness {
            Asyncness::No => {}
        }
    }
2782}
2783
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2802
unsafe impl<T> VMStore for StoreInner<T> {
    // View this store as a component-model async store.
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    #[cfg(feature = "component-model")]
    fn component_task_state_mut(&mut self) -> &mut crate::component::store::ComponentTaskState {
        StoreOpaque::component_task_state_mut(self)
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        // Split the store's borrows so the limiter closure can consume
        // `data` while the opaque store is returned alongside the result.
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            // With no callback configured, an elapsed epoch deadline is an
            // interrupt.
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> crate::Result<()> {
        // NOTE(review): the handler is `take`n and not visibly re-installed
        // here — confirm whether handlers are intentionally one-shot.
        if let Some(handler) = self.debug_handler.take() {
            if !self.can_block() {
                bail!("could not invoke debug handler without async context");
            }
            log::trace!("about to raise debug event {event:?}");
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
2868
impl<T> StoreInner<T> {
    /// Clears any configured epoch-deadline callback; with no callback
    /// installed, `new_epoch_updated_deadline` yields
    /// `UpdateDeadline::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to be invoked when the epoch deadline elapses.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }
}
2883
2884impl<T: Default> Default for Store<T> {
2885 fn default() -> Store<T> {
2886 Store::new(&Engine::default(), T::default())
2887 }
2888}
2889
2890impl<T: fmt::Debug> fmt::Debug for Store<T> {
2891 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2892 let inner = &**self.inner as *const StoreInner<T>;
2893 f.debug_struct("Store")
2894 .field("inner", &inner)
2895 .field("data", self.inner.data())
2896 .finish()
2897 }
2898}
2899
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run manual drop routines first, while the store is still fully
        // alive.
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        //
        // Note the ordering: the embedder's `T` data is dropped first, then
        // the rest of the store. `Drop for StoreOpaque` below relies on the
        // data having already been deallocated here.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2911
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap, if any, and its backing memory to the
            // allocator.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Deallocate every instance with the same flavor of allocator
            // that created it: dummy instances came from an on-demand
            // allocator, everything else from the engine's allocator.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            self.store_data.decrement_allocator_resources(allocator);
        }
    }
}
2945
/// Internal helper trait to view various store-like types as a
/// `&mut StoreOpaque`.
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
pub(crate) trait AsStoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
2955
// Identity conversion.
impl AsStoreOpaque for StoreOpaque {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2961
// Goes through `dyn VMStore`'s deref coercion to `StoreOpaque`.
impl AsStoreOpaque for dyn VMStore {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2967
// Reaches through both wrapper layers: `Store<T>` -> `StoreInner<T>` ->
// `StoreOpaque`.
impl<T: 'static> AsStoreOpaque for Store<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}
2973
// Uses `StoreInner<T>`'s deref coercion to `StoreOpaque`.
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
2979
// Forwarding impl so `&mut`-references to any of the above also qualify.
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
2985
/// Helper enum to indicate, in some function contexts, whether `async` should
/// be taken advantage of or not.
///
/// This is used throughout Wasmtime where internal functions are all `async`
/// but external functions might be either sync or `async`. If the external
/// function is sync, then internally Wasmtime shouldn't yield as it won't do
/// anything. If the external function is `async`, however, yields are fine.
///
/// An example of this is GC. Right now GC will cooperatively yield after phases
/// of GC have passed, but this cooperative yielding is only enabled with
/// `Asyncness::Yes`.
///
/// This enum is additionally conditionally defined such that `Yes` is only
/// present in `async`-enabled builds. That ensures that this compiles down to a
/// zero-sized type in `async`-disabled builds in case that interests embedders.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum Asyncness {
    /// Don't do async things, don't yield, etc. It's ok to execute an `async`
    /// function, but it should be validated ahead of time that when doing so a
    /// yield isn't possible (e.g. `validate_sync_*` methods on `Store`).
    No,

    /// Async things is OK. This should only be used when the API entrypoint is
    /// itself `async`.
    #[cfg(feature = "async")]
    Yes,
}
3013
impl core::ops::BitOr for Asyncness {
    type Output = Self;

    /// Combines two asyncness values: `Yes` if either operand is `Yes`,
    /// otherwise `No`.
    fn bitor(self, rhs: Self) -> Self::Output {
        // This match is deliberately exhaustive (no `_` arm) so that adding
        // a variant to `Asyncness` is a compile error here.
        match (self, rhs) {
            (Asyncness::No, Asyncness::No) => Asyncness::No,
            #[cfg(feature = "async")]
            (Asyncness::Yes, _) | (_, Asyncness::Yes) => Asyncness::Yes,
        }
    }
}
3025
#[cfg(test)]
mod tests {
    use super::*;

    // Test-local harness around the fuel bookkeeping free functions
    // (`get_fuel`, `refuel`, `set_fuel`) from the parent module: fuel is
    // modeled as a signed "consumed" counter plus an unsigned reserve,
    // optionally split by a yield interval.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With a yield interval, only up to the interval becomes "active"
        // fuel; the remainder goes into the reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // Intervals above `i64::MAX` cap the active portion at `i64::MAX`.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel to have been consumed over the limit as
        // some instructions can consume multiple units of fuel at once.
        // Refueling should be strict in its consumption and not add more
        // fuel than there is.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // Not enough reserve to cover the over-consumption: refueling fails
        // and leaves the tank untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }

    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            // SAFETY: `store_data` points at this store's `u32` data and the
            // `&mut Store` argument grants exclusive access.
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}
3176}