wasmtime/runtime/store.rs
1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
22//! reexported to the outside world to be usable from this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//! Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//! intended to be consumed by the outside world. Note that the "just a
39//! pointer large" is a load-bearing implementation detail in Wasmtime. This
40//! enables it to store a pointer to its own trait object which doesn't need
41//! to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//! stored inside the `Box`. This is the general Rust pattern when one struct
45//! is a layer over another. The surprising part, though, is that this is
46//! further subdivided. This structure only contains things which actually
47//! need `T` itself. The downside of this structure is that it's always
48//! generic and means that code is monomorphized into consumer crates. We
49//! strive to have things be as monomorphic as possible in `wasmtime` so this
50//! type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
53//! Stored inline in the outer type the "opaque" here means that it's a
54//! "store" but it doesn't have access to the `T`. This is the primary
55//! "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//! internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//! All references of Wasm items into a `Store` are actually indices into a
60//! table in this structure, and the `StoreData` being separate makes it a bit
61//! easier to manage/define/work with. There's no real fundamental reason this
62//! is split out, although sometimes it's useful to have separate borrows into
63//! these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79#[cfg(all(feature = "gc", feature = "debug"))]
80use crate::OwnedRooted;
81use crate::RootSet;
82#[cfg(feature = "gc")]
83use crate::ThrownException;
84use crate::error::OutOfMemory;
85#[cfg(feature = "async")]
86use crate::fiber;
87use crate::module::{RegisterBreakpointState, RegisteredModuleId};
88use crate::prelude::*;
89#[cfg(feature = "gc")]
90use crate::runtime::vm::GcRootsList;
91#[cfg(feature = "stack-switching")]
92use crate::runtime::vm::VMContRef;
93use crate::runtime::vm::mpk::ProtectionKey;
94use crate::runtime::vm::{
95 self, ExportMemory, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator,
96 InstanceHandle, Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator,
97 SendSyncPtr, SignalHandler, StoreBox, Unwind, VMContext, VMFuncRef, VMGcRef, VMStore,
98 VMStoreContext,
99};
100use crate::trampoline::VMHostGlobalContext;
101#[cfg(feature = "debug")]
102use crate::{BreakpointState, DebugHandler, FrameDataCache};
103use crate::{Engine, Module, Val, ValRaw, module::ModuleRegistry};
104#[cfg(feature = "gc")]
105use crate::{ExnRef, Rooted};
106use crate::{Global, Instance, Table};
107use core::convert::Infallible;
108use core::fmt;
109#[cfg(any(feature = "async", feature = "gc"))]
110use core::future;
111use core::marker;
112use core::mem::{self, ManuallyDrop, MaybeUninit};
113use core::num::NonZeroU64;
114use core::ops::{Deref, DerefMut};
115use core::pin::Pin;
116use core::ptr::NonNull;
117#[cfg(any(feature = "async", feature = "gc"))]
118use core::task::Poll;
119use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, TripleExt};
120
121mod context;
122pub use self::context::*;
123mod data;
124pub use self::data::*;
125mod func_refs;
126use func_refs::FuncRefs;
127#[cfg(feature = "component-model-async")]
128mod token;
129#[cfg(feature = "component-model-async")]
130pub(crate) use token::StoreToken;
131#[cfg(feature = "async")]
132mod async_;
133#[cfg(all(feature = "async", feature = "call-hook"))]
134pub use self::async_::CallHookHandler;
135
136#[cfg(feature = "gc")]
137use super::vm::VMExnRef;
138#[cfg(feature = "gc")]
139mod gc;
140
/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
///
/// All WebAssembly instances and items will be attached to and refer to a
/// [`Store`]. For example instances, functions, globals, and tables are all
/// attached to a [`Store`]. Instances are created by instantiating a
/// [`Module`](crate::Module) within a [`Store`].
///
/// A [`Store`] is intended to be a short-lived object in a program. No form
/// of GC is implemented at this time so once an instance is created within a
/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
/// This makes [`Store`] unsuitable for creating an unbounded number of
/// instances in it because [`Store`] will never release this memory. It's
/// recommended to have a [`Store`] correspond roughly to the lifetime of a
/// "main instance" that an embedding is interested in executing.
///
/// ## Type parameter `T`
///
/// Each [`Store`] has a type parameter `T` associated with it. This `T`
/// represents state defined by the host. This state will be accessible through
/// the [`Caller`](crate::Caller) type that host-defined functions get access
/// to. This `T` is suitable for storing `Store`-specific information which
/// imported functions may want access to.
///
/// The data `T` can be accessed through methods like [`Store::data`] and
/// [`Store::data_mut`].
///
/// ## Stores, contexts, oh my
///
/// Most methods in Wasmtime take something of the form
/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
/// the first argument. These two traits allow ergonomically passing in the
/// context you currently have to any method. The primary two sources of
/// contexts are:
///
/// * `Store<T>`
/// * `Caller<'_, T>`
///
/// corresponding to what you create and what you have access to in a host
/// function. You can also explicitly acquire a [`StoreContext`] or
/// [`StoreContextMut`] and pass that around as well.
///
/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
/// form of context you have you can call various methods, create objects, etc.
///
/// ## Stores and `Default`
///
/// You can create a store with default configuration settings using
/// `Store::default()`. This will create a brand new [`Engine`] with default
/// configuration (see [`Config`](crate::Config) for more information).
///
/// ## Cross-store usage of items
///
/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
/// [`Store`]. The store they belong to is the one they were created with
/// (passed in as a parameter) or instantiated with. This store is the only
/// store that can be used to interact with wasm items after they're created.
///
/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
/// operations is incorrect. In other words it's considered a programmer error
/// rather than a recoverable error for the wrong [`Store`] to be used when
/// calling APIs.
///
/// [`Memory`]: crate::Memory
pub struct Store<T: 'static> {
    // Heap-allocated so that `Store` itself is just one pointer in size (a
    // load-bearing detail per the module docs above, since the store hands
    // out pointers to itself).
    //
    // For comments about the `ManuallyDrop`, see `Store::into_data`.
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
209
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}

impl CallHook {
    /// Indicates the VM is entering host code (exiting WebAssembly code)
    pub fn entering_host(&self) -> bool {
        // The host is entered either when wasm calls out to a host function
        // or when a wasm function returns back to its host caller.
        matches!(self, CallHook::ReturningFromWasm | CallHook::CallingHost)
    }

    /// Indicates the VM is exiting host code (entering WebAssembly code)
    pub fn exiting_host(&self) -> bool {
        // Mirror image of `entering_host`: either the host invokes wasm or a
        // host function returns back into wasm.
        matches!(self, CallHook::ReturningFromHost | CallHook::CallingWasm)
    }
}
240
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Optional host-configured resource limiter; a closure which projects
    /// `&mut T` to the limiter itself (sync or async flavor).
    limiter: Option<ResourceLimiterInner<T>>,
    /// Optional host-configured hook invoked on host/wasm call transitions
    /// (see `CallHook`).
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch passes this store's deadline,
    /// whose return value determines how execution proceeds (see
    /// `UpdateDeadline`).
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,

    /// The user's `T` data.
    ///
    /// Don't actually access it via this field, however! Use the
    /// `Store{,Inner,Context,ContextMut}::data[_mut]` methods instead, to
    /// preserve stacked borrows and provenance in the face of potential
    /// direct-access of `T` from Wasm code (via unsafe intrinsics).
    ///
    /// The only exception to the above is when taking ownership of the value,
    /// e.g. in `Store::into_data`, after which nothing can access this field
    /// via raw pointers anymore so there is no more provenance to preserve.
    ///
    /// For comments about `ManuallyDrop`, see `Store::into_data`.
    data_no_provenance: ManuallyDrop<T>,

    /// The user's debug handler, if any. See [`crate::DebugHandler`]
    /// for more documentation.
    ///
    /// The handler itself takes `&self` and also the whole Store mutably (via
    /// `StoreContextMut`), so invoking it requires temporarily taking this
    /// box off the store; `StoreDebugHandler::handle` then clones the
    /// underlying handler and re-installs the box before running it.
    #[cfg(feature = "debug")]
    debug_handler: Option<Box<dyn StoreDebugHandler<T>>>,
}
280
/// Adapter around `DebugHandler` that gets monomorphized into an
/// object-safe dyn trait to place in `store.debug_handler`.
#[cfg(feature = "debug")]
trait StoreDebugHandler<T: 'static>: Send + Sync {
    /// Handles `event`, consuming the boxed handler that the caller
    /// temporarily took off of `store.debug_handler`. The implementation is
    /// expected to re-install the box on `store` before returning the future
    /// (see the blanket impl below).
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, T>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a>;
}
291
// Blanket adapter: any `DebugHandler` can be stored as a type-erased
// `StoreDebugHandler` on the store.
#[cfg(feature = "debug")]
impl<D> StoreDebugHandler<D::Data> for D
where
    D: DebugHandler,
    D::Data: Send,
{
    fn handle<'a>(
        self: Box<Self>,
        store: StoreContextMut<'a, D::Data>,
        event: crate::DebugEvent<'a>,
    ) -> Box<dyn Future<Output = ()> + Send + 'a> {
        // Clone the underlying `DebugHandler` (the trait requires
        // Clone as a supertrait), not the Box. The clone happens here
        // rather than at the callsite because `Clone::clone` is not
        // object-safe so needs to be in a monomorphized context.
        let handler: D = (*self).clone();
        // Since we temporarily took `self` off the store at the
        // callsite, put it back now that we've cloned it.
        store.0.debug_handler = Some(self);
        Box::new(async move { handler.handle(store, event).await })
    }
}
314
/// Host-configured resource limiter stored in `StoreInner::limiter`.
///
/// Each variant is a closure projecting the store's `&mut T` to the user's
/// limiter trait object, in either the synchronous or asynchronous flavor.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn (FnMut(&mut T) -> &mut dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
320
/// Representation of a configured resource limiter for a store.
///
/// This is acquired with `resource_limiter_and_store_opaque` for example and is
/// threaded through to growth operations on tables/memories. Note that this is
/// passed around as `Option<&mut StoreResourceLimiter<'_>>` to make it
/// efficient to pass around (nullable pointer) and it's also notably passed
/// around as an `Option` to represent how this is optionally specified within a
/// store.
pub enum StoreResourceLimiter<'a> {
    /// A synchronous limiter; its decisions are returned immediately.
    Sync(&'a mut dyn crate::ResourceLimiter),
    /// An asynchronous limiter; its decisions may require awaiting.
    #[cfg(feature = "async")]
    Async(&'a mut dyn crate::ResourceLimiterAsync),
}
334
impl StoreResourceLimiter<'_> {
    /// Asks the limiter whether a memory may grow from `current` to `desired`
    /// given its optional declared `maximum`, forwarding to the sync or async
    /// limiter as appropriate (awaiting the latter).
    pub(crate) async fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.memory_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a memory growth operation failed with
    /// `error`.
    pub(crate) fn memory_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.memory_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.memory_grow_failed(error),
        }
    }

    /// Like `memory_growing`, but consulted for table growth instead.
    pub(crate) async fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, Error> {
        match self {
            Self::Sync(s) => s.table_growing(current, desired, maximum),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_growing(current, desired, maximum).await,
        }
    }

    /// Notifies the limiter that a table growth operation failed with
    /// `error`.
    pub(crate) fn table_grow_failed(&mut self, error: crate::Error) -> Result<()> {
        match self {
            Self::Sync(s) => s.table_grow_failed(error),
            #[cfg(feature = "async")]
            Self::Async(s) => s.table_grow_failed(error),
        }
    }
}
378
/// Host-configured call hook stored in `StoreInner::call_hook`, invoked on
/// the host/wasm transitions described by `CallHook`.
enum CallHookInner<T: 'static> {
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    #[expect(
        dead_code,
        reason = "forcing, regardless of cfg, the type param to be used"
    )]
    ForceTypeParameterToBeUsed {
        uninhabited: Infallible,
        _marker: marker::PhantomData<T>,
    },
}
393
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
///
/// This is the return value of the store's epoch-deadline callback (see
/// `StoreInner::epoch_deadline_behavior`).
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Halt execution of WebAssembly, don't update the epoch deadline, and
    /// raise a trap.
    Interrupt,
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    #[cfg(feature = "async")]
    Yield(u64),
    /// Same as [`UpdateDeadline::Yield`], except the yield itself is
    /// performed by the provided future rather than a default one.
    ///
    /// This can only be used when WebAssembly is invoked with `*_async`
    /// methods. If WebAssembly was invoked with a synchronous method then
    /// returning this variant will raise a trap.
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
427
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

// Mutable counterpart of the above: `&mut StoreInner<T>` auto-derefs to
// `&mut StoreOpaque` as well.
impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
441
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store was created from (cloned handle).
    engine: Engine,
    /// Store state shared with the VM layer; notably `try_new` publishes a
    /// type-erased pointer to the user's `T` into this.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated in this store, including internal "dummy"
    /// instances (see `StoreInstanceKind`).
    instances: TryPrimaryMap<InstanceId, StoreInstance>,

    /// Optional custom signal handler for this store.
    signal_handler: Option<SignalHandler>,
    /// Registry of modules that have been instantiated into this store.
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    /// Backing storage for host-created globals.
    host_globals: TryPrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,
    /// Pending exception, if any. This is also a GC root, because it
    /// needs to be rooted somewhere between the time that a pending
    /// exception is set and the time that the handling code takes the
    /// exception object. We use this rooting strategy rather than a
    /// root in an `Err` branch of a `Result` on the host side because
    /// it is less error-prone with respect to rooting behavior. See
    /// `throw()`, `take_pending_exception()`,
    /// `peek_pending_exception()`, `has_pending_exception()`, and
    /// `catch()`.
    #[cfg(feature = "gc")]
    pending_exception: Option<VMExnRef>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    /// State used by async execution of wasm in this store (fibers).
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    pub(crate) fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-referential pointer back to this store as a `dyn VMStore`; see
    /// `StorePtr` for details. Filled in at the end of `Store::try_new`.
    traitobj: StorePtr,
    /// `VMContext` of the "default callee" dummy instance allocated in
    /// `Store::try_new`, used when calling a `Func` with no real callee.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: TryVec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,

    /// The debug breakpoint state for this store.
    ///
    /// When guest debugging is enabled, a given store may have a set
    /// of breakpoints defined, denoted by module and Wasm PC within
    /// that module. Or alternately, it may be in "single-step" mode,
    /// where every possible breakpoint is logically enabled.
    ///
    /// When execution of any instance in this store hits any defined
    /// breakpoint, a `Breakpoint` debug event is emitted and the
    /// handler defined above, if any, has a chance to perform some
    /// logic before returning to allow execution to resume.
    #[cfg(feature = "debug")]
    breakpoints: BreakpointState,

    /// The debug PC-to-FrameData cache for this store.
    ///
    /// When guest debugging is enabled, we parse compiler metadata
    /// and pass out `FrameHandle`s that represent Wasm guest
    /// frames. These handles represent a specific frame within a
    /// frozen stack and are invalidated upon further execution. In
    /// order to keep these handles lightweight, and to avoid
    /// redundant work when passing out *new* handles after further
    /// execution, we cache the mapping from store-specific PCs to
    /// parsed frame data. (This cache needs to be store-specific
    /// rather than e.g. engine-specific because each store has its
    /// own privately mapped copy of guest code when debugging is
    /// enabled, so the key-space is unique for each store.)
    #[cfg(feature = "debug")]
    frame_data_cache: FrameDataCache,
}
577
/// Self-pointer to `StoreInner<T>` from within a `StoreOpaque` which is chiefly
/// used to copy into instances during instantiation.
///
/// FIXME: ideally this type would get deleted and Wasmtime's reliance on it
/// would go away.
struct StorePtr(Option<NonNull<dyn VMStore>>);

// SAFETY: We can't make `VMStore: Send + Sync` because that requires making
// all of Wasmtime's internals generic over the `Store`'s `T`. So instead, we
// take care in the whole VM layer to only use the `VMStore` in ways that are
// `Send`- and `Sync`-safe and we have to have these unsafe impls.
unsafe impl Send for StorePtr {}
unsafe impl Sync for StorePtr {}
591
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Wasm runs in the Pulley interpreter, whose state is stored here.
    Interpreter(Interpreter),
    /// Wasm runs as natively compiled code; only available when a host
    /// compiler backend is compiled in.
    #[cfg(has_host_compiler_backend)]
    Native,
}

impl Executor {
    /// Selects and allocates the executor for `engine`: the Pulley
    /// interpreter when the engine targets Pulley, otherwise native
    /// execution.
    pub(crate) fn new(engine: &Engine) -> Result<Self, OutOfMemory> {
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Ok(Executor::Interpreter(Interpreter::new(engine)?))
        } else {
            Ok(Executor::Native)
        }
        // Without a host compiler backend the target must be Pulley, so the
        // interpreter is the only option.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Ok(Executor::Interpreter(Interpreter::new(engine)?))
        }
    }
}
617
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; no state to borrow.
    #[cfg(has_host_compiler_backend)]
    Native,
}
624
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    /// The store whose GC heap is guarded; accessible through `Deref[Mut]`.
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered on the GC heap (false when
    /// GC is disabled or the store has no GC heap allocated yet), and hence
    /// whether `Drop` must exit the scope.
    entered: bool,
}
631
632impl<'a> AutoAssertNoGc<'a> {
633 #[inline]
634 pub fn new(store: &'a mut StoreOpaque) -> Self {
635 let entered = if !cfg!(feature = "gc") {
636 false
637 } else if let Some(gc_store) = store.gc_store.as_mut() {
638 gc_store.gc_heap.enter_no_gc_scope();
639 true
640 } else {
641 false
642 };
643
644 AutoAssertNoGc { store, entered }
645 }
646
647 /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
648 /// disables checks for no GC happening for the duration of this value.
649 ///
650 /// This is used when it is statically otherwise known that a GC doesn't
651 /// happen for the various types involved.
652 ///
653 /// # Unsafety
654 ///
655 /// This method is `unsafe` as it does not provide the same safety
656 /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
657 /// caller that a GC doesn't happen.
658 #[inline]
659 pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
660 if cfg!(debug_assertions) {
661 AutoAssertNoGc::new(store)
662 } else {
663 AutoAssertNoGc {
664 store,
665 entered: false,
666 }
667 }
668 }
669}
670
// A guard transparently dereferences to the underlying store so callers can
// keep using `StoreOpaque` methods while the no-GC scope is active.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}

impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Exit the no-GC scope entered in `AutoAssertNoGc::new`, if any.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
695
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// Handle to the underlying runtime instance.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}

enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
721
722impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    ///
    /// # Panics
    ///
    /// Panics if memory allocation fails during store creation; use
    /// [`Store::try_new`] to handle such failures as errors instead.
    pub fn new(engine: &Engine, data: T) -> Self {
        Self::try_new(engine, data).expect(
            "allocation failure during `Store::new` (use `Store::try_new` to handle such errors)",
        )
    }
737
    /// Like `Store::new` but returns an error on allocation failure.
    ///
    /// # Errors
    ///
    /// This function will return an [`OutOfMemory`][crate::OutOfMemory] error when
    /// memory allocation fails. See the `OutOfMemory` type's documentation for
    /// details on Wasmtime's out-of-memory handling.
    pub fn try_new(engine: &Engine, data: T) -> Result<Self> {
        let store_data = StoreData::new(engine);
        log::trace!("creating new store {:?}", store_data.id());

        let pkey = engine.allocator().next_available_pkey();

        // First build up the monomorphic `StoreOpaque` contents with default /
        // empty state; self-referential fields (`traitobj`,
        // `default_caller_vmctx`) are filled in after boxing below.
        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: TryPrimaryMap::new(),
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            #[cfg(feature = "gc")]
            pending_exception: None,
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: TryPrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            traitobj: StorePtr(None),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: TryVec::new(),
            pkey,
            executor: Executor::new(engine)?,
            #[cfg(feature = "debug")]
            breakpoints: Default::default(),
            #[cfg(feature = "debug")]
            frame_data_cache: FrameDataCache::new(),
        };
        let mut inner = try_new::<Box<_>>(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data_no_provenance: ManuallyDrop::new(data),
            #[cfg(feature = "debug")]
            debug_handler: None,
        })?;

        // Publish a type-erased pointer to the user's `T` into the
        // `VMStoreContext`; see `data_no_provenance` for why access to `T`
        // goes through this pointer.
        let store_data =
            <NonNull<ManuallyDrop<T>>>::from(&mut inner.data_no_provenance).cast::<()>();
        inner.inner.vm_store_context.store_data = store_data.into();

        // Record the self-referential `dyn VMStore` pointer now that the
        // store has a stable heap address.
        inner.traitobj = StorePtr(Some(NonNull::from(&mut *inner)));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let allocator = OnDemandInstanceAllocator::default();
        let info = engine.empty_module_runtime_info();
        allocator
            .validate_module(info.env_module(), info.offsets())
            .unwrap();

        unsafe {
            // Note that this dummy instance doesn't allocate tables or memories
            // (also no limiter is passed in) so it won't have an async await
            // point meaning that it should be ok to assert the future is
            // always ready.
            let result = vm::assert_ready(inner.allocate_instance(
                None,
                AllocateInstanceKind::Dummy {
                    allocator: &allocator,
                },
                info,
                Default::default(),
            ));
            let id = match result {
                Ok(id) => id,
                Err(e) => {
                    // Only out-of-memory is a recoverable error here; any
                    // other failure to allocate the default callee is a bug.
                    if e.is::<OutOfMemory>() {
                        return Err(e);
                    }
                    panic!("instance allocator failed to allocate default callee")
                }
            };
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Ok(Self {
            inner: ManuallyDrop::new(inner),
        })
    }
852
    /// Access the underlying `T` data owned by this `Store`.
    ///
    /// Note that this forwards to `StoreInner::data`, which takes care to
    /// read the data through a provenance-correct pointer.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
858
    /// Access the underlying `T` data owned by this `Store`, mutably.
    ///
    /// Note that this forwards to `StoreInner::data_mut`, which takes care to
    /// access the data through a provenance-correct pointer.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
864
    /// Runs teardown routines that need `&mut self`; invoked before the
    /// store's `T` is extracted in `into_data` (and presumably from the
    /// store's destructor as well — that code is outside this view).
    fn run_manual_drop_routines(&mut self) {
        StoreData::run_manual_drop_routines(StoreContextMut(&mut self.inner));

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
872
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        unsafe {
            // Move the boxed `StoreInner` out, then skip `Drop for Store<T>`
            // entirely; the `Box` glue still runs when `inner` goes out of
            // scope, but leaves the `ManuallyDrop`-wrapped `T` alone.
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data_no_provenance)
        }
    }
907
    /// Configures the [`ResourceLimiter`] used to limit resource creation
    /// within this [`Store`].
    ///
    /// Whenever resources such as linear memory, tables, or instances are
    /// allocated the `limiter` specified here is invoked with the store's data
    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
    /// being allocated. The returned [`ResourceLimiter`] is intended to live
    /// within the `T` itself, for example by storing a
    /// [`StoreLimits`](crate::StoreLimits).
    ///
    /// Note that this limiter is only used to limit the creation/growth of
    /// resources in the future, this does not retroactively attempt to apply
    /// limits to the [`Store`].
    ///
    /// # Examples
    ///
    /// ```
    /// use wasmtime::*;
    ///
    /// struct MyApplicationState {
    ///     my_state: u32,
    ///     limits: StoreLimits,
    /// }
    ///
    /// let engine = Engine::default();
    /// let my_state = MyApplicationState {
    ///     my_state: 42,
    ///     limits: StoreLimitsBuilder::new()
    ///         .memory_size(1 << 20 /* 1 MB */)
    ///         .instances(2)
    ///         .build(),
    /// };
    /// let mut store = Store::new(&engine, my_state);
    /// store.limiter(|state| &mut state.limits);
    ///
    /// // Creation of smaller memories is allowed
    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
    ///
    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
    /// // configured
    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
    ///
    /// // The number of instances in this store is limited to 2, so the third
    /// // instance here should fail.
    /// let module = Module::new(&engine, "(module)").unwrap();
    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
    /// ```
    ///
    /// [`ResourceLimiter`]: crate::ResourceLimiter
    pub fn limiter(
        &mut self,
        mut limiter: impl (FnMut(&mut T) -> &mut dyn crate::ResourceLimiter) + Send + Sync + 'static,
    ) {
        // Apply the limits on instances, tables, and memory given by the limiter:
        //
        // Note the two-step borrow here: the limits are read via
        // `inner.data_mut()` (which borrows the whole `StoreInner`) in an
        // inner scope, and only afterwards is the `StoreOpaque` borrowed
        // mutably to record them.
        let inner = &mut self.inner;
        let (instance_limit, table_limit, memory_limit) = {
            let l = limiter(inner.data_mut());
            (l.instances(), l.tables(), l.memories())
        };
        let innermost = &mut inner.inner;
        innermost.instance_limit = instance_limit;
        innermost.table_limit = table_limit;
        innermost.memory_limit = memory_limit;

        // Save the limiter accessor function:
        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
    }
977
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    ///
    /// [`Trap`]: crate::Trap
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // The stored hook is invoked from `StoreInner::call_hook` on each
        // wasm/host transition.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
1001
    /// Returns the [`Engine`] that this store is associated with.
    ///
    /// This is the same engine that was provided when the store was created.
    pub fn engine(&self) -> &Engine {
        self.inner.engine()
    }
1006
    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// # Errors
    ///
    /// This method will fail if an [async limiter is
    /// configured](Store::limiter_async) in which case [`Store::gc_async`] must
    /// be used instead.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        // Delegates to `StoreContextMut::gc`, which validates that no async
        // resource limiter is configured before running a synchronous GC.
        StoreContextMut(&mut self.inner).gc(why)
    }
1028
    /// Returns the current capacity of the GC heap in bytes, or 0 if the GC
    /// heap has not been initialized yet.
    ///
    /// See [`Store::gc`] to explicitly trigger a collection.
    #[cfg(feature = "gc")]
    pub fn gc_heap_capacity(&self) -> usize {
        self.inner.gc_heap_capacity()
    }
1035
    /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must
    /// be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    ///
    /// This function will return an [`OutOfMemory`][crate::OutOfMemory] error when
    /// memory allocation fails. See the `OutOfMemory` type's documentation for
    /// details on Wasmtime's out-of-memory handling.
    pub fn get_fuel(&self) -> Result<u64> {
        // Fuel is internally tracked as a counting-up VM counter plus a
        // reserve; see the file-level `get_fuel` helper for the arithmetic.
        self.inner.get_fuel()
    }
1050
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    ///
    /// This function will return an [`OutOfMemory`][crate::OutOfMemory] error when
    /// memory allocation fails. See the `OutOfMemory` type's documentation for
    /// details on Wasmtime's out-of-memory handling.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // See the file-level `set_fuel` helper for how the amount is split
        // between the VM's active counter and the store's reserve.
        self.inner.set_fuel(fuel)
    }
1077
    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back
    /// to the caller every `interval` units of fuel consumed. When using this
    /// method it requires further invocations of WebAssembly to use `*_async`
    /// entrypoints.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if fuel is not enabled or `interval` is
    /// `Some(0)`.
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // `None` disables periodic yielding; `Some(0)` is rejected internally.
        self.inner.fuel_async_yield_interval(interval)
    }
1110
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // The deadline is recorded relative to the engine's current epoch.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }
1141
    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// Trapping is the default behavior; calling this method restores it if
    /// the store was previously configured via
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.inner.epoch_deadline_trap();
    }
1172
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // The boxed callback replaces any previously configured behavior.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
1207
    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// This method takes an exception object and stores it in the
    /// `Store` as the currently pending exception. This is a special
    /// rooted slot that holds the exception as long as it is
    /// propagating. This method then returns a `ThrownException`
    /// error, which is a special type that indicates a pending
    /// exception exists. When this type propagates as an error
    /// returned from a Wasm-to-host call, the pending exception is
    /// thrown within the Wasm context, and either caught or
    /// propagated further to the host-to-Wasm call boundary. If an
    /// exception is thrown out of Wasm (or across Wasm from a
    /// hostcall) back to the host-to-Wasm call boundary, *that*
    /// invocation returns a `ThrownException`, and the pending
    /// exception slot is again set. In other words, the
    /// `ThrownException` error type should propagate upward exactly
    /// and only when a pending exception is set.
    ///
    /// To take the pending exception, use [`Self::take_pending_exception`].
    ///
    /// This method is parameterized over `R` for convenience, but
    /// will always return an `Err`.
    ///
    /// # Panics
    ///
    /// - Will panic if `exception` has been unrooted.
    /// - Will panic if `exception` is a null reference.
    /// - Will panic if a pending exception has already been set.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Record the exception in the store's pending-exception slot, then
        // unconditionally return the propagation marker.
        self.inner.throw_impl(exception);
        Err(ThrownException)
    }
1242
    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// If there is no pending exception, returns `None`.
    ///
    /// Note: the returned exception is a LIFO root (see
    /// [`crate::Rooted`]), rooted in the current handle scope. Take
    /// care to ensure that it is re-rooted or otherwise does not
    /// escape this scope! It is usually best to allow an exception
    /// object to be rooted in the store's "pending exception" slot
    /// until the final consumer has taken it, rather than root it and
    /// pass it up the callstack in some other way.
    ///
    /// This method is useful to implement ad-hoc exception plumbing
    /// in various ways, but for the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        // Clears the pending-exception slot as a side effect.
        self.inner.take_pending_exception_rooted()
    }
1263
    /// Tests whether there is a pending exception.
    ///
    /// Ordinarily, a pending exception will be set on a store if and
    /// only if a host-side callstack is propagating a
    /// [`crate::ThrownException`] error. The final consumer that
    /// catches the exception takes it; it may re-place it to re-throw
    /// (using [`Self::throw`]) if it chooses not to actually handle the
    /// exception.
    ///
    /// This method is useful to tell whether a store is in this
    /// state, but should not be used as part of the ordinary
    /// exception-handling flow. For the most idiomatic handling, see
    /// [`StoreContextMut::throw`].
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        // Purely observational: does not take or clear the slot.
        self.inner.pending_exception.is_some()
    }
1281
    /// Return all breakpoints currently set on this store, if any.
    ///
    /// Forwards to the equivalent accessor on `StoreContext`.
    #[cfg(feature = "debug")]
    pub fn breakpoints(&self) -> Option<impl Iterator<Item = crate::Breakpoint> + '_> {
        self.as_context().breakpoints()
    }
1287
    /// Indicate whether single-step mode is enabled.
    ///
    /// Forwards to the equivalent accessor on `StoreContext`.
    #[cfg(feature = "debug")]
    pub fn is_single_step(&self) -> bool {
        self.as_context().is_single_step()
    }
1293
    /// Set the debug callback on this store.
    ///
    /// See [`crate::DebugHandler`] for more documentation.
    ///
    /// # Panics
    ///
    /// - Will panic if guest-debug support was not enabled via
    ///   [`crate::Config::guest_debug`].
    #[cfg(feature = "debug")]
    pub fn set_debug_handler(&mut self, handler: impl DebugHandler<Data = T>)
    where
        // We require `Send` here because the debug handler becomes
        // referenced from a future: when `DebugHandler::handle` is
        // invoked, its `self` references the `handler` with the
        // user's state. Note that we are careful to keep this bound
        // constrained to debug-handler-related code only and not
        // propagate it outward to the store in general. The presence
        // of the trait implementation serves as a witness that `T:
        // Send`. This is required in particular because we will have
        // a `&mut dyn VMStore` on the stack when we pause a fiber
        // with `block_on` to run a debugger hook; that `VMStore` must
        // be a `Store<T> where T: Send`.
        T: Send,
    {
        // Debug hooks rely on async support, so async entrypoints are required.
        //
        // NOTE(review): the async-required flag is set *before* the
        // `debug_guest` assertion below, so if the assertion trips the flag
        // has already been flipped — confirm this ordering is intended.
        self.inner.set_async_required(Asyncness::Yes);

        assert!(
            self.engine().tunables().debug_guest,
            "debug hooks require guest debugging to be enabled"
        );
        self.inner.debug_handler = Some(Box::new(handler));
    }
1327
    /// Clear the debug handler on this store. If any existed, it will
    /// be dropped.
    ///
    /// This is the inverse of [`Store::set_debug_handler`].
    #[cfg(feature = "debug")]
    pub fn clear_debug_handler(&mut self) {
        self.inner.debug_handler = None;
    }
1334
    /// Register a [`Module`] with this store's module registry for
    /// debugging, without instantiating it.
    ///
    /// This makes the module visible to debuggers (via
    /// `debug_all_modules`) before the module is actually
    /// instantiated. This is useful for guest-debug workflows where
    /// the debugger needs to see modules to set breakpoints before
    /// the first Wasm instruction executes.
    ///
    /// # Errors
    ///
    /// Forwards any error reported by the module registry during
    /// registration.
    #[cfg(feature = "debug")]
    pub fn debug_register_module(&mut self, module: &crate::Module) -> crate::Result<()> {
        let (modules, engine, breakpoints) = self.inner.modules_and_engine_and_breakpoints_mut();
        modules.register_module(module, engine, breakpoints)?;
        Ok(())
    }
1349
    /// Register all inner modules of a [`Component`](crate::component::Component)
    /// with this store's module registry for debugging, without instantiating
    /// the component.
    ///
    /// # Errors
    ///
    /// Stops at, and returns, the first error produced by
    /// [`Store::debug_register_module`] for any inner module.
    #[cfg(all(feature = "debug", feature = "component-model"))]
    pub fn debug_register_component(
        &mut self,
        component: &crate::component::Component,
    ) -> crate::Result<()> {
        for module in component.static_modules() {
            self.debug_register_module(module)?;
        }
        Ok(())
    }
1363}
1364
impl<'a, T> StoreContext<'a, T> {
    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned reference lives for the full `'a` lifetime of
    /// this context, not merely for the duration of the `&self` borrow.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1385
impl<'a, T> StoreContextMut<'a, T> {
    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Unlike [`StoreContext::data`], the borrow here is tied to `&self`
    /// rather than the full `'a` lifetime.
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()> {
        // A synchronous GC is incompatible with an async resource limiter, so
        // validate that up front.
        let (mut limiter, store) = self.0.validate_sync_resource_limiter_and_store_opaque()?;
        // With a sync limiter (or none) the GC future is expected to complete
        // without yielding, hence `assert_ready`.
        vm::assert_ready(store.gc(
            limiter.as_mut(),
            None,
            why.map(|e| e.bytes_needed()),
            Asyncness::No,
        ));
        Ok(())
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    #[cfg(feature = "async")]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }

    /// Set an exception as the currently pending exception, and
    /// return an error that propagates the throw.
    ///
    /// See [`Store::throw`] for more details.
    #[cfg(feature = "gc")]
    pub fn throw<R>(&mut self, exception: Rooted<ExnRef>) -> Result<R, ThrownException> {
        // Mirrors `Store::throw`: stash the exception, then always `Err`.
        self.0.inner.throw_impl(exception);
        Err(ThrownException)
    }

    /// Take the currently pending exception, if any, and return it,
    /// removing it from the "pending exception" slot.
    ///
    /// See [`Store::take_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn take_pending_exception(&mut self) -> Option<Rooted<ExnRef>> {
        self.0.inner.take_pending_exception_rooted()
    }

    /// Tests whether there is a pending exception.
    ///
    /// See [`Store::has_pending_exception`] for more details.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.0.inner.pending_exception.is_some()
    }
}
1486
impl<T> StoreInner<T> {
    /// Provenance-correct shared access to the store's `T` data.
    #[inline]
    fn data(&self) -> &T {
        // We are actually just accessing `&self.data_no_provenance` but we must
        // do so with the `VMStoreContext::store_data` pointer's provenance. If
        // we did otherwise, i.e. directly accessed the field, we would
        // invalidate that pointer, which would in turn invalidate any direct
        // `T` accesses that Wasm code makes via unsafe intrinsics.
        let data: *const ManuallyDrop<T> = &raw const self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        unsafe { &*ptr }
    }

    /// Splits `self` into simultaneous mutable borrows of the `T` data, the
    /// optional resource limiter, and the `StoreOpaque`.
    #[inline]
    fn data_limiter_and_opaque(
        &mut self,
    ) -> (
        &mut T,
        Option<&mut ResourceLimiterInner<T>>,
        &mut StoreOpaque,
    ) {
        // See the comments about provenance in `StoreInner::data` above.
        let data: *mut ManuallyDrop<T> = &raw mut self.data_no_provenance;
        let provenance = self.inner.vm_store_context.store_data.as_ptr().cast::<T>();
        let ptr = provenance.with_addr(data.addr());

        // SAFETY: The pointer is non-null, points to our `T` data, and is valid
        // to access because of our `&mut self` borrow.
        debug_assert_ne!(ptr, core::ptr::null_mut());
        debug_assert_eq!(ptr.addr(), (&raw const self.data_no_provenance).addr());
        let data = unsafe { &mut *ptr };

        let limiter = self.limiter.as_mut();

        (data, limiter, &mut self.inner)
    }

    /// Provenance-correct mutable access to the store's `T` data.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        self.data_limiter_and_opaque().0
    }

    /// Runs any configured call-hook/pkey behavior for the transition `s`.
    ///
    /// The common case — no pkey and no hook — is a cheap inline check.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    /// Out-of-line portion of `call_hook`: toggles protection keys around
    /// wasm execution and invokes the user's hook, if any.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            // Restrict memory access to this store's pkey while wasm runs and
            // re-allow all pkeys whenever control is back in the host.
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatches to the user-configured call hook, sync or async.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            // This variant exists only to keep `T` used when the hook
            // features are disabled; it can never be constructed.
            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }

    /// Splits this `StoreInner<T>` into a `limiter`/`StoreOpaque` borrow while
    /// validating that an async limiter is not configured.
    ///
    /// This is used for sync entrypoints which need to fail if an async limiter
    /// is configured as otherwise the async entrypoint must be used instead.
    pub(crate) fn validate_sync_resource_limiter_and_store_opaque(
        &mut self,
    ) -> Result<(Option<StoreResourceLimiter<'_>>, &mut StoreOpaque)> {
        let (limiter, store) = self.resource_limiter_and_store_opaque();
        if !matches!(limiter, None | Some(StoreResourceLimiter::Sync(_))) {
            bail!(
                "when using an async resource limiter `*_async` functions must \
                 be used instead"
            );
        }
        Ok((limiter, store))
    }
}
1611
/// Returns the total fuel remaining: the unconsumed portion of the fuel
/// injected into the VM plus the reserve not yet handed to it.
///
/// `injected_fuel` counts upward toward zero as the VM consumes fuel, so the
/// remaining active fuel is its negation. `saturating_neg` is used because
/// `-i64::MIN` overflows (panicking in debug builds); saturating keeps this
/// function total over all inputs.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1615
1616// Add remaining fuel from the reserve into the active fuel if there is any left.
1617fn refuel(
1618 injected_fuel: &mut i64,
1619 fuel_reserve: &mut u64,
1620 yield_interval: Option<NonZeroU64>,
1621) -> bool {
1622 let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1623 if fuel > 0 {
1624 set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1625 true
1626 } else {
1627 false
1628 }
1629}
1630
/// Installs `new_fuel_amount` into the store's fuel state, splitting it into
/// an "active" portion injected into the VM and a reserve kept for later.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // When yielding periodically only one interval's worth of fuel is made
    // active at a time; with no interval the cap is effectively unbounded.
    let interval = match yield_interval {
        Some(n) => n.get(),
        None => u64::MAX,
    };
    // The VM tracks fuel in an `i64`, so additionally cap the active portion
    // to the `i64` range.
    let injected = new_fuel_amount.min(interval).min(i64::MAX as u64);
    // Whatever isn't injected now is banked in the reserve for later use.
    *fuel_reserve = new_fuel_amount - injected;
    // The VM increments this counter and halts once it turns positive, so
    // the active fuel is stored negated.
    *injected_fuel = -(injected as i64);
}
1650
1651#[doc(hidden)]
1652impl StoreOpaque {
    /// Returns the identifier uniquely distinguishing this store from others.
    pub fn id(&self) -> StoreId {
        self.store_data.id()
    }
1656
1657 pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1658 fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1659 let new = slot.saturating_add(amt);
1660 if new > max {
1661 bail!("resource limit exceeded: {desc} count too high at {new}");
1662 }
1663 *slot = new;
1664 Ok(())
1665 }
1666
1667 let module = module.env_module();
1668 let memories = module.num_defined_memories();
1669 let tables = module.num_defined_tables();
1670
1671 bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1672 bump(
1673 &mut self.memory_count,
1674 self.memory_limit,
1675 memories,
1676 "memory",
1677 )?;
1678 bump(&mut self.table_count, self.table_limit, tables, "table")?;
1679
1680 Ok(())
1681 }
1682
    /// Returns the [`Engine`] this store is associated with.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }
1687
    /// Returns shared access to the `StoreData` holding store-owned items.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }
1692
    /// Returns mutable access to the `StoreData` holding store-owned items.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }
1697
    /// Simultaneously borrows the store data mutably and the module registry
    /// immutably, for callers that need both at once.
    pub fn store_data_mut_and_registry(&mut self) -> (&mut StoreData, &ModuleRegistry) {
        (&mut self.store_data, &self.modules)
    }
1701
    /// Simultaneously borrows the breakpoint state and the module registry,
    /// both mutably (debugging support only).
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry_mut(
        &mut self,
    ) -> (&mut BreakpointState, &mut ModuleRegistry) {
        (&mut self.breakpoints, &mut self.modules)
    }
1708
    /// Shared-borrow counterpart of `breakpoints_and_registry_mut`.
    #[cfg(feature = "debug")]
    pub(crate) fn breakpoints_and_registry(&self) -> (&BreakpointState, &ModuleRegistry) {
        (&self.breakpoints, &self.modules)
    }
1713
    /// Simultaneously borrows the frame-data cache (mutably) and the module
    /// registry (shared), for debug frame inspection.
    #[cfg(feature = "debug")]
    pub(crate) fn frame_data_cache_mut_and_registry(
        &mut self,
    ) -> (&mut FrameDataCache, &ModuleRegistry) {
        (&mut self.frame_data_cache, &self.modules)
    }
1720
    /// Returns the registry of modules known to this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }
1725
    /// Borrows the module registry mutably alongside the engine and the
    /// breakpoint-registration state, for callers needing all three at once.
    #[inline]
    pub(crate) fn modules_and_engine_and_breakpoints_mut(
        &mut self,
    ) -> (&mut ModuleRegistry, &Engine, RegisterBreakpointState<'_>) {
        // With the `debug` feature the breakpoint state borrows from this
        // store; otherwise a zero-sized placeholder is handed out instead.
        #[cfg(feature = "debug")]
        let breakpoints = RegisterBreakpointState(&self.breakpoints);
        #[cfg(not(feature = "debug"))]
        let breakpoints = RegisterBreakpointState(core::marker::PhantomData);

        (&mut self.modules, &self.engine, breakpoints)
    }
1737
    /// Simultaneously borrows the store's `FuncRefs` (mutably) and the module
    /// registry (shared).
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }
1741
    /// Returns the host-created globals owned by this store.
    pub(crate) fn host_globals(
        &self,
    ) -> &TryPrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }
1747
    /// Mutable counterpart of [`Self::host_globals`].
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut TryPrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1753
1754 pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1755 instance.store_id().assert_belongs_to(self.id());
1756 match self.instances[instance.instance()].kind {
1757 StoreInstanceKind::Dummy => None,
1758 StoreInstanceKind::Real { module_id } => {
1759 let module = self
1760 .modules()
1761 .module_by_id(module_id)
1762 .expect("should always have a registered module for real instances");
1763 Some(module)
1764 }
1765 }
1766 }
1767
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Indexing panics if `id` does not refer to a live instance in this
    /// store.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }
1777
    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// # Panics
    ///
    /// Indexing panics if `id` does not refer to a live instance in this
    /// store.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1787
    /// Accessor from `InstanceId` to both `Pin<&mut vm::Instance>`
    /// and `&ModuleRegistry`.
    ///
    /// Like [`Self::instance_mut`] but additionally hands out a shared borrow
    /// of the module registry in the same call.
    #[inline]
    pub fn instance_and_module_registry_mut(
        &mut self,
        id: InstanceId,
    ) -> (Pin<&mut vm::Instance>, &ModuleRegistry) {
        (self.instances[id].handle.get_mut(), &self.modules)
    }
1797
    /// Access multiple instances specified via `ids`.
    ///
    /// # Panics
    ///
    /// This method will panic if any indices in `ids` overlap.
    ///
    /// # Safety
    ///
    /// This method is not safe if the returned instances are used to traverse
    /// "laterally" between other instances. For example accessing imported
    /// items in an instance may traverse laterally to a sibling instance thus
    /// aliasing a returned value here. The caller must ensure that only defined
    /// items within the instances themselves are accessed.
    #[inline]
    pub unsafe fn optional_gc_store_and_instances_mut<const N: usize>(
        &mut self,
        ids: [InstanceId; N],
    ) -> (Option<&mut GcStore>, [Pin<&mut vm::Instance>; N]) {
        // `get_disjoint_mut` yields distinct `&mut` borrows; the `unwrap`
        // enforces the documented panic on overlapping ids.
        let instances = self
            .instances
            .get_disjoint_mut(ids)
            .unwrap()
            .map(|h| h.handle.get_mut());
        (self.gc_store.as_mut(), instances)
    }
1823
    /// Pair of `Self::optional_gc_store_mut` and `Self::instance_mut`
    ///
    /// Note: hands out `self.gc_store` directly without the feature-gate
    /// check performed by `optional_gc_store_mut` itself.
    pub fn optional_gc_store_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (Option<&mut GcStore>, Pin<&mut vm::Instance>) {
        (self.gc_store.as_mut(), self.instances[id].handle.get_mut())
    }
1831
    /// Tuple of `Self::optional_gc_store_mut`, `Self::modules`, and
    /// `Self::instance_mut`.
    ///
    /// All three borrows are disjoint fields of the store so they may be
    /// handed out simultaneously.
    pub fn optional_gc_store_and_registry_and_instance_mut(
        &mut self,
        id: InstanceId,
    ) -> (
        Option<&mut GcStore>,
        &ModuleRegistry,
        Pin<&mut vm::Instance>,
    ) {
        (
            self.gc_store.as_mut(),
            &self.modules,
            self.instances[id].handle.get_mut(),
        )
    }
1848
1849 /// Get all instances (ignoring dummy instances) within this store.
1850 pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1851 let instances = self
1852 .instances
1853 .iter()
1854 .filter_map(|(id, inst)| {
1855 if let StoreInstanceKind::Dummy = inst.kind {
1856 None
1857 } else {
1858 Some(id)
1859 }
1860 })
1861 .collect::<Vec<_>>();
1862 instances
1863 .into_iter()
1864 .map(|i| Instance::from_wasmtime(i, self))
1865 }
1866
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a self) -> impl Iterator<Item = ExportMemory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        //
        // Capture the store id up front so the closure doesn't borrow `self`.
        let id = self.id();
        self.instances
            .iter()
            .flat_map(move |(_, instance)| instance.handle.get().defined_memories(id))
    }
1877
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                // `f` receives `&mut self`, so `Table` is passed by value
                // rather than holding a borrow into the store.
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1891
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                // As with `for_each_table`, the handle is materialized by
                // value so `f` can take `&mut self` freely.
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1909
    /// Configures (or clears, with `None`) the custom signal handler for this
    /// store.
    #[cfg(all(feature = "std", any(unix, windows)))]
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }
1914
    /// Returns the `VMStoreContext` shared with compiled wasm code.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }
1919
    /// Mutable counterpart of [`Self::vm_store_context`].
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1924
    /// Performs a lazy allocation of the `GcStore` within this store, returning
    /// the previous allocation if it's already present.
    ///
    /// This method will, if necessary, allocate a new `GcStore` -- linear
    /// memory and all. This is a blocking operation due to
    /// `ResourceLimiterAsync` which means that this should only be executed
    /// in a fiber context at this time.
    #[inline]
    pub(crate) async fn ensure_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        // Fast path: already allocated. The `is_some`/`unwrap` split (rather
        // than `if let`) keeps the borrow checker happy about returning a
        // borrow from `self` on one path and reusing `self` on the other.
        if self.gc_store.is_some() {
            return Ok(self.gc_store.as_mut().unwrap());
        }
        self.allocate_gc_store(limiter).await
    }
1942
    /// Allocates this store's `GcStore`, which must not already exist.
    ///
    /// Kept out-of-line from `ensure_gc_store` so the common
    /// already-allocated path stays small.
    #[inline(never)]
    async fn allocate_gc_store(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<&mut GcStore> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Sanity-check that no GC heap exists yet: the slot is empty and the
        // published heap description is still the dangling default.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let gc_store = allocate_gc_store(self, limiter).await?;
        // Publish the new heap's memory definition for compiled code, then
        // stash the `GcStore` itself within this store.
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        return Ok(self.gc_store.insert(gc_store));

        // Inner helper when GC support is compiled in: allocates the heap's
        // backing linear memory first, then the GC heap on top of it.
        #[cfg(feature = "gc")]
        async fn allocate_gc_store(
            store: &mut StoreOpaque,
            limiter: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            use wasmtime_environ::packed_option::ReservedValue;

            let engine = store.engine();
            let mem_ty = engine.tunables().gc_heap_memory_type();
            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                // The GC heap belongs to no wasm instance, hence the reserved
                // placeholder id.
                id: InstanceId::reserved_value(),
                runtime_info: engine.empty_module_runtime_info(),
                imports: vm::Imports::default(),
                store,
                limiter,
            };

            let (mem_alloc_index, mem) = engine
                .allocator()
                .allocate_memory(
                    &mut request,
                    &mem_ty,
                    None,
                    wasmtime_environ::MemoryKind::GcHeap,
                )
                .await?;

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(
                index,
                heap,
                engine.tunables().gc_zeal_alloc_counter,
            ))
        }

        // Without the `gc` feature there's nothing to allocate; report an
        // error instead.
        #[cfg(not(feature = "gc"))]
        async fn allocate_gc_store(
            _: &mut StoreOpaque,
            _: Option<&mut StoreResourceLimiter<'_>>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
2019
2020 /// Helper method to require that a `GcStore` was previously allocated for
2021 /// this store, failing if it has not yet been allocated.
2022 ///
2023 /// Note that this should only be used in a context where allocation of a
2024 /// `GcStore` is sure to have already happened prior, otherwise this may
2025 /// return a confusing error to embedders which is a bug in Wasmtime.
2026 ///
2027 /// Some situations where it's safe to call this method:
2028 ///
2029 /// * There's already a non-null and non-i31 `VMGcRef` in scope. By existing
2030 /// this shows proof that the `GcStore` was previously allocated.
2031 /// * During instantiation and instance's `needs_gc_heap` flag will be
2032 /// handled and instantiation will automatically create a GC store.
2033 #[inline]
2034 #[cfg(feature = "gc")]
2035 pub(crate) fn require_gc_store(&self) -> Result<&GcStore> {
2036 match &self.gc_store {
2037 Some(gc_store) => Ok(gc_store),
2038 None => bail!("GC heap not initialized yet"),
2039 }
2040 }
2041
2042 /// Same as [`Self::require_gc_store`], but mutable.
2043 #[inline]
2044 #[cfg(feature = "gc")]
2045 pub(crate) fn require_gc_store_mut(&mut self) -> Result<&mut GcStore> {
2046 match &mut self.gc_store {
2047 Some(gc_store) => Ok(gc_store),
2048 None => bail!("GC heap not initialized yet"),
2049 }
2050 }
2051
2052 /// Attempts to access the GC store that has been previously allocated.
2053 ///
2054 /// This method will return `Some` if the GC store was previously allocated.
2055 /// A `None` return value means either that the GC heap hasn't yet been
2056 /// allocated or that it does not need to be allocated for this store. Note
2057 /// that to require a GC store in a particular situation it's recommended to
2058 /// use [`Self::require_gc_store_mut`] instead.
2059 #[inline]
2060 pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
2061 if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
2062 debug_assert!(self.gc_store.is_none());
2063 None
2064 } else {
2065 self.gc_store.as_mut()
2066 }
2067 }
2068
2069 /// Returns the current capacity of the GC heap in bytes, or 0 if the GC
2070 /// heap has not been initialized yet.
2071 #[cfg(feature = "gc")]
2072 pub(crate) fn gc_heap_capacity(&self) -> usize {
2073 match self.gc_store.as_ref() {
2074 Some(gc_store) => gc_store.gc_heap_capacity(),
2075 None => 0,
2076 }
2077 }
2078
    /// Helper to assert that a GC store was previously allocated and is
    /// present.
    ///
    /// # Panics
    ///
    /// This method will panic if the GC store has not yet been allocated. This
    /// should only be used in a context where there's an existing GC reference,
    /// for example, or if `ensure_gc_store` has already been called.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
2094
    /// Same as [`Self::unwrap_gc_store`], but mutable.
    ///
    /// # Panics
    ///
    /// Panics if the GC store has not yet been allocated.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }
2103
    /// Returns a mutable reference to the GC store if it has been allocated.
    ///
    /// Unlike `optional_gc_store_mut` this performs no feature/configuration
    /// check and simply reports whether the heap slot is populated.
    #[inline]
    #[cfg(feature = "gc-drc")]
    pub(crate) fn try_gc_store_mut(&mut self) -> Option<&mut GcStore> {
        self.gc_store.as_mut()
    }
2110
    /// Returns the set of embedder-rooted GC references for this store.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }
2115
    /// Mutable counterpart of [`Self::gc_roots`].
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }
2121
    /// Pops LIFO-rooted GC references down to the scope marker `scope`,
    /// unrooting everything rooted since that scope was entered.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
2126
    /// Performs a garbage collection of this store's GC heap: traces all
    /// roots, then runs the collector over them.
    #[cfg(feature = "gc")]
    async fn do_gc(&mut self, asyncness: Asyncness) {
        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots, asyncness).await;
        self.unwrap_gc_store_mut()
            .gc(
                asyncness,
                // NOTE(review): the roots list was populated just above and
                // is not mutated while the collector runs — presumably that's
                // the invariant this `unsafe` relies on; confirm against
                // `GcRootsList::iter`'s safety contract.
                unsafe { roots.iter() },
                // TODO: Once `Config` has an optional `AsyncFn` field for
                // yielding to the current async runtime
                // (e.g. `tokio::task::yield_now`), use that if set; otherwise
                // fall back to the runtime-agnostic code.
                yield_now,
            )
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
2159
    /// Populates `gc_roots_list` with every kind of root this store knows
    /// about: Wasm stacks, continuations, vmctx items, instances, user roots,
    /// and any pending exception.
    ///
    /// When running asynchronously, yields back to the runtime between each
    /// tracing phase to bound pause times.
    #[cfg(feature = "gc")]
    async fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList, asyncness: Asyncness) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            self.yield_now().await;
        }

        #[cfg(feature = "stack-switching")]
        {
            self.trace_wasm_continuation_roots(gc_roots_list);
            if asyncness != Asyncness::No {
                self.yield_now().await;
            }
        }

        self.trace_vmctx_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            self.yield_now().await;
        }

        self.trace_instance_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            self.yield_now().await;
        }

        self.trace_user_roots(gc_roots_list);
        if asyncness != Asyncness::No {
            self.yield_now().await;
        }

        self.trace_pending_exception_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
2199
    /// Enumerates the live GC references held in a single Wasm stack `frame`
    /// and adds them to `gc_roots_list`.
    ///
    /// Uses the compiled module's stack maps (and, with debugging enabled,
    /// its frame tables) to locate GC-ref slots relative to the frame.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        let (module_with_code, _offset) = self
            .modules()
            .module_and_code_by_pc(pc)
            .expect("should have module info for Wasm frame");

        if let Some(stack_map) = module_with_code.lookup_stack_map(pc) {
            log::trace!(
                "We have a stack map that maps {} bytes in this Wasm frame",
                stack_map.frame_size()
            );

            // SAFETY-relevant: `fp`/`sp` come from a frame currently being
            // walked, so the slots the stack map points at are live memory.
            let sp = unsafe { stack_map.sp(fp) };
            for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }

        // With debugging enabled, GC refs may also be described by the
        // module's frame table rather than the stack map alone.
        #[cfg(feature = "debug")]
        if let Some(frame_table) = module_with_code.module().frame_table() {
            let relpc = module_with_code
                .text_offset(pc)
                .expect("PC should be within module");
            for stack_slot in super::debug::gc_refs_in_frame(frame_table, relpc, fp) {
                unsafe {
                    self.trace_wasm_stack_slot(gc_roots_list, stack_slot);
                }
            }
        }
    }
2246
    /// Reads a potential GC reference out of `stack_slot` and, if it is a
    /// non-null reference, records the slot as a stack root.
    ///
    /// # Safety
    ///
    /// `stack_slot` must be a valid, readable pointer to a stack slot holding
    /// a raw GC reference bit-pattern that stays live while rooted.
    #[cfg(feature = "gc")]
    unsafe fn trace_wasm_stack_slot(&self, gc_roots_list: &mut GcRootsList, stack_slot: *mut u32) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let raw: u32 = unsafe { core::ptr::read(stack_slot) };
        log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

        // Null references decode to `None` and need no rooting.
        let gc_ref = vm::VMGcRef::from_raw_u32(raw);
        if gc_ref.is_some() {
            unsafe {
                gc_roots_list
                    .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
            }
        }
    }
2263
    /// Traces GC roots held on the currently-active Wasm stack by walking a
    /// backtrace frame-by-frame.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            // Always continue: every frame must be visited.
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
2276
    /// Traces GC roots held on the stacks of suspended stack-switching
    /// continuations owned by this store.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                // Only suspended continuations have a stack that isn't
                // otherwise reachable; walk it like the main Wasm stack.
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
2315
    /// Traces GC roots stored in vmctx-resident items: every global and every
    /// table element in this store.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
2323
    /// Traces GC roots held by instances themselves, namely their element
    /// segments.
    #[cfg(feature = "gc")]
    fn trace_instance_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: instance");
        for (_id, instance) in &mut self.instances {
            // SAFETY: the instance's GC roots will remain valid for the
            // duration of this GC cycle.
            unsafe {
                instance
                    .handle
                    .get_mut()
                    .trace_element_segment_roots(gc_roots_list);
            }
        }
        log::trace!("End trace GC roots :: instance");
    }
2339
    /// Traces GC roots created by the embedder via this store's `RootSet`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
2346
    /// Traces the store's pending exception, if any, as a GC root so it
    /// survives collection while waiting to be delivered.
    #[cfg(feature = "gc")]
    fn trace_pending_exception_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: pending exception");
        if let Some(pending_exception) = self.pending_exception.as_mut() {
            unsafe {
                let root = pending_exception.as_gc_ref_mut();
                gc_roots_list.add_vmgcref_root(root.into(), "Pending exception");
            }
        }
        log::trace!("End trace GC roots :: pending exception");
    }
2358
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // Holding the `RegisteredType` here keeps its registration alive for
        // the lifetime of this store.
        self.gc_host_alloc_types.insert(ty);
    }
2369
    /// Helper function to execute an `init_gc_ref` barrier when placing
    /// `gc_ref` into `dest`.
    ///
    /// This avoids touching the `GcStore` when no barrier is needed (e.g. for
    /// null or i31 references), so a GC heap need not be allocated.
    pub(crate) fn init_gc_ref(
        &mut self,
        dest: &mut MaybeUninit<Option<VMGcRef>>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if GcStore::needs_init_barrier(gc_ref) {
            self.unwrap_gc_store_mut().init_gc_ref(dest, gc_ref)
        } else {
            // No barrier required: a plain copy suffices.
            dest.write(gc_ref.map(|r| r.copy_i31()));
        }
    }
2384
    /// Helper function to execute a write barrier when placing `gc_ref` in
    /// `dest`.
    ///
    /// This avoids allocating a `GcStore` where possible by passing the
    /// optional store through to the barrier implementation.
    pub(crate) fn write_gc_ref(&mut self, dest: &mut Option<VMGcRef>, gc_ref: Option<&VMGcRef>) {
        GcStore::write_gc_ref_optional_store(self.optional_gc_store_mut(), dest, gc_ref)
    }
2391
    /// Helper function to clone `gc_ref`, notably avoiding allocating a
    /// `GcStore` where possible.
    ///
    /// i31 references are plain values and can be copied without consulting
    /// the GC store; any other reference goes through the store's clone
    /// barrier.
    pub(crate) fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
        if gc_ref.is_i31() {
            gc_ref.copy_i31()
        } else {
            self.unwrap_gc_store_mut().clone_gc_ref(gc_ref)
        }
    }
2401
    /// Returns the amount of fuel remaining in this store, or an error if
    /// fuel consumption is not enabled.
    pub fn get_fuel(&self) -> Result<u64> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // Read the fuel counter shared with the VM. NOTE(review): presumably
        // wasm is not concurrently mutating this cell while `&self` is held —
        // confirm against the `VMStoreContext` access rules.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
2410
    /// Moves fuel from this store's reserve back into the VM's active fuel
    /// counter, returning whether any fuel remained to inject.
    pub(crate) fn refuel(&mut self) -> bool {
        // Mutable access to the counter shared with the VM; see `get_fuel`
        // for the aliasing caveat.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
2419
    /// Sets the total amount of fuel available to this store, or returns an
    /// error if fuel consumption is not enabled.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        crate::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // Mutable access to the counter shared with the VM; see `get_fuel`
        // for the aliasing caveat.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
2434
2435 #[cfg(feature = "async")]
2436 pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
2437 crate::ensure!(
2438 self.engine().tunables().consume_fuel,
2439 "fuel is not configured in this store"
2440 );
2441 crate::ensure!(
2442 interval != Some(0),
2443 "fuel_async_yield_interval must not be 0"
2444 );
2445
2446 // All future entrypoints must be async to handle the case that fuel
2447 // runs out and an async yield is needed.
2448 self.set_async_required(Asyncness::Yes);
2449
2450 self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
2451 // Reset the fuel active + reserve states by resetting the amount.
2452 self.set_fuel(self.get_fuel()?)
2453 }
2454
    /// Returns a raw pointer to the configured signal handler, if any.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        Some(handler)
    }
2460
    /// Returns a non-null pointer to this store's `VMStoreContext`, suitable
    /// for handing to compiled code.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
2465
    /// Returns the `VMContext` used as the default caller when the host
    /// invokes wasm without a wasm caller on the stack.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
2470
    /// Returns this store's self-referential `dyn VMStore` pointer.
    ///
    /// The `unwrap` asserts the pointer has been installed; NOTE(review):
    /// presumably this happens during store construction — confirm before
    /// calling from any pre-initialization path.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn VMStore> {
        self.traitobj.0.unwrap()
    }
2475
    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
    /// used as part of calling the host in a `Func::new` method invocation.
    ///
    /// Leaves an empty vector in place; pair with
    /// [`Self::save_hostcall_val_storage`] to return the buffer afterwards.
    #[inline]
    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
        mem::take(&mut self.hostcall_val_storage)
    }
2482
    /// Restores the vector previously taken by `take_hostcall_val_storage`
    /// above back into the store, allowing it to be used in the future for the
    /// next wasm->host call.
    #[inline]
    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
        // Only keep the larger buffer so the cached allocation never shrinks.
        if storage.capacity() > self.hostcall_val_storage.capacity() {
            self.hostcall_val_storage = storage;
        }
    }
2492
    /// Same as `take_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    ///
    /// Leaves an empty buffer in place until restored via
    /// [`Self::save_wasm_val_raw_storage`].
    #[inline]
    pub fn take_wasm_val_raw_storage(&mut self) -> TryVec<ValRaw> {
        mem::take(&mut self.wasm_val_raw_storage)
    }
2499
    /// Same as `save_hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    #[inline]
    pub fn save_wasm_val_raw_storage(&mut self, storage: TryVec<ValRaw>) {
        // Only keep the larger buffer so the cached allocation never shrinks.
        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
            self.wasm_val_raw_storage = storage;
        }
    }
2508
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // At most one instance should claim the faulting address;
                // two matches would indicate overlapping bookkeeping.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        // A translated fault is reported to the caller; falling through below
        // means `addr` belongs to no linear memory known to this store.
        if fault.is_some() {
            return fault;
        }

        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc: 0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                // Panicking from this destructor while the `panic!` below is
                // already unwinding forces the runtime to abort the process.
                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
2617
    /// Retrieve the store's protection key, if one is set.
    #[inline]
    #[cfg(feature = "pooling-allocator")]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
2624
    /// Returns mutable access to this store's async (fiber) state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
2629
    /// Returns whether a protection key has been configured for this store.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
2634
    /// Returns a borrowed view of the executor — interpreter or native —
    /// that this store uses to run wasm code.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            // The native variant only exists when a host compiler backend is
            // available for this target.
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
2642
    /// Exchanges this store's executor with `executor` in place.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
2647
    /// Returns the unwind implementation matching the active executor: the
    /// interpreter's own unwinder, or the host unwinder for native code.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
2655
2656 /// Allocates a new continuation. Note that we currently don't support
2657 /// deallocating them. Instead, all continuations remain allocated
2658 /// throughout the store's lifetime.
2659 #[cfg(feature = "stack-switching")]
2660 pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
2661 // FIXME(frank-emrich) Do we need to pin this?
2662 let mut continuation = Box::new(VMContRef::empty());
2663 let stack_size = self.engine.config().async_stack_size;
2664 let stack = crate::vm::VMContinuationStack::new(stack_size)?;
2665 continuation.stack = stack;
2666 let ptr = continuation.deref_mut() as *mut VMContRef;
2667 self.continuations.push(continuation);
2668 Ok(ptr)
2669 }
2670
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The `imports` provided must be correctly sized/typed for the module
    /// being allocated.
    pub(crate) async unsafe fn allocate_instance(
        &mut self,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the id the new instance will receive; this is asserted
        // against the actual id after the push below.
        let id = self.instances.next_key();

        // Real modules use the engine's configured allocator while dummy
        // instances use the caller-provided one.
        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        // SAFETY: this function's own contract is the same as
        // `allocate_module`, namely the imports provided are valid.
        let handle = unsafe {
            allocator
                .allocate_module(InstanceAllocationRequest {
                    id,
                    runtime_info,
                    imports,
                    store: self,
                    limiter,
                })
                .await?
        };

        // Record the freshly-allocated handle in the store, tagged with
        // whether it's a real module instance or a dummy one.
        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })?
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })?
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2746
    /// Set a pending exception. The `exnref` is taken and held on
    /// this store to be fetched later by an unwind. This method does
    /// *not* set up an unwind request on the TLS call state; that
    /// must be done separately.
    ///
    /// Any previously-pending exception is overwritten.
    #[cfg(feature = "gc")]
    pub(crate) fn set_pending_exception(&mut self, exnref: VMExnRef) {
        self.pending_exception = Some(exnref);
    }
2755
    /// Take a pending exception, if any, leaving `None` in its place.
    #[cfg(feature = "gc")]
    pub(crate) fn take_pending_exception(&mut self) -> Option<VMExnRef> {
        self.pending_exception.take()
    }
2761
    /// Tests whether there is a pending exception, without taking it.
    #[cfg(feature = "gc")]
    pub fn has_pending_exception(&self) -> bool {
        self.pending_exception.is_some()
    }
2767
2768 #[cfg(feature = "gc")]
2769 fn take_pending_exception_rooted(&mut self) -> Option<Rooted<ExnRef>> {
2770 let vmexnref = self.take_pending_exception()?;
2771 let mut nogc = AutoAssertNoGc::new(self);
2772 Some(Rooted::new(&mut nogc, vmexnref.into()))
2773 }
2774
    /// Get an owned rooted reference to the pending exception,
    /// without taking it off the store.
    ///
    /// Returns `Ok(None)` when no exception is pending; may fail with
    /// `OutOfMemory` while rooting.
    #[cfg(all(feature = "gc", feature = "debug"))]
    pub(crate) fn pending_exception_owned_rooted(
        &mut self,
    ) -> Result<Option<OwnedRooted<ExnRef>>, crate::error::OutOfMemory> {
        // Disallow GC while raw (unrooted) GC references are in flight below.
        let mut nogc = AutoAssertNoGc::new(self);
        nogc.pending_exception
            .take()
            .map(|vmexnref| {
                // Clone the GC reference, reinstall the clone as the pending
                // exception, and root the original for the caller.
                let cloned = nogc.clone_gc_ref(vmexnref.as_gc_ref());
                nogc.pending_exception = Some(cloned.into_exnref_unchecked());
                OwnedRooted::new(&mut nogc, vmexnref.into())
            })
            .transpose()
    }
2791
    /// Converts `exception` to a raw, non-null GC reference and records it as
    /// this store's pending exception.
    #[cfg(feature = "gc")]
    fn throw_impl(&mut self, exception: Rooted<ExnRef>) {
        // GC must not run while the raw reference below is held.
        let mut nogc = AutoAssertNoGc::new(self);
        let exnref = exception._to_raw(&mut nogc).unwrap();
        // A zero raw value would mean a null exception reference, which
        // callers must not pass in.
        let exnref = VMGcRef::from_raw_u32(exnref)
            .expect("exception cannot be null")
            .into_exnref_unchecked();
        nogc.set_pending_exception(exnref);
    }
2801
2802 #[cfg(target_has_atomic = "64")]
2803 pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
2804 // Set a new deadline based on the "epoch deadline delta".
2805 //
2806 // Also, note that when this update is performed while Wasm is
2807 // on the stack, the Wasm will reload the new value once we
2808 // return into it.
2809 let current_epoch = self.engine().current_epoch();
2810 let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
2811 *epoch_deadline = current_epoch + delta;
2812 }
2813
    /// Returns the currently-configured epoch deadline.
    pub(crate) fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
2817
    /// Returns an error if this store's configuration requires that `*_async`
    /// API variants be used; always succeeds in non-async builds.
    #[inline]
    pub(crate) fn validate_sync_call(&self) -> Result<()> {
        #[cfg(feature = "async")]
        if self.async_state.async_required {
            bail!("store configuration requires that `*_async` functions are used instead");
        }
        Ok(())
    }
2826
    /// Returns whether this store is presently on a fiber and is allowed to
    /// block via `block_on` with fibers.
    pub(crate) fn can_block(&mut self) -> bool {
        // The `if true` exists so the `return` is syntactically conditional,
        // keeping the trailing `false` reachable (and warning-free) whether or
        // not the `async` feature is enabled.
        #[cfg(feature = "async")]
        if true {
            return self.fiber_async_state_mut().can_block();
        }

        false
    }
2837
    /// Non-async builds: nothing to record since `Asyncness` only has the
    /// `No` variant here. The exhaustive match ensures this compiles loudly
    /// if a variant is ever added without updating this method.
    #[cfg(not(feature = "async"))]
    pub(crate) fn set_async_required(&mut self, asyncness: Asyncness) {
        match asyncness {
            Asyncness::No => {}
        }
    }
2844
    /// Cooperatively yields control back to the async caller exactly once.
    #[cfg(any(feature = "async", feature = "gc"))]
    pub(crate) async fn yield_now(&self) {
        // TODO: Once `Config` has an optional `AsyncFn` field for yielding to the
        // current async runtime (e.g. `tokio::task::yield_now`), use that if set;
        // otherwise fall back to the runtime-agnostic code.
        yield_now().await
    }
2852}
2853
#[cfg(any(feature = "async", feature = "gc"))]
async fn yield_now() {
    // A future which is pending exactly once: the first poll flags itself as
    // done, wakes its own waker so the executor will poll again, and returns
    // `Pending`; the second poll then completes.
    let mut woken = false;
    future::poll_fn(move |cx| {
        if !woken {
            woken = true;
            cx.waker().wake_by_ref();
            return Poll::Pending;
        }
        Poll::Ready(())
    })
    .await;
}
2868
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        allocator: &'a dyn InstanceAllocator,
    },
}
2887
unsafe impl<T> VMStore for StoreInner<T> {
    // View this store as the component-model async store trait object.
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    // Delegates to the inherent `StoreOpaque` accessor of the same name.
    #[cfg(feature = "component-model")]
    fn component_task_state_mut(&mut self) -> &mut crate::component::store::ComponentTaskState {
        StoreOpaque::component_task_state_mut(self)
    }

    // Plain projections to the type-erased `StoreOpaque` contained within.
    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    fn resource_limiter_and_store_opaque(
        &mut self,
    ) -> (Option<StoreResourceLimiter<'_>>, &mut StoreOpaque) {
        // Split-borrow the store so the limiter closure can borrow the `T`
        // data while the opaque portion is returned alongside it.
        let (data, limiter, opaque) = self.data_limiter_and_opaque();

        // Resolve the configured limiter closure, if any, against the store's
        // data to produce a concrete sync or async limiter.
        let limiter = limiter.map(|l| match l {
            ResourceLimiterInner::Sync(s) => StoreResourceLimiter::Sync(s(data)),
            #[cfg(feature = "async")]
            ResourceLimiterInner::Async(s) => StoreResourceLimiter::Async(s(data)),
        });

        (limiter, opaque)
    }

    #[cfg(target_has_atomic = "64")]
    fn new_epoch_updated_deadline(&mut self) -> Result<UpdateDeadline> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        // With no callback configured the default is to interrupt execution.
        let update = match &mut behavior {
            Some(callback) => callback((&mut *self).as_context_mut()),
            None => Ok(UpdateDeadline::Interrupt),
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        update
    }

    #[cfg(feature = "debug")]
    fn block_on_debug_handler(&mut self, event: crate::DebugEvent<'_>) -> crate::Result<()> {
        // NOTE(review): the handler is `take`n here, avoiding a double borrow
        // of the store while it runs, and is not reinstalled afterwards —
        // confirm whether re-arming is intentionally the handler's job.
        if let Some(handler) = self.debug_handler.take() {
            if !self.can_block() {
                bail!("could not invoke debug handler without async context");
            }
            log::trace!("about to raise debug event {event:?}");
            // Synchronously block on the handler's future from within the
            // store's blocking context.
            StoreContextMut(self).with_blocking(|store, cx| {
                cx.block_on(Pin::from(handler.handle(store, event)).as_mut())
            })
        } else {
            Ok(())
        }
    }
}
2953
2954impl<T> StoreInner<T> {
2955 #[cfg(target_has_atomic = "64")]
2956 fn epoch_deadline_trap(&mut self) {
2957 self.epoch_deadline_behavior = None;
2958 }
2959
2960 #[cfg(target_has_atomic = "64")]
2961 fn epoch_deadline_callback(
2962 &mut self,
2963 callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
2964 ) {
2965 self.epoch_deadline_behavior = Some(callback);
2966 }
2967}
2968
2969impl<T: Default> Default for Store<T> {
2970 fn default() -> Store<T> {
2971 Store::new(&Engine::default(), T::default())
2972 }
2973}
2974
2975impl<T: fmt::Debug> fmt::Debug for Store<T> {
2976 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2977 let inner = &**self.inner as *const StoreInner<T>;
2978 f.debug_struct("Store")
2979 .field("inner", &inner)
2980 .field("data", self.inner.data())
2981 .finish()
2982 }
2983}
2984
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run manual teardown routines while both the `T` data and the store
        // internals are still alive.
        self.run_manual_drop_routines();

        // For documentation on this `unsafe`, see `into_data`.
        //
        // Note the ordering: the embedder's `T` data is dropped before the
        // rest of the store's internals.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data_no_provenance);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2996
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            // Dummy instances were created with an on-demand allocator while
            // everything else came from the engine's configured allocator;
            // each resource must be returned to the allocator that made it.
            let allocator = self.engine.allocator();
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // Return the GC heap (and its backing memory), if any, first.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Then deallocate every instance this store still owns.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                let allocator = match instance.kind {
                    StoreInstanceKind::Dummy => &ondemand,
                    _ => allocator,
                };
                allocator.deallocate_module(&mut instance.handle);
            }

            self.store_data.decrement_allocator_resources(allocator);
        }
    }
}
3030
#[cfg_attr(
    not(any(feature = "gc", feature = "async")),
    // NB: Rust 1.89, current stable, does not fire this lint. Rust 1.90,
    // however, does, so use #[allow] until our MSRV is 1.90.
    allow(dead_code, reason = "don't want to put #[cfg] on all impls below too")
)]
/// Internal helper trait to uniformly view the various store-like types
/// (`Store<T>`, `StoreInner<T>`, `dyn VMStore`, ...) as `&mut StoreOpaque`.
pub(crate) trait AsStoreOpaque {
    /// Returns the underlying `StoreOpaque` of this store-like value.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque;
}
3040
impl AsStoreOpaque for StoreOpaque {
    // Identity conversion: a `StoreOpaque` is already opaque.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
3046
impl AsStoreOpaque for dyn VMStore {
    // `self` deref-coerces from `&mut dyn VMStore` to `&mut StoreOpaque`.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
3052
impl<T: 'static> AsStoreOpaque for Store<T> {
    // Project through `Store` -> `StoreInner` -> its `StoreOpaque` field.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        &mut self.inner.inner
    }
}
3058
impl<T: 'static> AsStoreOpaque for StoreInner<T> {
    // `self` deref-coerces from `&mut StoreInner<T>` to `&mut StoreOpaque`.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        self
    }
}
3064
impl<T: AsStoreOpaque + ?Sized> AsStoreOpaque for &mut T {
    // Peel one layer of `&mut` and delegate to the inner implementation.
    fn as_store_opaque(&mut self) -> &mut StoreOpaque {
        T::as_store_opaque(self)
    }
}
3070
/// Helper enum to indicate, in some function contexts, whether `async` should
/// be taken advantage of or not.
///
/// This is used throughout Wasmtime where internal functions are all `async`
/// but external functions might be either sync or `async`. If the external
/// function is sync, then internally Wasmtime shouldn't yield as it won't do
/// anything. If the external function is `async`, however, yields are fine.
///
/// An example of this is GC. Right now GC will cooperatively yield after phases
/// of GC have passed, but this cooperative yielding is only enabled with
/// `Asyncness::Yes`.
///
/// This enum is additionally conditionally defined such that `Yes` is only
/// present in `async`-enabled builds. That ensures that this compiles down to a
/// zero-sized type in `async`-disabled builds in case that interests embedders.
#[derive(PartialEq, Eq, Copy, Clone)]
pub enum Asyncness {
    /// Don't do async things, don't yield, etc. It's ok to execute an `async`
    /// function, but it should be validated ahead of time that when doing so a
    /// yield isn't possible (e.g. the `validate_sync_*` methods on `Store`).
    No,

    /// Async things are OK. This should only be used when the API entrypoint
    /// is itself `async`.
    #[cfg(feature = "async")]
    Yes,
}
3098
3099impl core::ops::BitOr for Asyncness {
3100 type Output = Self;
3101
3102 fn bitor(self, rhs: Self) -> Self::Output {
3103 match (self, rhs) {
3104 (Asyncness::No, Asyncness::No) => Asyncness::No,
3105 #[cfg(feature = "async")]
3106 (Asyncness::Yes, _) | (_, Asyncness::Yes) => Asyncness::Yes,
3107 }
3108 }
3109}
3110
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal harness around the store's fuel bookkeeping: fuel is tracked
    /// as a (possibly negative) consumed counter plus an unsigned reserve,
    /// optionally split at a yield interval.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            FuelTank {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }
        // Thin wrappers forwarding to the free functions under test.
        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }
        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }
        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With a yield interval, only up to that much fuel is active at once;
        // the remainder is held back in the reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        let mut tank = FuelTank::new();
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        // One more than `i64::MAX` exercises the split between the signed
        // consumed counter and the unsigned reserve.
        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // Intervals at or above `i64::MAX` get clamped to what the signed
        // consumed counter can represent.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // It's possible for fuel consumption to go over the limit as some
        // instructions can consume multiple units of fuel at once. Refueling
        // should be strict in its consumption and not add more fuel than
        // there is.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // Fully out of fuel: refueling fails and nothing changes.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }

    #[test]
    fn store_data_provenance() {
        // Test that we juggle pointer provenance and all that correctly, and
        // miri is happy with everything, while allowing both Rust code and
        // "Wasm" to access and modify the store's `T` data. Note that this is
        // not actually Wasm mutating the store data here because compiling Wasm
        // under miri is way too slow.

        // Mimics wasm's view of the store data: a raw pointer read out of the
        // `VMStoreContext`, mutated through that raw pointer.
        unsafe fn run_wasm(store: &mut Store<u32>) {
            let ptr = store
                .inner
                .inner
                .vm_store_context
                .store_data
                .as_ptr()
                .cast::<u32>();
            unsafe { *ptr += 1 }
        }

        let engine = Engine::default();
        let mut store = Store::new(&engine, 0_u32);

        // Interleave safe mutation (`data_mut`) with the raw-pointer path to
        // check both views observe each other's writes.
        assert_eq!(*store.data(), 0);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 1);
        unsafe { run_wasm(&mut store) }
        assert_eq!(*store.data(), 2);
        *store.data_mut() += 1;
        assert_eq!(*store.data(), 3);
    }
}
3261}