wasmtime/runtime/store/async_.rs

use crate::prelude::*;
use crate::runtime::vm::mpk::{self, ProtectionMask};
use crate::store::{ResourceLimiterInner, StoreInner, StoreOpaque};
#[cfg(feature = "call-hook")]
use crate::CallHook;
use crate::{Engine, Store, StoreContextMut, UpdateDeadline};
use core::cell::UnsafeCell;
use core::future::Future;
use core::ops::Range;
use core::pin::{pin, Pin};
use core::ptr;
use core::task::{Context, Poll};

/// An object that can take callbacks when the runtime enters or exits hostcalls.
#[cfg(feature = "call-hook")]
#[async_trait::async_trait]
pub trait CallHookHandler<T>: Send {
    /// A callback to run when wasmtime is about to enter a host call, or when about to
    /// exit the hostcall.
    async fn handle_call_event(&self, t: StoreContextMut<'_, T>, ch: CallHook) -> Result<()>;
}

pub struct AsyncState {
    current_suspend: UnsafeCell<*mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
    current_poll_cx: UnsafeCell<PollContext>,
    /// The last fiber stack that was in use by this store.
    last_fiber_stack: Option<wasmtime_fiber::FiberStack>,
}

impl Default for AsyncState {
    fn default() -> AsyncState {
        AsyncState {
            current_suspend: UnsafeCell::new(ptr::null_mut()),
            current_poll_cx: UnsafeCell::new(PollContext::default()),
            last_fiber_stack: None,
        }
    }
}

// Lots of pesky unsafe cells and pointers in this structure. This means we need
// to declare explicitly that we use this in a threadsafe fashion.
unsafe impl Send for AsyncState {}
unsafe impl Sync for AsyncState {}

#[derive(Clone, Copy)]
struct PollContext {
    future_context: *mut Context<'static>,
    guard_range_start: *mut u8,
    guard_range_end: *mut u8,
}

impl Default for PollContext {
    fn default() -> PollContext {
        PollContext {
            future_context: core::ptr::null_mut(),
            guard_range_start: core::ptr::null_mut(),
            guard_range_end: core::ptr::null_mut(),
        }
    }
}

impl<T> Store<T> {
    /// Configures the [`ResourceLimiterAsync`](crate::ResourceLimiterAsync)
    /// used to limit resource creation within this [`Store`].
    ///
    /// This method is an asynchronous variant of the [`Store::limiter`] method
    /// where the embedder can block the wasm request for more resources with
    /// host `async` execution of futures.
    ///
    /// By using a [`ResourceLimiterAsync`](`crate::ResourceLimiterAsync`)
    /// with a [`Store`], you can no longer use
    /// [`Memory::new`](`crate::Memory::new`),
    /// [`Memory::grow`](`crate::Memory::grow`),
    /// [`Table::new`](`crate::Table::new`), and
    /// [`Table::grow`](`crate::Table::grow`). Instead, you must use their
    /// `async` variants: [`Memory::new_async`](`crate::Memory::new_async`),
    /// [`Memory::grow_async`](`crate::Memory::grow_async`),
    /// [`Table::new_async`](`crate::Table::new_async`), and
    /// [`Table::grow_async`](`crate::Table::grow_async`).
    ///
    /// Note that this limiter is only used to limit the creation/growth of
    /// resources in the future; it does not retroactively attempt to apply
    /// limits to the [`Store`]. Additionally this must be used with an async
    /// [`Store`] configured via
    /// [`Config::async_support`](crate::Config::async_support).
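    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled as a doctest) of wiring up an async
    /// limiter; `MyLimiter` here is a hypothetical embedder-defined type that
    /// implements [`ResourceLimiterAsync`](crate::ResourceLimiterAsync):
    ///
    /// ```ignore
    /// use wasmtime::{Config, Engine, Store};
    ///
    /// struct StoreData {
    ///     limiter: MyLimiter, // hypothetical limiter implementation
    /// }
    ///
    /// let mut config = Config::new();
    /// config.async_support(true); // `limiter_async` requires an async store
    /// let engine = Engine::new(&config)?;
    /// let mut store = Store::new(&engine, StoreData { limiter: MyLimiter::default() });
    ///
    /// // Hand the store a projection from its data to the limiter.
    /// store.limiter_async(|data| &mut data.limiter);
    /// ```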
    pub fn limiter_async(
        &mut self,
        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync)
            + Send
            + Sync
            + 'static,
    ) {
        debug_assert!(self.inner.async_support());
        // Apply the limits on instances, tables, and memory given by the limiter:
        let inner = &mut self.inner;
        let (instance_limit, table_limit, memory_limit) = {
            let l = limiter(&mut inner.data);
            (l.instances(), l.tables(), l.memories())
        };
        let innermost = &mut inner.inner;
        innermost.instance_limit = instance_limit;
        innermost.table_limit = table_limit;
        innermost.memory_limit = memory_limit;

        // Save the limiter accessor function:
        inner.limiter = Some(ResourceLimiterInner::Async(Box::new(limiter)));
    }

    /// Configures an async function that runs on calls and returns between
    /// WebAssembly and host code. For the non-async equivalent of this method,
    /// see [`Store::call_hook`].
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function's future may return a [`Trap`]. If a trap is returned
    /// when an import was called, it is immediately raised as-if the host
    /// import had returned the trap. If a trap is returned after wasm returns
    /// to the host then the wasm function's result is ignored and this trap is
    /// returned instead.
    ///
    /// After this function returns a trap, it may be called for subsequent
    /// returns to host or wasm code as the trap propagates to the root call.
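    ///
    /// # Example
    ///
    /// A hedged sketch (not compiled as a doctest) of a handler; `LoggingHook`
    /// is a hypothetical type and `store` is assumed to be an async-configured
    /// [`Store`]:
    ///
    /// ```ignore
    /// use wasmtime::{CallHook, CallHookHandler, Result, StoreContextMut};
    ///
    /// struct LoggingHook;
    ///
    /// #[async_trait::async_trait]
    /// impl CallHookHandler<()> for LoggingHook {
    ///     async fn handle_call_event(
    ///         &self,
    ///         _store: StoreContextMut<'_, ()>,
    ///         hook: CallHook,
    ///     ) -> Result<()> {
    ///         // Could `.await` an async logger or rate limiter here.
    ///         match hook {
    ///             CallHook::CallingHost => println!("entering a host call"),
    ///             CallHook::ReturningFromHost => println!("returning from a host call"),
    ///             _ => {}
    ///         }
    ///         Ok(())
    ///     }
    /// }
    ///
    /// store.call_hook_async(LoggingHook);
    /// ```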
    #[cfg(feature = "call-hook")]
    pub fn call_hook_async(&mut self, hook: impl CallHookHandler<T> + Send + Sync + 'static) {
        self.inner.call_hook = Some(crate::store::CallHookInner::Async(Box::new(hook)));
    }

    /// Perform garbage collection asynchronously.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
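    ///
    /// # Example
    ///
    /// A minimal sketch (not compiled as a doctest), assuming an
    /// async-configured [`Store`] named `store` inside an `async` context:
    ///
    /// ```ignore
    /// // Force a collection now rather than waiting for the engine's
    /// // internal heuristics to trigger one.
    /// store.gc_async().await;
    /// ```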
    #[cfg(feature = "gc")]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.inner.gc_async().await;
    }

    /// Configures epoch-deadline expiration to yield to the async
    /// caller and update the deadline.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will yield
    /// (the future will return `Pending` but re-awake itself for
    /// later execution) and, upon resuming, the store will be
    /// configured with an epoch deadline equal to the current epoch
    /// plus `delta` ticks.
    ///
    /// This setting is intended to allow for cooperative timeslicing
    /// of multiple CPU-bound Wasm guests in different stores, all
    /// executing under the control of an async executor. To drive
    /// this, stores should be configured to "yield and update"
    /// automatically with this function, and some external driver (a
    /// thread that wakes up periodically, or a timer
    /// signal/interrupt) should call
    /// [`Engine::increment_epoch()`](crate::Engine::increment_epoch).
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
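    ///
    /// # Example
    ///
    /// A hedged sketch (not compiled as a doctest) of the setup described
    /// above; the timer thread and the one-tick `delta` are illustrative
    /// choices, not requirements:
    ///
    /// ```ignore
    /// use std::time::Duration;
    /// use wasmtime::{Config, Engine, Store};
    ///
    /// let mut config = Config::new();
    /// config.async_support(true);
    /// config.epoch_interruption(true);
    /// let engine = Engine::new(&config)?;
    ///
    /// let mut store = Store::new(&engine, ());
    /// // Yield to the executor at each deadline, then run for one more tick.
    /// store.epoch_deadline_async_yield_and_update(1);
    ///
    /// // Some external driver must advance the epoch periodically.
    /// let engine2 = engine.clone();
    /// std::thread::spawn(move || loop {
    ///     std::thread::sleep(Duration::from_millis(10));
    ///     engine2.increment_epoch();
    /// });
    /// ```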
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.inner.epoch_deadline_async_yield_and_update(delta);
    }
}

impl<'a, T> StoreContextMut<'a, T> {
    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub async fn gc_async(&mut self)
    where
        T: Send,
    {
        self.0.gc_async().await;
    }

    /// Configures epoch-deadline expiration to yield to the async
    /// caller and update the deadline.
    ///
    /// For more information see
    /// [`Store::epoch_deadline_async_yield_and_update`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.0.epoch_deadline_async_yield_and_update(delta);
    }
}

impl<T> StoreInner<T> {
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        assert!(
            self.async_support(),
            "cannot use `epoch_deadline_async_yield_and_update` without enabling async support in the config"
        );
        self.epoch_deadline_behavior =
            Some(Box::new(move |_store| Ok(UpdateDeadline::Yield(delta))));
    }
}

#[doc(hidden)]
impl StoreOpaque {
    /// Executes a synchronous computation `func` asynchronously on a new fiber.
    ///
    /// This function will convert the synchronous `func` into an asynchronous
    /// future. This is done by running `func` in a fiber on a separate native
    /// stack which can be suspended and resumed from.
    ///
    /// Most of the nitty-gritty here is how we juggle the various contexts
    /// necessary to suspend the fiber later on and poll sub-futures. It's hoped
    /// that the various comments are illuminating as to what's going on here.
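    ///
    /// A hedged, crate-internal usage sketch (hypothetical call site and
    /// helper, not a doctest): run a blocking computation without blocking
    /// the executor thread.
    ///
    /// ```ignore
    /// // `store` is a `&mut StoreOpaque`; the closure runs on the fiber stack.
    /// let value = store.on_fiber(|store| compute_synchronously(store)).await?;
    /// ```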
    pub(crate) async fn on_fiber<R>(
        &mut self,
        func: impl FnOnce(&mut Self) -> R + Send,
    ) -> Result<R> {
        let config = self.engine().config();
        debug_assert!(self.async_support());
        debug_assert!(config.async_stack_size > 0);

        let mut slot = None;
        let mut future = {
            let current_poll_cx = self.async_state.current_poll_cx.get();
            let current_suspend = self.async_state.current_suspend.get();
            let stack = self.allocate_fiber_stack()?;

            let engine = self.engine().clone();
            let slot = &mut slot;
            let this = &mut *self;
            let fiber = wasmtime_fiber::Fiber::new(stack, move |keep_going, suspend| {
                // First check and see if we were interrupted/dropped, and only
                // continue if we haven't been.
                keep_going?;

                // Configure our store's suspension context for the rest of the
                // execution of this fiber. Note that a raw pointer is stored here
                // which is only valid for the duration of this closure.
                // Consequently we at least replace it with the previous value when
                // we're done. This reset is also required for correctness because
                // otherwise our value will overwrite another active fiber's value.
                // There should be a test that segfaults in `async_functions.rs` if
                // this `Reset` is removed.
                unsafe {
                    let _reset = Reset(current_suspend, *current_suspend);
                    *current_suspend = suspend;

                    *slot = Some(func(this));
                    Ok(())
                }
            })?;

            // Once we have the fiber representing our synchronous computation, we
            // wrap that in a custom future implementation which does the
            // translation from the future protocol to our fiber API.
            FiberFuture {
                fiber: Some(fiber),
                current_poll_cx,
                engine,
                state: Some(crate::runtime::vm::AsyncWasmCallState::new()),
            }
        };
        (&mut future).await?;
        let stack = future.fiber.take().map(|f| f.into_stack());
        drop(future);
        if let Some(stack) = stack {
            self.deallocate_fiber_stack(stack);
        }

        return Ok(slot.unwrap());

        struct FiberFuture<'a> {
            fiber: Option<wasmtime_fiber::Fiber<'a, Result<()>, (), Result<()>>>,
            current_poll_cx: *mut PollContext,
            engine: Engine,
            // See comments in `FiberFuture::resume` for this
            state: Option<crate::runtime::vm::AsyncWasmCallState>,
        }

        // This is surely the most dangerous `unsafe impl Send` in the entire
        // crate. There are two members in `FiberFuture` which cause it to not
        // be `Send`. One is `current_poll_cx` and is entirely uninteresting.
        // This is just used to manage `Context` pointers across `await` points
        // in the future, and requires raw pointers to get it to happen easily.
        // Nothing too weird about the `Send`-ness, values aren't actually
        // crossing threads.
        //
        // The really interesting piece is `fiber`. Now the "fiber" here is
        // actual honest-to-god Rust code which we're moving around. What we're
        // doing is the equivalent of moving our thread's stack to another OS
        // thread. Turns out we, in general, have no idea what's on the stack
        // and would generally have no way to verify that this is actually safe
        // to do!
        //
        // Thankfully, though, Wasmtime has the power. Without being glib it's
        // actually worth examining what's on the stack. It's unfortunately not
        // super-local to this function itself. Our closure to `Fiber::new` runs
        // `func`, which is given to us from the outside. Thankfully, though, we
        // have tight control over this. Usage of `on_fiber` is typically done
        // *just* before entering WebAssembly itself, so we'll have a few stack
        // frames of Rust code (all in Wasmtime itself) before we enter wasm.
        //
        // Once we've entered wasm, well then we have a whole bunch of wasm
        // frames on the stack. We've got this nifty thing called Cranelift,
        // though, which allows us to also have complete control over everything
        // on the stack!
        //
        // Finally, when wasm switches back to the fiber's starting pointer
        // (this future we're returning) then it means wasm has reentered Rust.
        // Suspension can only happen via the `block_on` function of an
        // `AsyncCx`. This, conveniently, also happens entirely in Wasmtime
        // controlled code!
        //
        // There's an extremely important point that should be called out here.
        // User-provided futures **are not on the stack** during suspension
        // points. This is extremely crucial because we in general cannot reason
        // about Send/Sync for stack-local variables since rustc doesn't analyze
        // them at all. With our construction, though, we are guaranteed that
        // Wasmtime owns all stack frames between the stack of a fiber and when
        // the fiber suspends (and it could move across threads). At this time
        // the only user-provided piece of data on the stack is the future
        // itself given to us. Lo-and-behold as you might notice the future is
        // required to be `Send`!
        //
        // What this all boils down to is that we, as the authors of Wasmtime,
        // need to be extremely careful that on the async fiber stack we only
        // store Send things. For example we can't start using `Rc` willy nilly
        // by accident and leave a copy in TLS somewhere. (similarly we have to
        // be ready for TLS to change while we're executing wasm code between
        // suspension points).
        //
        // While somewhat onerous it shouldn't be too too hard (the TLS bit is
        // the hardest bit so far). This does mean, though, that no user should
        // ever have to worry about the `Send`-ness of Wasmtime. If rustc says
        // it's ok, then it's ok.
        //
        // With all that in mind we unsafely assert here that wasmtime is
        // correct. We declare the fiber as only containing Send data on its
        // stack, despite not knowing for sure at compile time that this is
        // correct. That's what `unsafe` in Rust is all about, though, right?
        unsafe impl Send for FiberFuture<'_> {}

        impl FiberFuture<'_> {
            fn fiber(&self) -> &wasmtime_fiber::Fiber<'_, Result<()>, (), Result<()>> {
                self.fiber.as_ref().unwrap()
            }

            /// This is a helper function to call `resume` on the underlying
            /// fiber while correctly managing Wasmtime's thread-local data.
            ///
            /// Wasmtime's implementation of traps leverages thread-local data
            /// to get access to metadata during a signal. This thread-local
            /// data is a linked list of "activations" where the nodes of the
            /// linked list are stored on the stack. It would be invalid as a
            /// result to suspend a computation with the head of the linked list
            /// on this stack then move the stack to another thread and resume
            /// it. That means that a different thread would point to our stack
            /// and our thread doesn't point to our stack at all!
            ///
            /// Basically management of TLS is required here one way or another.
            /// The strategy currently settled on is to manage the list of
            /// activations created by this fiber as a unit. When a fiber
            /// resumes the linked list is prepended to the current thread's
            /// list. When the fiber is suspended then the fiber's list of
            /// activations are all removed en-masse and saved within the fiber.
            fn resume(&mut self, val: Result<()>) -> Result<Result<()>, ()> {
                unsafe {
                    let prev = self.state.take().unwrap().push();
                    let restore = Restore {
                        fiber: self,
                        state: Some(prev),
                    };
                    return restore.fiber.fiber().resume(val);
                }

                struct Restore<'a, 'b> {
                    fiber: &'a mut FiberFuture<'b>,
                    state: Option<crate::runtime::vm::PreviousAsyncWasmCallState>,
                }

                impl Drop for Restore<'_, '_> {
                    fn drop(&mut self) {
                        unsafe {
                            self.fiber.state = Some(self.state.take().unwrap().restore());
                        }
                    }
                }
            }
        }

        impl Future for FiberFuture<'_> {
            type Output = Result<()>;

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
                // We need to carry over this `cx` into our fiber's runtime
                // for when it tries to poll sub-futures that are created. Doing
                // this must be done unsafely, however, since `cx` is only alive
                // for this one singular function call. Here we do a `transmute`
                // to extend the lifetime of `Context` so it can be stored in
                // our `Store`, and then we replace the current polling context
                // with this one.
                //
                // Note that the replace is done for weird situations where
                // futures might be switching contexts and there are multiple
                // wasmtime futures in a chain of futures.
                //
                // On exit from this function, though, we reset the polling
                // context back to what it was to signify that `Store` no longer
                // has access to this pointer.
                let guard = self
                    .fiber()
                    .stack()
                    .guard_range()
                    .unwrap_or(core::ptr::null_mut()..core::ptr::null_mut());
                unsafe {
                    let _reset = Reset(self.current_poll_cx, *self.current_poll_cx);
                    *self.current_poll_cx = PollContext {
                        future_context: core::mem::transmute::<
                            &mut Context<'_>,
                            *mut Context<'static>,
                        >(cx),
                        guard_range_start: guard.start,
                        guard_range_end: guard.end,
                    };

                    // After that's set up we resume execution of the fiber, which
                    // may also start the fiber for the first time. This either
                    // returns `Ok` saying the fiber finished (yay!) or it
                    // returns `Err` with the payload passed to `suspend`, which
                    // in our case is `()`.
                    match self.resume(Ok(())) {
                        Ok(result) => Poll::Ready(result),

                        // If `Err` is returned that means the fiber polled a
                        // future but it said "Pending", so we propagate that
                        // here.
                        //
                        // An additional safety check is performed when leaving
                        // this function to help bolster the guarantees of
                        // `unsafe impl Send` above. Notably this future may get
                        // re-polled on a different thread. Wasmtime's
                        // thread-local state points to the stack, however,
                        // meaning that it would be incorrect to leave a pointer
                        // in TLS when this function returns. This function
                        // performs a runtime assert to verify that this is the
                        // case, notably that the one TLS pointer Wasmtime uses
                        // is not pointing anywhere within the stack. If it is
                        // then that's a bug indicating that TLS management in
                        // Wasmtime is incorrect.
                        Err(()) => {
                            if let Some(range) = self.fiber().stack().range() {
                                crate::runtime::vm::AsyncWasmCallState::assert_current_state_not_in_range(range);
                            }
                            Poll::Pending
                        }
                    }
                }
            }
        }

        // Dropping futures is pretty special in that it means the future has
        // been requested to be cancelled. Here we run the risk of dropping an
        // in-progress fiber, and if we were to do nothing then the fiber would
        // leak all its owned stack resources.
        //
        // To handle this we implement `Drop` here and, if the fiber isn't done,
        // resume execution of the fiber saying "hey please stop you're
        // interrupted". Our `Trap` created here (which has the stack trace
        // of whomever dropped us) will then get propagated in whatever called
        // `block_on`, and the idea is that the trap propagates all the way back
        // up to the original fiber start, finishing execution.
        //
        // We don't actually care about the fiber's return value here (no one's
        // around to look at it), we just assert the fiber finished to
        // completion.
        impl Drop for FiberFuture<'_> {
            fn drop(&mut self) {
                if self.fiber.is_none() {
                    return;
                }

                if !self.fiber().done() {
                    let result = self.resume(Err(anyhow!("future dropped")));
                    // This resumption with an error should always complete the
                    // fiber. While it's technically possible for host code to catch
                    // the trap and re-resume, we'd ideally like to signal to
                    // callers that they shouldn't be doing that.
                    debug_assert!(result.is_ok());
                }

                self.state.take().unwrap().assert_null();

                unsafe {
                    self.engine
                        .allocator()
                        .deallocate_fiber_stack(self.fiber.take().unwrap().into_stack());
                }
            }
        }
    }

    #[cfg(feature = "gc")]
    pub async fn gc_async(&mut self) {
        assert!(
            self.async_support(),
            "cannot use `gc_async` without enabling async support in the config",
        );

        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin Async GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots_async(&mut roots).await;
        self.unwrap_gc_store_mut()
            .gc_async(unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End Async GC ===========");
    }

    #[inline]
    #[cfg(not(feature = "gc"))]
    pub async fn gc_async(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }

    #[cfg(feature = "gc")]
    async fn trace_roots_async(&mut self, gc_roots_list: &mut crate::runtime::vm::GcRootsList) {
        use crate::runtime::vm::Yield;

        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        Yield::new().await;
        self.trace_vmctx_roots(gc_roots_list);
        Yield::new().await;
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }

    /// Yields the async context, assuming that we are executing on a fiber and
    /// that fiber is not in the process of dying. This function will return
    /// None in the latter case (the fiber is dying), and panic if
    /// `async_support()` is false.
    #[inline]
    pub fn async_cx(&self) -> Option<AsyncCx> {
        assert!(self.async_support());

        let poll_cx_box_ptr = self.async_state.current_poll_cx.get();
        if poll_cx_box_ptr.is_null() {
            return None;
        }

        let poll_cx_inner_ptr = unsafe { *poll_cx_box_ptr };
        if poll_cx_inner_ptr.future_context.is_null() {
            return None;
        }

        Some(AsyncCx {
            current_suspend: self.async_state.current_suspend.get(),
            current_poll_cx: unsafe { &raw mut (*poll_cx_box_ptr).future_context },
            track_pkey_context_switch: self.pkey.is_some(),
        })
    }

    /// Yields execution to the caller on out-of-gas or epoch interruption.
    ///
    /// This only works on async futures and stores, and assumes that we're
    /// executing on a fiber. This will yield execution back to the caller once.
    pub fn async_yield_impl(&mut self) -> Result<()> {
        use crate::runtime::vm::Yield;

        let mut future = Yield::new();

        // When control returns, we have a `Result<()>` passed
        // in from the host fiber. If this finished successfully then
        // we were resumed normally via a `poll`, so keep going. If
        // the future was dropped while we were yielded, then we need
        // to clean up this fiber. Do so by raising a trap which will
        // abort all wasm and get caught on the other side to clean
        // things up.
        unsafe {
            self.async_cx()
                .expect("attempted to pull async context during shutdown")
                .block_on(Pin::new_unchecked(&mut future))
        }
    }

    fn allocate_fiber_stack(&mut self) -> Result<wasmtime_fiber::FiberStack> {
        if let Some(stack) = self.async_state.last_fiber_stack.take() {
            return Ok(stack);
        }
        self.engine().allocator().allocate_fiber_stack()
    }

    fn deallocate_fiber_stack(&mut self, stack: wasmtime_fiber::FiberStack) {
        self.flush_fiber_stack();
        self.async_state.last_fiber_stack = Some(stack);
    }

    /// Releases the last fiber stack to the underlying instance allocator, if
    /// present.
    pub fn flush_fiber_stack(&mut self) {
        if let Some(stack) = self.async_state.last_fiber_stack.take() {
            unsafe {
                self.engine.allocator().deallocate_fiber_stack(stack);
            }
        }
    }

    pub(crate) fn async_guard_range(&self) -> Range<*mut u8> {
        unsafe {
            let ptr = self.async_state.current_poll_cx.get();
            (*ptr).guard_range_start..(*ptr).guard_range_end
        }
    }
}

impl<T> StoreContextMut<'_, T> {
    /// Executes a synchronous computation `func` asynchronously on a new fiber.
    pub(crate) async fn on_fiber<R>(
        &mut self,
        func: impl FnOnce(&mut StoreContextMut<'_, T>) -> R + Send,
    ) -> Result<R>
    where
        T: Send,
    {
        self.0
            .on_fiber(|opaque| {
                let store = unsafe { opaque.traitobj().cast::<StoreInner<T>>().as_mut() };
                func(&mut StoreContextMut(store))
            })
            .await
    }
}

pub struct AsyncCx {
    current_suspend: *mut *mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
    current_poll_cx: *mut *mut Context<'static>,
    track_pkey_context_switch: bool,
}

impl AsyncCx {
    /// Blocks on the asynchronous computation represented by `future` and
    /// produces the result here, in-line.
    ///
    /// This function is designed to only work when it's currently executing on
    /// a native fiber. This fiber provides the ability for us to handle the
    /// future's `Pending` state as "jump back to whomever called the fiber in
    /// an asynchronous fashion and propagate `Pending`". This tight coupling
    /// with `on_fiber` below is what powers the asynchronicity of calling wasm.
    /// Note that the asynchronous part only applies to host functions, wasm
    /// itself never really does anything asynchronous at this time.
    ///
    /// This function takes a `future` and will (appear to) synchronously wait
    /// on the result. While this function is executing it will fiber switch
    /// to-and-from the original frame calling `on_fiber` which should be a
    /// guarantee due to how async stores are configured.
    ///
    /// The return value here is either the output of the future `T`, or a trap
    /// which represents that the asynchronous computation was cancelled. It is
    /// not recommended to catch the trap and try to keep executing wasm, so
    /// we've tried to liberally document this.
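    ///
    /// A hedged, crate-internal usage sketch (hypothetical names, not a
    /// doctest): from code already running on the fiber, synchronously wait
    /// for a host future to complete.
    ///
    /// ```ignore
    /// let async_cx = store.async_cx().expect("fiber is shutting down");
    /// // Safety: we are executing on a fiber created by `on_fiber`.
    /// let output = unsafe { async_cx.block_on(host_future) }?;
    /// ```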
    pub unsafe fn block_on<F>(&self, mut future: F) -> Result<F::Output>
    where
        F: Future + Send,
    {
        let mut future = pin!(future);

        // Take our current `Suspend` context which was configured as soon as
        // our fiber started. Note that we must load it at the front here and
        // save it on our stack frame. While we're polling the future other
        // fibers may be started for recursive computations, and the current
        // suspend context is only preserved at the edges of the fiber, not
        // during the fiber itself.
        //
        // For a little bit of extra safety we also replace the current value
        // with null to try to catch any accidental bugs on our part early.
        // This is all pretty unsafe so we're trying to be careful...
        //
        // Note that there should be a segfaulting test in `async_functions.rs`
        // if this `Reset` is removed.
        let suspend = *self.current_suspend;
        let _reset = Reset(self.current_suspend, suspend);
        *self.current_suspend = ptr::null_mut();
        assert!(!suspend.is_null());

        loop {
            let future_result = {
                let poll_cx = *self.current_poll_cx;
                let _reset = Reset(self.current_poll_cx, poll_cx);
                *self.current_poll_cx = ptr::null_mut();
                assert!(!poll_cx.is_null());
                future.as_mut().poll(&mut *poll_cx)
            };

            match future_result {
                Poll::Ready(t) => break Ok(t),
                Poll::Pending => {}
            }

            // In order to prevent this fiber's MPK state from being munged by
            // other fibers while it is suspended, we save and restore it once
            // execution resumes. Note that when MPK is not supported,
            // these are noops.
            let previous_mask = if self.track_pkey_context_switch {
                let previous_mask = mpk::current_mask();
                mpk::allow(ProtectionMask::all());
                previous_mask
            } else {
                ProtectionMask::all()
            };
            (*suspend).suspend(())?;
            if self.track_pkey_context_switch {
                mpk::allow(previous_mask);
            }
        }
    }
}

struct Reset<T: Copy>(*mut T, T);

impl<T: Copy> Drop for Reset<T> {
    fn drop(&mut self) {
        unsafe {
            *self.0 = self.1;
        }
    }
}