wasmtime/runtime/store/async_.rs
use crate::prelude::*;
use crate::runtime::vm::mpk::{self, ProtectionMask};
use crate::store::{ResourceLimiterInner, StoreInner, StoreOpaque};
#[cfg(feature = "call-hook")]
use crate::CallHook;
use crate::{Engine, Store, StoreContextMut, UpdateDeadline};
use core::cell::UnsafeCell;
use core::future::Future;
use core::ops::Range;
use core::pin::{pin, Pin};
use core::ptr;
use core::task::{Context, Poll};

/// An object that can take callbacks when the runtime enters or exits hostcalls.
#[cfg(feature = "call-hook")]
#[async_trait::async_trait]
pub trait CallHookHandler<T>: Send {
    /// A callback to run when wasmtime is about to enter a host call, or when about to
    /// exit the hostcall.
    async fn handle_call_event(&self, t: StoreContextMut<'_, T>, ch: CallHook) -> Result<()>;
}

pub struct AsyncState {
    /// The `Suspend` handle of the fiber currently executing on this store,
    /// used by `AsyncCx::block_on` to switch back to the fiber's caller. Null
    /// when no fiber is executing.
    current_suspend: UnsafeCell<*mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>>,
    /// The `Context` pointer (plus async guard range) of whoever is currently
    /// polling this store's fiber, installed in `FiberFuture::poll` below.
    current_poll_cx: UnsafeCell<PollContext>,
    /// The last fiber stack that was in use by this store.
    last_fiber_stack: Option<wasmtime_fiber::FiberStack>,
}
29
30impl Default for AsyncState {
31 fn default() -> AsyncState {
32 AsyncState {
33 current_suspend: UnsafeCell::new(ptr::null_mut()),
34 current_poll_cx: UnsafeCell::new(PollContext::default()),
35 last_fiber_stack: None,
36 }
37 }
38}
39
40// Lots of pesky unsafe cells and pointers in this structure. This means we need
41// to declare explicitly that we use this in a threadsafe fashion.
42unsafe impl Send for AsyncState {}
43unsafe impl Sync for AsyncState {}
44
45#[derive(Clone, Copy)]
46struct PollContext {
47 future_context: *mut Context<'static>,
48 guard_range_start: *mut u8,
49 guard_range_end: *mut u8,
50}
51
52impl Default for PollContext {
53 fn default() -> PollContext {
54 PollContext {
55 future_context: core::ptr::null_mut(),
56 guard_range_start: core::ptr::null_mut(),
57 guard_range_end: core::ptr::null_mut(),
58 }
59 }
60}
61
62impl<T> Store<T> {
63 /// Configures the [`ResourceLimiterAsync`](crate::ResourceLimiterAsync)
64 /// used to limit resource creation within this [`Store`].
65 ///
66 /// This method is an asynchronous variant of the [`Store::limiter`] method
67 /// where the embedder can block the wasm request for more resources with
68 /// host `async` execution of futures.
69 ///
70 /// By using a [`ResourceLimiterAsync`](`crate::ResourceLimiterAsync`)
71 /// with a [`Store`], you can no longer use
72 /// [`Memory::new`](`crate::Memory::new`),
73 /// [`Memory::grow`](`crate::Memory::grow`),
74 /// [`Table::new`](`crate::Table::new`), and
75 /// [`Table::grow`](`crate::Table::grow`). Instead, you must use their
76 /// `async` variants: [`Memory::new_async`](`crate::Memory::new_async`),
77 /// [`Memory::grow_async`](`crate::Memory::grow_async`),
78 /// [`Table::new_async`](`crate::Table::new_async`), and
79 /// [`Table::grow_async`](`crate::Table::grow_async`).
80 ///
    /// Note that this limiter is only used to limit the creation/growth of
    /// resources in the future; it does not retroactively attempt to apply
    /// limits to the [`Store`]. Additionally this must be used with an async
    /// [`Store`] configured via
    /// [`Config::async_support`](crate::Config::async_support).
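    ///
    /// # Example
    ///
    /// A minimal sketch of wiring up an async limiter. `StoreData` and
    /// `MyAsyncLimiter` are hypothetical embedder types; the only requirement
    /// is that `MyAsyncLimiter` implements
    /// [`ResourceLimiterAsync`](crate::ResourceLimiterAsync).
    ///
    /// ```ignore
    /// use wasmtime::{Config, Engine, Store};
    ///
    /// struct StoreData {
    ///     limiter: MyAsyncLimiter,
    /// }
    ///
    /// let mut config = Config::new();
    /// config.async_support(true);
    /// let engine = Engine::new(&config)?;
    ///
    /// // The limiter lives in the store's data so the accessor closure below
    /// // can hand out a mutable reference to it.
    /// let mut store = Store::new(&engine, StoreData { limiter: MyAsyncLimiter::default() });
    /// store.limiter_async(|data| &mut data.limiter);
    /// ```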
    pub fn limiter_async(
        &mut self,
        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync)
            + Send
            + Sync
            + 'static,
    ) {
        debug_assert!(self.inner.async_support());
        // Apply the limits on instances, tables, and memory given by the limiter:
        let inner = &mut self.inner;
        let (instance_limit, table_limit, memory_limit) = {
            let l = limiter(&mut inner.data);
            (l.instances(), l.tables(), l.memories())
        };
        let innermost = &mut inner.inner;
        innermost.instance_limit = instance_limit;
        innermost.table_limit = table_limit;
        innermost.memory_limit = memory_limit;

        // Save the limiter accessor function:
        inner.limiter = Some(ResourceLimiterInner::Async(Box::new(limiter)));
    }

    /// Configures an async function that runs on calls and returns between
    /// WebAssembly and host code. For the non-async equivalent of this method,
    /// see [`Store::call_hook`].
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function's future may return a [`Trap`]. If a trap is returned
    /// when an import was called, it is immediately raised as-if the host
    /// import had returned the trap. If a trap is returned after wasm returns
    /// to the host then the wasm function's result is ignored and this trap is
    /// returned instead.
    ///
    /// After this function returns a trap, it may be called for subsequent
    /// returns to host or wasm code as the trap propagates to the root call.
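    ///
    /// # Example
    ///
    /// A minimal sketch of an async hook. `StoreData` is a hypothetical store
    /// data type with a `transitions` counter, and `store` is assumed to be a
    /// `Store<StoreData>` created with async support enabled. The
    /// `#[async_trait]` attribute is required because [`CallHookHandler`] is
    /// declared with `async_trait` above.
    ///
    /// ```ignore
    /// use wasmtime::{CallHook, CallHookHandler, Result, StoreContextMut};
    ///
    /// struct CountingHook;
    ///
    /// #[async_trait::async_trait]
    /// impl CallHookHandler<StoreData> for CountingHook {
    ///     async fn handle_call_event(
    ///         &self,
    ///         mut store: StoreContextMut<'_, StoreData>,
    ///         _hook: CallHook,
    ///     ) -> Result<()> {
    ///         // An async hook may `.await` here, e.g. to rate-limit hostcalls.
    ///         store.data_mut().transitions += 1;
    ///         Ok(())
    ///     }
    /// }
    ///
    /// store.call_hook_async(CountingHook);
    /// ```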
    #[cfg(feature = "call-hook")]
    pub fn call_hook_async(&mut self, hook: impl CallHookHandler<T> + Send + Sync + 'static) {
        self.inner.call_hook = Some(crate::store::CallHookInner::Async(Box::new(hook)));
    }

    /// Perform garbage collection asynchronously.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
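    ///
    /// # Example
    ///
    /// A minimal sketch, assuming an async context and a store whose data type
    /// is `Send`:
    ///
    /// ```ignore
    /// // Explicitly request a collection; `None` means the call was not
    /// // triggered by a GC-heap out-of-memory condition.
    /// store.gc_async(None).await?;
    /// ```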
    #[cfg(feature = "gc")]
    pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()>
    where
        T: Send,
    {
        self.inner.gc_async(why).await
    }

    /// Configures epoch-deadline expiration to yield to the async
    /// caller and update the deadline.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will yield
    /// (the future will return `Pending` but re-awake itself for
    /// later execution) and, upon resuming, the store will be
    /// configured with an epoch deadline equal to the current epoch
    /// plus `delta` ticks.
    ///
    /// This setting is intended to allow for cooperative timeslicing
    /// of multiple CPU-bound Wasm guests in different stores, all
    /// executing under the control of an async executor. To drive
    /// this, stores should be configured to "yield and update"
    /// automatically with this function, and some external driver (a
    /// thread that wakes up periodically, or a timer
    /// signal/interrupt) should call
    /// [`Engine::increment_epoch()`](crate::Engine::increment_epoch).
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
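    ///
    /// # Example
    ///
    /// A sketch of cooperative timeslicing under these assumptions: the
    /// [`Config`](crate::Config) enables both `async_support` and
    /// `epoch_interruption`, and a helper thread plays the role of the
    /// external driver described above.
    ///
    /// ```ignore
    /// use std::time::Duration;
    /// use wasmtime::{Config, Engine, Store};
    ///
    /// let mut config = Config::new();
    /// config.async_support(true);
    /// config.epoch_interruption(true);
    /// let engine = Engine::new(&config)?;
    ///
    /// let mut store = Store::new(&engine, ());
    /// // Yield back to the async executor whenever the deadline is hit, then
    /// // run for one more epoch tick before yielding again.
    /// store.epoch_deadline_async_yield_and_update(1);
    ///
    /// // External driver: bump the epoch periodically from another thread.
    /// let ticker = engine.clone();
    /// std::thread::spawn(move || loop {
    ///     std::thread::sleep(Duration::from_millis(10));
    ///     ticker.increment_epoch();
    /// });
    /// ```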
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.inner.epoch_deadline_async_yield_and_update(delta);
    }
}

impl<'a, T> StoreContextMut<'a, T> {
    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub async fn gc_async(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) -> Result<()>
    where
        T: Send,
    {
        self.0.gc_async(why).await
    }

    /// Configures epoch-deadline expiration to yield to the async
    /// caller and update the deadline.
    ///
    /// For more information see
    /// [`Store::epoch_deadline_async_yield_and_update`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        self.0.epoch_deadline_async_yield_and_update(delta);
    }
}

impl<T> StoreInner<T> {
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_async_yield_and_update(&mut self, delta: u64) {
        assert!(
            self.async_support(),
            "cannot use `epoch_deadline_async_yield_and_update` without enabling async support in the config"
        );
        self.epoch_deadline_behavior =
            Some(Box::new(move |_store| Ok(UpdateDeadline::Yield(delta))));
    }
}

#[doc(hidden)]
impl StoreOpaque {
    /// Executes a synchronous computation `func` asynchronously on a new fiber.
    ///
    /// This function will convert the synchronous `func` into an asynchronous
    /// future. This is done by running `func` in a fiber on a separate native
    /// stack which can be suspended and resumed from.
    ///
    /// Most of the nitty-gritty here is how we juggle the various contexts
    /// necessary to suspend the fiber later on and poll sub-futures. It's hoped
    /// that the various comments are illuminating as to what's going on here.
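    ///
    /// A crate-internal usage sketch (illustrative only; `run_sync` stands in
    /// for whatever synchronous work the caller wants to perform):
    ///
    /// ```ignore
    /// // `func` runs on its own fiber stack; when host code inside it blocks
    /// // on a future via `AsyncCx::block_on`, the fiber suspends and the
    /// // future returned here yields `Pending` to the outer executor.
    /// let value = store_opaque.on_fiber(|store| run_sync(store)).await?;
    /// ```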
    pub(crate) async fn on_fiber<R>(
        &mut self,
        func: impl FnOnce(&mut Self) -> R + Send,
    ) -> Result<R> {
        let config = self.engine().config();
        debug_assert!(self.async_support());
        debug_assert!(config.async_stack_size > 0);

        let mut slot = None;
        let mut future = {
            let current_poll_cx = self.async_state.current_poll_cx.get();
            let current_suspend = self.async_state.current_suspend.get();
            let stack = self.allocate_fiber_stack()?;
            let track_pkey_context_switch = self.pkey.is_some();

            let engine = self.engine().clone();
            let slot = &mut slot;
            let this = &mut *self;
            let fiber = wasmtime_fiber::Fiber::new(stack, move |keep_going, suspend| {
                // First check and see if we were interrupted/dropped, and only
                // continue if we haven't been.
                keep_going?;

                // Configure our store's suspension context for the rest of the
                // execution of this fiber. Note that a raw pointer is stored here
                // which is only valid for the duration of this closure.
                // Consequently we at least replace it with the previous value when
                // we're done. This reset is also required for correctness because
                // otherwise our value will overwrite another active fiber's value.
                // There should be a test that segfaults in `async_functions.rs` if
                // this `Replace` is removed.
                unsafe {
                    let _reset = Reset(current_suspend, *current_suspend);
                    *current_suspend = suspend;

                    *slot = Some(func(this));
                    Ok(())
                }
            })?;

            // Once we have the fiber representing our synchronous computation, we
            // wrap that in a custom future implementation which does the
            // translation from the future protocol to our fiber API.
            FiberFuture {
                fiber: Some(fiber),
                current_poll_cx,
                engine,
                fiber_resume_state: Some(FiberResumeState {
                    tls: crate::runtime::vm::AsyncWasmCallState::new(),
                    mpk: if track_pkey_context_switch {
                        Some(ProtectionMask::all())
                    } else {
                        None
                    },
                }),
            }
        };
        (&mut future).await?;
        let stack = future.fiber.take().map(|f| f.into_stack());
        drop(future);
        if let Some(stack) = stack {
            self.deallocate_fiber_stack(stack);
        }

        return Ok(slot.unwrap());

        struct FiberFuture<'a> {
            fiber: Option<wasmtime_fiber::Fiber<'a, Result<()>, (), Result<()>>>,
            current_poll_cx: *mut PollContext,
            engine: Engine,
            // See comments in `FiberResumeState` for this
            fiber_resume_state: Option<FiberResumeState>,
        }

        // This is surely the most dangerous `unsafe impl Send` in the entire
        // crate. There are two members in `FiberFuture` which cause it to not
        // be `Send`. One is `current_poll_cx` and is entirely uninteresting.
        // This is just used to manage `Context` pointers across `await` points
        // in the future, and requires raw pointers to get it to happen easily.
        // Nothing too weird about the `Send`-ness, values aren't actually
        // crossing threads.
        //
        // The really interesting piece is `fiber`. Now the "fiber" here is
        // actual honest-to-god Rust code which we're moving around. What we're
        // doing is the equivalent of moving our thread's stack to another OS
        // thread. Turns out we, in general, have no idea what's on the stack
        // and would generally have no way to verify that this is actually safe
        // to do!
        //
        // Thankfully, though, Wasmtime has the power. Without being glib it's
        // actually worth examining what's on the stack. It's unfortunately not
        // super-local to this function itself. Our closure to `Fiber::new` runs
        // `func`, which is given to us from the outside. Thankfully, though, we
        // have tight control over this. Usage of `on_fiber` is typically done
        // *just* before entering WebAssembly itself, so we'll have a few stack
        // frames of Rust code (all in Wasmtime itself) before we enter wasm.
        //
        // Once we've entered wasm, well then we have a whole bunch of wasm
        // frames on the stack. We've got this nifty thing called Cranelift,
        // though, which allows us to also have complete control over everything
        // on the stack!
        //
        // Finally, when wasm switches back to the fiber's starting pointer
        // (this future we're returning) then it means wasm has reentered Rust.
        // Suspension can only happen via the `block_on` function of an
        // `AsyncCx`. This, conveniently, also happens entirely in Wasmtime
        // controlled code!
        //
        // There's an extremely important point that should be called out here.
        // User-provided futures **are not on the stack** during suspension
        // points. This is extremely crucial because we in general cannot reason
        // about Send/Sync for stack-local variables since rustc doesn't analyze
        // them at all. With our construction, though, we are guaranteed that
        // Wasmtime owns all stack frames between the stack of a fiber and when
        // the fiber suspends (and it could move across threads). At this time
        // the only user-provided piece of data on the stack is the future
        // itself given to us. Lo-and-behold as you might notice the future is
        // required to be `Send`!
        //
        // What this all boils down to is that we, as the authors of Wasmtime,
        // need to be extremely careful that on the async fiber stack we only
        // store Send things. For example we can't start using `Rc` willy nilly
        // by accident and leave a copy in TLS somewhere. (similarly we have to
        // be ready for TLS to change while we're executing wasm code between
        // suspension points).
        //
        // While somewhat onerous it shouldn't be too hard (the TLS bit is
        // the hardest bit so far). This does mean, though, that no user should
        // ever have to worry about the `Send`-ness of Wasmtime. If rustc says
        // it's ok, then it's ok.
        //
        // With all that in mind we unsafely assert here that wasmtime is
        // correct. We declare the fiber as only containing Send data on its
        // stack, despite not knowing for sure at compile time that this is
        // correct. That's what `unsafe` in Rust is all about, though, right?
        unsafe impl Send for FiberFuture<'_> {}

        impl FiberFuture<'_> {
            fn fiber(&self) -> &wasmtime_fiber::Fiber<'_, Result<()>, (), Result<()>> {
                self.fiber.as_ref().unwrap()
            }

            /// This is a helper function to call `resume` on the underlying
            /// fiber while correctly managing Wasmtime's state that the fiber
            /// may clobber.
            ///
            /// ## Return Value
            ///
            /// * `Ok(Ok(()))` - the fiber successfully completed and yielded a
            ///   successful result.
            /// * `Ok(Err(e))` - the fiber successfully completed and yielded
            ///   an error as a result of computation.
            /// * `Err(())` - the fiber has not finished and it is suspended.
            fn resume(&mut self, val: Result<()>) -> Result<Result<()>, ()> {
                unsafe {
                    let prev = self.fiber_resume_state.take().unwrap().replace();
                    let restore = Restore {
                        fiber: self,
                        prior_fiber_state: Some(prev),
                    };
                    return restore.fiber.fiber().resume(val);
                }

                struct Restore<'a, 'b> {
                    fiber: &'a mut FiberFuture<'b>,
                    prior_fiber_state: Option<PriorFiberResumeState>,
                }

                impl Drop for Restore<'_, '_> {
                    fn drop(&mut self) {
                        unsafe {
                            self.fiber.fiber_resume_state =
                                Some(self.prior_fiber_state.take().unwrap().replace());
                        }
                    }
                }
            }
        }

        impl Future for FiberFuture<'_> {
            type Output = Result<()>;

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
                // We need to carry over this `cx` into our fiber's runtime
                // for when it tries to poll sub-futures that are created. Doing
                // this must be done unsafely, however, since `cx` is only alive
                // for this one singular function call. Here we do a `transmute`
                // to extend the lifetime of `Context` so it can be stored in
                // our `Store`, and then we replace the current polling context
                // with this one.
                //
                // Note that the replace is done for weird situations where
                // futures might be switching contexts and there's multiple
                // wasmtime futures in a chain of futures.
                //
                // On exit from this function, though, we reset the polling
                // context back to what it was to signify that `Store` no longer
                // has access to this pointer.
                let guard = self
                    .fiber()
                    .stack()
                    .guard_range()
                    .unwrap_or(core::ptr::null_mut()..core::ptr::null_mut());
                unsafe {
                    let _reset = Reset(self.current_poll_cx, *self.current_poll_cx);
                    *self.current_poll_cx = PollContext {
                        future_context: core::mem::transmute::<
                            &mut Context<'_>,
                            *mut Context<'static>,
                        >(cx),
                        guard_range_start: guard.start,
                        guard_range_end: guard.end,
                    };

                    // After that's set up we resume execution of the fiber, which
                    // may also start the fiber for the first time. This either
                    // returns `Ok` saying the fiber finished (yay!) or it
                    // returns `Err` with the payload passed to `suspend`, which
                    // in our case is `()`.
                    match self.resume(Ok(())) {
                        Ok(result) => Poll::Ready(result),

                        // If `Err` is returned that means the fiber polled a
                        // future but it said "Pending", so we propagate that
                        // here.
                        //
                        // An additional safety check is performed when leaving
                        // this function to help bolster the guarantees of
                        // `unsafe impl Send` above. Notably this future may get
                        // re-polled on a different thread. Wasmtime's
                        // thread-local state points to the stack, however,
                        // meaning that it would be incorrect to leave a pointer
                        // in TLS when this function returns. This function
                        // performs a runtime assert to verify that this is the
                        // case, notably that the one TLS pointer Wasmtime uses
                        // is not pointing anywhere within the stack. If it is
                        // then that's a bug indicating that TLS management in
                        // Wasmtime is incorrect.
                        Err(()) => {
                            if let Some(range) = self.fiber().stack().range() {
                                crate::runtime::vm::AsyncWasmCallState::assert_current_state_not_in_range(range);
                            }
                            Poll::Pending
                        }
                    }
                }
            }
        }

        // Dropping futures is pretty special in that it means the future has
        // been requested to be cancelled. Here we run the risk of dropping an
        // in-progress fiber, and if we were to do nothing then the fiber would
        // leak all its owned stack resources.
        //
        // To handle this we implement `Drop` here and, if the fiber isn't done,
        // resume execution of the fiber saying "hey please stop you're
        // interrupted". Our `Trap` created here (which has the stack trace
        // of whomever dropped us) will then get propagated in whatever called
        // `block_on`, and the idea is that the trap propagates all the way back
        // up to the original fiber start, finishing execution.
        //
        // We don't actually care about the fiber's return value here (no one's
        // around to look at it), we just assert the fiber finished to
        // completion.
        impl Drop for FiberFuture<'_> {
            fn drop(&mut self) {
                if self.fiber.is_none() {
                    return;
                }

                if !self.fiber().done() {
                    let result = self.resume(Err(anyhow!("future dropped")));
                    // This resumption with an error should always complete the
                    // fiber. While it's technically possible for host code to
                    // catch the trap and re-resume, we'd ideally like to
                    // signal to callers that they shouldn't be doing that.
                    debug_assert!(result.is_ok());

                    // Note that `result` is `Ok(r)` where `r` is either
                    // `Ok(())` or `Err(e)`. If it's an error that's disposed of
                    // here. It's expected to be a propagation of the `future
                    // dropped` error created above.
                }

                self.fiber_resume_state.take().unwrap().dispose();

                unsafe {
                    self.engine
                        .allocator()
                        .deallocate_fiber_stack(self.fiber.take().unwrap().into_stack());
                }
            }
        }

        /// State of the world when a fiber last suspended.
        ///
        /// This structure represents global state that a fiber clobbers during
        /// its execution. For example TLS variables are updated, system
        /// resources like MPK masks are updated, etc. The purpose of this
        /// structure is to track all of this state and appropriately
        /// save/restore it around fiber suspension points.
        struct FiberResumeState {
            /// Saved list of `CallThreadState` activations that are stored on a
            /// fiber stack.
            ///
            /// This is a linked list that references stack-stored nodes on the
            /// fiber stack that is currently suspended. The
            /// `AsyncWasmCallState` type documents this more thoroughly but the
            /// general gist is that when this fiber is resumed this linked
            /// list needs to be pushed on to the current thread's linked list
            /// of activations.
            tls: crate::runtime::vm::AsyncWasmCallState,

            /// Saved MPK protection mask, if enabled.
            ///
            /// When MPK is enabled then executing WebAssembly will modify the
            /// processor's current mask of addressable protection keys. This
            /// means that our current state may get clobbered when a fiber
            /// suspends. To ensure that this function preserves context it
            /// will, when MPK is enabled, save the current mask when this
            /// function is called and then restore the mask when the function
            /// returns (aka the fiber suspends).
            mpk: Option<ProtectionMask>,
        }

        impl FiberResumeState {
            unsafe fn replace(self) -> PriorFiberResumeState {
                let tls = self.tls.push();
                let mpk = swap_mpk_states(self.mpk);
                PriorFiberResumeState { tls, mpk }
            }

            fn dispose(self) {
                self.tls.assert_null();
            }
        }

        struct PriorFiberResumeState {
            tls: crate::runtime::vm::PreviousAsyncWasmCallState,
            mpk: Option<ProtectionMask>,
        }

        impl PriorFiberResumeState {
            unsafe fn replace(self) -> FiberResumeState {
                let tls = self.tls.restore();
                let mpk = swap_mpk_states(self.mpk);
                FiberResumeState { tls, mpk }
            }
        }

        fn swap_mpk_states(mask: Option<ProtectionMask>) -> Option<ProtectionMask> {
            mask.map(|mask| {
                let current = mpk::current_mask();
                mpk::allow(mask);
                current
            })
        }
    }

    #[cfg(feature = "gc")]
    pub(super) async fn do_gc_async(&mut self) {
        assert!(
            self.async_support(),
            "cannot use `gc_async` without enabling async support in the config",
        );

        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin Async GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots_async(&mut roots).await;
        self.unwrap_gc_store_mut()
            .gc_async(unsafe { roots.iter() })
            .await;

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End Async GC ===========");
    }

    #[inline]
    #[cfg(not(feature = "gc"))]
    pub async fn gc_async(&mut self) {
        // Nothing to collect.
        //
        // Note that this is *not* a public method, this is just defined for the
        // crate-internal `StoreOpaque` type. This is a convenience so that we
        // don't have to `cfg` every call site.
    }

    #[cfg(feature = "gc")]
    async fn trace_roots_async(&mut self, gc_roots_list: &mut crate::runtime::vm::GcRootsList) {
        use crate::runtime::vm::Yield;

        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        Yield::new().await;
        self.trace_vmctx_roots(gc_roots_list);
        Yield::new().await;
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }

    /// Yields the async context, assuming that we are executing on a fiber and
    /// that fiber is not in the process of dying. This function will return
    /// None in the latter case (the fiber is dying), and panic if
    /// `async_support()` is false.
    #[inline]
    pub fn async_cx(&self) -> Option<AsyncCx> {
        assert!(self.async_support());

        let poll_cx_box_ptr = self.async_state.current_poll_cx.get();
        if poll_cx_box_ptr.is_null() {
            return None;
        }

        let poll_cx_inner_ptr = unsafe { *poll_cx_box_ptr };
        if poll_cx_inner_ptr.future_context.is_null() {
            return None;
        }

        Some(AsyncCx {
            current_suspend: self.async_state.current_suspend.get(),
            current_poll_cx: unsafe { &raw mut (*poll_cx_box_ptr).future_context },
        })
    }

    /// Yields execution to the caller on out-of-gas or epoch interruption.
    ///
    /// This only works on async futures and stores, and assumes that we're
    /// executing on a fiber. This will yield execution back to the caller once.
    pub fn async_yield_impl(&mut self) -> Result<()> {
        use crate::runtime::vm::Yield;

        let mut future = Yield::new();

        // When control returns, we have a `Result<()>` passed
        // in from the host fiber. If this finished successfully then
        // we were resumed normally via a `poll`, so keep going. If
        // the future was dropped while we were yielded, then we need
        // to clean up this fiber. Do so by raising a trap which will
        // abort all wasm and get caught on the other side to clean
        // things up.
        unsafe {
            self.async_cx()
                .expect("attempted to pull async context during shutdown")
                .block_on(Pin::new_unchecked(&mut future))
        }
    }

    fn allocate_fiber_stack(&mut self) -> Result<wasmtime_fiber::FiberStack> {
        if let Some(stack) = self.async_state.last_fiber_stack.take() {
            return Ok(stack);
        }
        self.engine().allocator().allocate_fiber_stack()
    }

    fn deallocate_fiber_stack(&mut self, stack: wasmtime_fiber::FiberStack) {
        self.flush_fiber_stack();
        self.async_state.last_fiber_stack = Some(stack);
    }

    /// Releases the last fiber stack to the underlying instance allocator, if
    /// present.
    pub fn flush_fiber_stack(&mut self) {
        if let Some(stack) = self.async_state.last_fiber_stack.take() {
            unsafe {
                self.engine.allocator().deallocate_fiber_stack(stack);
            }
        }
    }

    pub(crate) fn async_guard_range(&self) -> Range<*mut u8> {
        unsafe {
            let ptr = self.async_state.current_poll_cx.get();
            (*ptr).guard_range_start..(*ptr).guard_range_end
        }
    }
}

impl<T> StoreContextMut<'_, T> {
    /// Executes a synchronous computation `func` asynchronously on a new fiber.
    pub(crate) async fn on_fiber<R>(
        &mut self,
        func: impl FnOnce(&mut StoreContextMut<'_, T>) -> R + Send,
    ) -> Result<R>
    where
        T: Send,
    {
        self.0
            .on_fiber(|opaque| {
                let store = unsafe { opaque.traitobj().cast::<StoreInner<T>>().as_mut() };
                func(&mut StoreContextMut(store))
            })
            .await
    }
}

pub struct AsyncCx {
    current_suspend: *mut *mut wasmtime_fiber::Suspend<Result<()>, (), Result<()>>,
    current_poll_cx: *mut *mut Context<'static>,
}

impl AsyncCx {
    /// Blocks on the asynchronous computation represented by `future` and
    /// produces the result here, in-line.
    ///
    /// This function is designed to only work when it's currently executing on
    /// a native fiber. This fiber provides the ability for us to handle the
    /// future's `Pending` state as "jump back to whomever called the fiber in
    /// an asynchronous fashion and propagate `Pending`". This tight coupling
    /// with `on_fiber` above is what powers the asynchronicity of calling wasm.
    /// Note that the asynchronous part only applies to host functions, wasm
    /// itself never really does anything asynchronous at this time.
    ///
    /// This function takes a `future` and will (appear to) synchronously wait
    /// on the result. While this function is executing it will fiber switch
    /// to-and-from the original frame calling `on_fiber` which should be a
    /// guarantee due to how async stores are configured.
    ///
    /// The return value here is either the output of the future `T`, or a trap
    /// which represents that the asynchronous computation was cancelled. It is
    /// not recommended to catch the trap and try to keep executing wasm, so
    /// we've tried to liberally document this.
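    ///
    /// A sketch of the typical crate-internal call pattern (compare
    /// `StoreOpaque::async_yield_impl`; `some_future` stands in for any
    /// `Future + Send`):
    ///
    /// ```ignore
    /// let cx = store.async_cx().expect("not executing on a fiber");
    /// // Fiber-switches back to the fiber's caller whenever `some_future`
    /// // returns `Pending`.
    /// let output = unsafe { cx.block_on(some_future)? };
    /// ```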
    pub unsafe fn block_on<F>(&self, future: F) -> Result<F::Output>
    where
        F: Future + Send,
    {
        let mut future = pin!(future);

        // Take our current `Suspend` context which was configured as soon as
        // our fiber started. Note that we must load it at the front here and
        // save it on our stack frame. While we're polling the future other
        // fibers may be started for recursive computations, and the current
        // suspend context is only preserved at the edges of the fiber, not
        // during the fiber itself.
        //
        // For a little bit of extra safety we also replace the current value
        // with null to try to catch any accidental bugs on our part early.
        // This is all pretty unsafe so we're trying to be careful...
        //
        // Note that there should be a segfaulting test in `async_functions.rs`
        // if this `Reset` is removed.
        let suspend = *self.current_suspend;
        let _reset = Reset(self.current_suspend, suspend);
        *self.current_suspend = ptr::null_mut();
        assert!(!suspend.is_null());

        loop {
            let future_result = {
                let poll_cx = *self.current_poll_cx;
                let _reset = Reset(self.current_poll_cx, poll_cx);
                *self.current_poll_cx = ptr::null_mut();
                assert!(!poll_cx.is_null());
                future.as_mut().poll(&mut *poll_cx)
            };

            match future_result {
                Poll::Ready(t) => break Ok(t),
                Poll::Pending => {}
            }

            (*suspend).suspend(())?;
        }
    }
}

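/// A small RAII guard that writes a saved value back through a raw pointer
/// when dropped.
///
/// Used throughout this file to restore the store's `current_suspend` and
/// `current_poll_cx` pointers to their previous values when a scope exits,
/// including on unwind.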
struct Reset<T: Copy>(*mut T, T);

impl<T: Copy> Drop for Reset<T> {
    fn drop(&mut self) {
        unsafe {
            *self.0 = self.1;
        }
    }
}