// wasmtime/runtime/vm/traphandlers.rs
1//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
3
4mod backtrace;
5
6#[cfg(feature = "coredump")]
7#[path = "traphandlers/coredump_enabled.rs"]
8mod coredump;
9#[cfg(not(feature = "coredump"))]
10#[path = "traphandlers/coredump_disabled.rs"]
11mod coredump;
12
13#[cfg(all(has_native_signals))]
14mod signals;
15#[cfg(all(has_native_signals))]
16pub use self::signals::*;
17
18#[cfg(feature = "gc")]
19use crate::ThrownException;
20use crate::runtime::module::lookup_code;
21use crate::runtime::store::{ExecutorRef, StoreOpaque};
22use crate::runtime::vm::sys::traphandlers;
23use crate::runtime::vm::{InterpreterRef, VMContext, VMStore, VMStoreContext, f32x4, f64x2, i8x16};
24#[cfg(all(feature = "debug", feature = "gc"))]
25use crate::store::AsStoreOpaque;
26use crate::{EntryStoreContext, prelude::*};
27use crate::{StoreContextMut, WasmBacktrace};
28use core::cell::Cell;
29use core::num::NonZeroU32;
30use core::ptr::{self, NonNull};
31use wasmtime_unwinder::Handler;
32
33#[cfg(feature = "debug")]
34pub(crate) use self::backtrace::Activation;
35pub use self::backtrace::Backtrace;
36#[cfg(feature = "gc")]
37pub use wasmtime_unwinder::Frame;
38
39pub use self::coredump::CoreDumpStack;
40pub use self::tls::tls_eager_initialize;
41#[cfg(feature = "async")]
42pub use self::tls::{AsyncWasmCallState, PreviousAsyncWasmCallState};
43
44pub use traphandlers::SignalHandler;
45
/// Register state captured at the point a trap occurred.
///
/// NOTE(review): presumably read out of the platform signal/exception
/// context by the `signals` module — confirm against the `sys` backends.
pub(crate) struct TrapRegisters {
    /// Program counter at the trapping instruction.
    pub pc: usize,
    /// Frame pointer of the trapping frame.
    pub fp: usize,
}
50
/// Return value from `test_if_trap`.
pub(crate) enum TrapTest {
    /// Not a wasm trap, need to delegate to whatever process handler is next.
    NotWasm,
    /// This trap was handled by the embedder via custom embedding APIs.
    #[cfg(has_host_compiler_backend)]
    #[cfg_attr(miri, expect(dead_code, reason = "using #[cfg] too unergonomic"))]
    HandledByEmbedder,
    /// This is a wasm trap, it needs to be handled. The `Handler` describes
    /// where control should be transferred to perform the unwind.
    Trap(Handler),
}
62
/// Performs lazy per-thread initialization of trap handling, delegating to
/// the platform-specific `sys::traphandlers` implementation.
fn lazy_per_thread_init() {
    traphandlers::lazy_per_thread_init();
}
66
/// Raises a preexisting trap or exception and unwinds.
///
/// If the preexisting state has registered a trap, this function will execute
/// the `Handler::resume` to make its way back to the original exception
/// handler created when Wasm was entered. If the state has registered an
/// exception, this function will perform the unwind action registered: either
/// resetting PC, FP, and SP to the handler in the middle of the Wasm
/// activation on the stack, or the entry trampoline back to the host, if
/// the exception is uncaught.
///
/// This is currently only called from the `raise` builtin of
/// Wasmtime. This builtin is only used when the host returns back to
/// wasm and indicates that a trap or exception should be raised. In
/// this situation the host has already stored trap or exception
/// information within the `CallThreadState` and this is the low-level
/// operation to actually perform an unwind.
///
/// Note that this function is used both for Pulley and for native execution.
/// For Pulley this function will return and the interpreter will be
/// responsible for handling the control-flow transfer. For native this
/// function will not return as the control flow transfer will be handled
/// internally.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub(super) unsafe fn raise_preexisting_trap(store: &mut dyn VMStore) {
    // `unwrap()` is justified: per the safety contract above, wasm is on the
    // stack, so a `CallThreadState` has been pushed into TLS.
    tls::with(|info| unsafe { info.unwrap().unwind(store) })
}
98
99/// Invokes the closure `f` and handles any error/panic/trap that happens
100/// within.
101///
102/// This will invoke the closure `f` with the provided `store` and the closure
103/// will return a value that implements `HostResult`. This trait abstracts over
104/// how host values are translated to ABI values when going back into wasm.
105/// Some examples are:
106///
107/// * `T` - bare return types (not results) are simply returned as-is. No
108/// `catch_unwind` happens as if a trap can't happen then the host shouldn't
109/// be panicking or invoking user code.
110///
111/// * `Result<(), E>` - this represents an ABI return value of `bool` which
112/// indicates whether the call succeeded. This return value will catch panics
113/// and record trap information as `E`.
114///
115/// * `Result<u32, E>` - the ABI return value here is `u64` where on success
116/// the 32-bit result is zero-extended and `u64::MAX` as a return value
117/// indicates that a trap or panic happened.
118///
119/// This is primarily used in conjunction with the Cranelift-and-host boundary.
120/// This function acts as a bridge between the two to appropriately handle
121/// encoding host values to Cranelift-understood ABIs via the `HostResult`
122/// trait.
123pub fn catch_unwind_and_record_trap<R>(
124 store: &mut dyn VMStore,
125 f: impl FnOnce(&mut dyn VMStore) -> R,
126) -> R::Abi
127where
128 R: HostResult,
129{
130 // Invoke the closure `f`, optionally catching unwinds depending on `R`. The
131 // return value is always provided and if unwind information is provided
132 // (e.g. `ret` is a "false"-y value) then it's recorded in TLS for the
133 // unwind operation that's about to happen from Cranelift-generated code.
134 let (ret, unwind) = R::maybe_catch_unwind(store, |store| f(store));
135 if let Some(unwind) = unwind {
136 tls::with(|info| info.unwrap().record_unwind(store, unwind));
137 }
138 ret
139}
140
/// A trait used in conjunction with `catch_unwind_and_record_trap` to convert a
/// Rust-based type to a specific ABI while handling traps/unwinds.
///
/// This type is implemented for return values from host function calls and
/// libcalls. The `Abi` value of this trait represents either a successful
/// execution with some payload state or that a failed execution happened. In
/// the event of a failed execution the state of the failure itself is stored
/// within `CallThreadState::unwind`. Cranelift-compiled code is expected to
/// test for this failure sentinel and process it accordingly.
///
/// See `catch_unwind_and_record_trap` for some more information as well.
pub trait HostResult {
    /// The type of the value that's returned to Cranelift-compiled code. Needs
    /// to be ABI-safe to pass through an `extern "C"` return value.
    type Abi: Copy;

    /// Executes `f` and returns the ABI/unwind information as a result.
    ///
    /// This may optionally catch unwinds during execution depending on this
    /// implementation. The ABI return value is unconditionally provided. If an
    /// unwind was detected (e.g. a host panic or a wasm trap) then that's
    /// additionally returned as well.
    ///
    /// If an unwind is returned then it's expected that when the host returns
    /// back to wasm (which should be soon after calling this through
    /// `catch_unwind_and_record_trap`) then wasm will very quickly turn around
    /// and initiate an unwind (currently through `raise_preexisting_trap`).
    fn maybe_catch_unwind(
        store: &mut dyn VMStore,
        f: impl FnOnce(&mut dyn VMStore) -> Self,
    ) -> (Self::Abi, Option<UnwindReason>);
}
173
// Base case implementations that do not catch unwinds. These are for libcalls
// that neither trap nor execute user code. The raw value is the ABI itself.
//
// Panics in these libcalls will result in a process abort as unwinding is not
// allowed via Rust through `extern "C"` function boundaries.
macro_rules! host_result_no_catch {
    ($($t:ty,)*) => {
        $(
            // Each type `$t` is its own ABI: no translation and no unwind
            // information is ever produced.
            impl HostResult for $t {
                type Abi = $t;
                #[allow(unreachable_code, reason = "some types uninhabited on some platforms")]
                fn maybe_catch_unwind(
                    store: &mut dyn VMStore,
                    f: impl FnOnce(&mut dyn VMStore) -> $t,
                ) -> ($t, Option<UnwindReason>) {
                    (f(store), None)
                }
            }
        )*
    }
}
195
// Scalar/vector primitives returned directly to compiled code with no
// unwind-catching (see the macro's comment above).
host_result_no_catch! {
    (),
    bool,
    u32,
    *mut u8,
    u64,
    f32,
    f64,
    i8x16,
    f32x4,
    f64x2,
}
208
209impl HostResult for NonNull<u8> {
210 type Abi = *mut u8;
211 fn maybe_catch_unwind(
212 store: &mut dyn VMStore,
213 f: impl FnOnce(&mut dyn VMStore) -> Self,
214 ) -> (*mut u8, Option<UnwindReason>) {
215 (f(store).as_ptr(), None)
216 }
217}
218
/// Implementation of `HostResult` for `Result<T, E>`.
///
/// This is where things get interesting for `HostResult`. This is generically
/// defined to allow many shapes of the `Result` type to be returned from host
/// calls or libcalls. To do this an extra trait requirement is placed on the
/// successful result `T`: `HostResultHasUnwindSentinel`.
///
/// The general requirement is that `T` says what ABI it has, and the ABI must
/// have a sentinel value which indicates that an unwind in wasm should happen.
/// For example if `T = ()` then `true` means that the call succeeded and
/// `false` means that an unwind happened. Here the sentinel is `false` and the
/// ABI is `bool`.
///
/// This is the only implementation of `HostResult` which actually catches
/// unwinds as there's a sentinel to encode.
impl<T, E> HostResult for Result<T, E>
where
    T: HostResultHasUnwindSentinel,
    E: Into<TrapReason>,
{
    type Abi = T::Abi;

    fn maybe_catch_unwind(
        store: &mut dyn VMStore,
        f: impl FnOnce(&mut dyn VMStore) -> Result<T, E>,
    ) -> (T::Abi, Option<UnwindReason>) {
        // First prepare the closure `f` as something that'll be invoked to
        // generate the return value of this function. This is then,
        // conditionally below, passed to `catch_unwind`.
        let f = move || match f(store) {
            Ok(ret) => (ret.into_abi(), None),
            Err(reason) => (T::SENTINEL, Some(UnwindReason::Trap(reason.into()))),
        };

        // With `panic=unwind` use `std::panic::catch_unwind` to catch possible
        // panics to rethrow.
        #[cfg(all(feature = "std", panic = "unwind"))]
        {
            match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
                Ok(result) => result,
                Err(err) => (T::SENTINEL, Some(UnwindReason::Panic(err))),
            }
        }

        // With `panic=abort` there's no use in using `std::panic::catch_unwind`
        // since it won't actually catch anything. Note that
        // `std::panic::catch_unwind` will technically optimize to this but having
        // this branch avoids using the `std::panic` module entirely.
        #[cfg(not(all(feature = "std", panic = "unwind")))]
        {
            f()
        }
    }
}
273
/// Trait used in conjunction with `HostResult for Result<T, E>` where this is
/// the trait bound on `T`.
///
/// This is for values in the "ok" position of a `Result` return value. Each
/// value can have a separate ABI from itself (e.g. `type Abi`) and must be
/// convertible to the ABI. Additionally all implementations of this trait have
/// a "sentinel value" which indicates that an unwind happened. This means that
/// no valid instance of `Self` should generate the `SENTINEL` via the
/// `into_abi` function.
///
/// # Safety
///
/// Implementors must guarantee that `into_abi` never produces `SENTINEL` for
/// any valid `Self` value, otherwise a successful host call would be
/// misinterpreted as an unwind request.
pub unsafe trait HostResultHasUnwindSentinel {
    /// The Cranelift-understood ABI of this value (should not be `Self`).
    type Abi: Copy;

    /// A value that indicates that an unwind should happen and is tested for in
    /// Cranelift-generated code.
    const SENTINEL: Self::Abi;

    /// Converts this value into the ABI representation. Should never return
    /// the `SENTINEL` value.
    fn into_abi(self) -> Self::Abi;
}
295
/// No return value from the host is represented as a `bool` in the ABI. Here
/// `true` means that execution succeeded while `false` is the sentinel used to
/// indicate an unwind.
unsafe impl HostResultHasUnwindSentinel for () {
    type Abi = bool;
    const SENTINEL: bool = false;
    // A successful `()` always encodes as `true`, which can never collide
    // with the `false` sentinel.
    fn into_abi(self) -> bool {
        true
    }
}
306
/// A `NonZeroU32` passes through as a plain `u32` with `0` as the sentinel,
/// which by construction no `NonZeroU32` value can produce.
unsafe impl HostResultHasUnwindSentinel for NonZeroU32 {
    type Abi = u32;
    const SENTINEL: Self::Abi = 0;
    fn into_abi(self) -> Self::Abi {
        self.get()
    }
}
314
/// A 32-bit return value can be inflated to a 64-bit return value in the ABI.
/// In this manner a successful result is a zero-extended 32-bit value and the
/// failure sentinel is `u64::MAX` or -1 as a signed integer.
unsafe impl HostResultHasUnwindSentinel for u32 {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    // Zero-extension: a `u32` can never reach `u64::MAX`.
    fn into_abi(self) -> u64 {
        self.into()
    }
}
325
/// If there is no actual successful result (e.g. an empty enum) then the ABI
/// can be `()`, or nothing, because there's no successful result and it's
/// always a failure.
unsafe impl HostResultHasUnwindSentinel for core::convert::Infallible {
    type Abi = ();
    const SENTINEL: () = ();
    // `Infallible` is uninhabited, so this body can never execute.
    fn into_abi(self) {
        match self {}
    }
}
336
/// A `bool` is widened to a `u32` where success encodes as `0` or `1`;
/// `u32::MAX` is the unwind sentinel, which `u32::from(bool)` never produces.
unsafe impl HostResultHasUnwindSentinel for bool {
    type Abi = u32;
    const SENTINEL: Self::Abi = u32::MAX;
    fn into_abi(self) -> Self::Abi {
        u32::from(self)
    }
}
344
/// Stores a trap's reason together with optional captured context
/// (backtrace and coredump), as assembled by `record_unwind`.
#[derive(Debug)]
pub struct Trap {
    /// Original reason from where this trap originated.
    pub reason: TrapReason,
    /// Wasm backtrace of the trap, if any.
    pub backtrace: Option<Backtrace>,
    /// The Wasm Coredump, if any.
    pub coredumpstack: Option<CoreDumpStack>,
}
355
/// Enumeration of different methods of raising a trap (or a sentinel
/// for an exception).
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised trap through `raise_user_trap`.
    User(Error),

    /// A trap raised from Cranelift-generated code.
    Jit {
        /// The program counter where this trap originated.
        ///
        /// This is later used with side tables from compilation to translate
        /// the trapping address to a trap code.
        pc: usize,

        /// If the trap was a memory-related trap such as SIGSEGV then this
        /// field will contain the address of the inaccessible data.
        ///
        /// Note that wasm loads/stores are not guaranteed to fill in this
        /// information. Dynamically-bounds-checked memories, for example, will
        /// not access an invalid address but may instead load from NULL or may
        /// explicitly jump to a `ud2` instruction. This is only available for
        /// fault-based traps which are one of the main ways, but not the only
        /// way, to run wasm.
        faulting_addr: Option<usize>,

        /// The trap code associated with this trap.
        trap: wasmtime_environ::Trap,
    },

    /// A trap raised from a wasm libcall, carrying the trap code directly.
    Wasm(wasmtime_environ::Trap),

    /// An exception.
    ///
    /// Note that internally, exceptions are rooted on the Store, while
    /// when crossing the public API, exceptions are held in a
    /// `wasmtime::Exception` which contains a boxed root and implements
    /// `Error`. This choice is intentional, to keep the internal
    /// implementation lightweight and ensure the types represent only
    /// allowable states.
    #[cfg(feature = "gc")]
    Exception,
}
400
impl From<Error> for TrapReason {
    fn from(error: Error) -> Self {
        // A `ThrownException` marker error is translated to the dedicated
        // `Exception` variant rather than being carried as a user error; the
        // exception object itself stays rooted on the store.
        #[cfg(feature = "gc")]
        if error.is::<ThrownException>() {
            return TrapReason::Exception;
        }

        TrapReason::User(error)
    }
}
411
// A bare trap code maps directly onto the libcall-style `Wasm` variant.
impl From<wasmtime_environ::Trap> for TrapReason {
    fn from(code: wasmtime_environ::Trap) -> Self {
        TrapReason::Wasm(code)
    }
}
417
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// A fresh `CallThreadState` is pushed into TLS for the duration of `closure`
/// and, on failure, the unwind state it recorded is translated into an
/// appropriate `Err` for the embedder.
pub fn catch_traps<T, F>(
    store: &mut StoreContextMut<'_, T>,
    old_state: &mut EntryStoreContext,
    mut closure: F,
) -> Result<()>
where
    F: FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
{
    let caller = store.0.default_caller();

    // Dispatch to either the Pulley interpreter or native execution; the
    // closure's `bool` return indicates success.
    let result = CallThreadState::new(store.0, old_state).with(|_cx| match store.0.executor() {
        ExecutorRef::Interpreter(r) => closure(caller, Some(r)),
        #[cfg(has_host_compiler_backend)]
        ExecutorRef::Native => closure(caller, None),
    });

    match result {
        Ok(x) => Ok(x),
        // An uncaught exception surfaces to the embedder as the
        // `ThrownException` marker error; the exception object itself remains
        // rooted on the store.
        #[cfg(feature = "gc")]
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Trap(TrapReason::Exception),
            backtrace: _,
            coredump_stack: _,
        }) => Err(ThrownException.into()),
        // All other traps are packaged with their captured backtrace/coredump.
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Trap(reason),
            backtrace,
            coredump_stack,
        }) => Err(crate::trap::from_runtime_box(
            store.0,
            Box::new(Trap {
                reason,
                backtrace,
                coredumpstack: coredump_stack,
            }),
        )),
        // A host panic caught at the wasm boundary is rethrown here, now that
        // the wasm frames have been unwound.
        #[cfg(all(feature = "std", panic = "unwind"))]
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Panic(panic),
            ..
        }) => std::panic::resume_unwind(panic),
        #[cfg(feature = "gc")]
        Err(UnwindState::UnwindToWasm { .. }) => {
            unreachable!("We should not have returned to the host with an UnwindToWasm state");
        }
        Err(UnwindState::None) => {
            unreachable!("We should not have gotten an error with no unwind state");
        }
    }
}
470
471// Module to hide visibility of the `CallThreadState::prev` field and force
472// usage of its accessor methods.
473mod call_thread_state {
474 use super::*;
475 use crate::EntryStoreContext;
476 use crate::runtime::vm::{Unwind, VMStackChain};
477
    /// Queued-up unwinding on the CallThreadState, ready to be
    /// enacted by `unwind()`.
    ///
    /// This represents either a request to unwind to the entry point
    /// from host, with associated data; or a request to
    /// unwind into the middle of the Wasm action, e.g. when an
    /// exception is caught.
    pub enum UnwindState {
        /// Unwind all the way to the entry from host to Wasm, using
        /// the handler configured in the entry trampoline.
        UnwindToHost {
            /// Why the unwind is happening (trap, panic, or exception sentinel).
            reason: UnwindReason,
            /// Captured wasm backtrace, if one was collected.
            backtrace: Option<Backtrace>,
            /// Captured coredump stack, if one was collected.
            coredump_stack: Option<CoreDumpStack>,
        },
        /// Unwind into Wasm. The exception destination has been
        /// resolved. Note that the payload value is still not
        /// specified, because it must remain rooted on the Store
        /// until `unwind()` actually takes the value. The first
        /// payload word in the underlying exception ABI is used to
        /// send the raw `VMExnRef`.
        #[cfg(feature = "gc")]
        UnwindToWasm(Handler),
        /// Do not unwind; the quiescent state with nothing queued.
        None,
    }
504
505 impl UnwindState {
506 pub(super) fn is_none(&self) -> bool {
507 match self {
508 Self::None => true,
509 _ => false,
510 }
511 }
512 }
513
    /// Temporary state stored on the stack which is registered in the `tls`
    /// module below for calls into wasm.
    ///
    /// This structure is stored on the stack and allocated during the
    /// `catch_traps` function above. The purpose of this structure is to track
    /// the state of an "activation" or a sequence of 0-or-more contiguous
    /// WebAssembly call frames. A `CallThreadState` always lives on the stack
    /// and additionally maintains pointers to previous states to form a linked
    /// list of activations.
    ///
    /// One of the primary goals of `CallThreadState` is to store the state of
    /// various fields in `VMStoreContext` when it was created. This is done
    /// because calling WebAssembly will clobber these fields otherwise.
    ///
    /// Another major purpose of `CallThreadState` is to assist with unwinding
    /// and track state necessary when an unwind happens for the original
    /// creator of `CallThreadState` to determine why the unwind happened.
    ///
    /// Note that this structure is pointed-to from TLS, hence liberal usage of
    /// interior mutability here since that only gives access to
    /// `&CallThreadState`.
    pub struct CallThreadState {
        /// Unwind state set when initiating an unwind and read when
        /// the control transfer occurs (after the `raise` point is
        /// reached for host-code destinations and right when
        /// performing the jump for Wasm-code destinations).
        pub(super) unwind: Cell<UnwindState>,
        /// Optional embedder-provided signal handler consulted for traps.
        #[cfg(all(has_native_signals))]
        pub(super) signal_handler: Option<*const SignalHandler>,
        /// Whether a wasm backtrace should be captured when a trap occurs.
        pub(super) capture_backtrace: bool,
        /// Whether a coredump should be captured when a trap occurs.
        #[cfg(feature = "coredump")]
        pub(super) capture_coredump: bool,

        /// Pointer to the store's `VMStoreContext`; re-derived on each
        /// `record_unwind` for provenance.
        pub(crate) vm_store_context: Cell<NonNull<VMStoreContext>>,
        /// Platform/executor-specific unwinding implementation.
        pub(crate) unwinder: &'static dyn Unwind,

        /// Link to the previous activation in the TLS-managed list.
        pub(super) prev: Cell<tls::Ptr>,

        // The state of the runtime for the *previous* `CallThreadState` for
        // this same store. Our *current* state is saved in `self.vm_store_context`,
        // etc. We need access to the old values of these
        // fields because the `VMStoreContext` typically doesn't change across
        // nested calls into Wasm (i.e. they are typically calls back into the
        // same store and `self.vm_store_context == self.prev.vm_store_context`) and we must
        // maintain the list of contiguous-Wasm-frames stack regions for
        // backtracing purposes.
        old_state: *mut EntryStoreContext,
    }
562
    impl Drop for CallThreadState {
        fn drop(&mut self) {
            // Unwind information should not be present as it should have
            // already been processed. (Debug-only check: `replace` is cheap
            // and the assertion compiles away in release builds.)
            debug_assert!(self.unwind.replace(UnwindState::None).is_none());
        }
    }
570
    impl CallThreadState {
        /// Creates a new activation record for a call into wasm, snapshotting
        /// configuration (backtrace/coredump capture) and store pointers. The
        /// `prev` link is initialized to null and set on `push`.
        #[inline]
        pub(super) fn new(
            store: &mut StoreOpaque,
            old_state: *mut EntryStoreContext,
        ) -> CallThreadState {
            CallThreadState {
                unwind: Cell::new(UnwindState::None),
                unwinder: store.unwinder(),
                #[cfg(all(has_native_signals))]
                signal_handler: store.signal_handler(),
                capture_backtrace: store.engine().config().wasm_backtrace_max_frames.is_some(),
                #[cfg(feature = "coredump")]
                capture_coredump: store.engine().config().coredump_on_trap,
                vm_store_context: Cell::new(store.vm_store_context_ptr()),
                prev: Cell::new(ptr::null()),
                old_state,
            }
        }

        /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
        ///
        /// # Safety
        ///
        /// Requires that the saved last Wasm trampoline FP points to
        /// a valid trampoline frame, or is null. Also requires that
        /// `self.old_state` still points to a live `EntryStoreContext`.
        pub unsafe fn old_last_wasm_exit_fp(&self) -> usize {
            let trampoline_fp = unsafe { (&*self.old_state).last_wasm_exit_trampoline_fp };
            // SAFETY: `trampoline_fp` is either a valid FP from an
            // active trampoline frame or is null.
            unsafe { VMStoreContext::wasm_exit_fp_from_trampoline_fp(trampoline_fp) }
        }

        /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
        ///
        /// # Safety
        ///
        /// Requires that `self.old_state` still points to a live
        /// `EntryStoreContext`.
        pub unsafe fn old_last_wasm_exit_pc(&self) -> usize {
            unsafe { (&*self.old_state).last_wasm_exit_pc }
        }

        /// Get the saved FP upon entry into Wasm for the previous `CallThreadState`.
        ///
        /// # Safety
        ///
        /// Requires that `self.old_state` still points to a live
        /// `EntryStoreContext`.
        pub unsafe fn old_last_wasm_entry_fp(&self) -> usize {
            unsafe { (&*self.old_state).last_wasm_entry_fp }
        }

        /// Get the saved `VMStackChain` for the previous `CallThreadState`.
        ///
        /// # Safety
        ///
        /// Requires that `self.old_state` still points to a live
        /// `EntryStoreContext`.
        pub unsafe fn old_stack_chain(&self) -> VMStackChain {
            unsafe { (&*self.old_state).stack_chain.clone() }
        }

        /// Get the previous `CallThreadState`.
        pub fn prev(&self) -> tls::Ptr {
            self.prev.get()
        }

        /// Pushes this `CallThreadState` activation on to the linked list
        /// stored in TLS.
        ///
        /// This method will take the current head of the linked list, stored in
        /// our TLS pointer, and move it into `prev`. The TLS pointer is then
        /// updated to `self`.
        ///
        /// # Panics
        ///
        /// Panics if this activation is already in a linked list (e.g.
        /// `self.prev` is set).
        #[inline]
        pub(crate) unsafe fn push(&self) {
            assert!(self.prev.get().is_null());
            self.prev.set(tls::raw::replace(self));
        }

        /// Pops this `CallThreadState` from the linked list stored in TLS.
        ///
        /// This method will restore `self.prev` into the head of the linked
        /// list stored in TLS and will additionally null-out `self.prev`.
        ///
        /// # Panics
        ///
        /// Panics if this activation isn't the head of the list.
        #[inline]
        pub(crate) unsafe fn pop(&self) {
            let prev = self.prev.replace(ptr::null());
            let head = tls::raw::replace(prev);
            assert!(core::ptr::eq(head, self));
        }

        /// Swaps the state in this `CallThreadState`'s `VMStoreContext` with
        /// the state in `EntryStoreContext` that was saved when this
        /// activation was created.
        ///
        /// This method is used during suspension of a fiber to restore the
        /// store back to what it originally was and prepare it to be resumed
        /// later on. This takes various fields of `VMStoreContext` and swaps
        /// them with what was saved in `EntryStoreContext`. That restores
        /// a store to just before this activation was called but saves off the
        /// fields of this activation to get restored/resumed at a later time.
        #[cfg(feature = "async")]
        pub(super) unsafe fn swap(&self) {
            // Helper to swap an `UnsafeCell` field with its saved counterpart.
            unsafe fn swap<T>(a: &core::cell::UnsafeCell<T>, b: &mut T) {
                unsafe { core::mem::swap(&mut *a.get(), b) }
            }

            unsafe {
                let cx = self.vm_store_context.get().as_ref();
                swap(
                    &cx.last_wasm_exit_trampoline_fp,
                    &mut (*self.old_state).last_wasm_exit_trampoline_fp,
                );
                swap(
                    &cx.last_wasm_exit_pc,
                    &mut (*self.old_state).last_wasm_exit_pc,
                );
                swap(
                    &cx.last_wasm_entry_fp,
                    &mut (*self.old_state).last_wasm_entry_fp,
                );
                swap(
                    &cx.last_wasm_entry_sp,
                    &mut (*self.old_state).last_wasm_entry_sp,
                );
                swap(
                    &cx.last_wasm_entry_trap_handler,
                    &mut (*self.old_state).last_wasm_entry_trap_handler,
                );
                swap(&cx.stack_chain, &mut (*self.old_state).stack_chain);
            }
        }
    }
698}
699pub use call_thread_state::*;
700
701#[cfg(feature = "gc")]
702use super::compute_handler;
703
/// Why an unwind back toward the host is happening.
pub enum UnwindReason {
    /// A caught Rust panic payload, rethrown via `std::panic::resume_unwind`
    /// once the wasm frames have been unwound (see `catch_traps`).
    #[cfg(all(feature = "std", panic = "unwind"))]
    Panic(Box<dyn std::any::Any + Send>),
    /// A wasm trap (or the exception sentinel); see `TrapReason`.
    Trap(TrapReason),
}
709
// Anything convertible to a `TrapReason` is also an `UnwindReason`, which
// lets `?`-style conversions produce unwind state directly.
impl<E> From<E> for UnwindReason
where
    E: Into<TrapReason>,
{
    fn from(value: E) -> UnwindReason {
        UnwindReason::Trap(value.into())
    }
}
718
719impl CallThreadState {
    /// Registers `self` in TLS, runs `closure`, and translates its `bool`
    /// success flag into a `Result`: on failure the queued unwind state is
    /// taken out of `self` and returned as the error.
    #[inline]
    fn with(mut self, closure: impl FnOnce(&CallThreadState) -> bool) -> Result<(), UnwindState> {
        let succeeded = tls::set(&mut self, |me| closure(me));
        if succeeded {
            Ok(())
        } else {
            Err(self.read_unwind())
        }
    }
729
    /// Takes the queued unwind state, leaving `UnwindState::None` behind.
    /// Marked `#[cold]` as this is only reached on the failure path.
    #[cold]
    fn read_unwind(&self) -> UnwindState {
        self.unwind.replace(UnwindState::None)
    }
734
    /// Records the unwind information provided within this `CallThreadState`,
    /// optionally capturing a backtrace at this time.
    ///
    /// This function is used to stash metadata for why an unwind is about to
    /// happen. The actual unwind is expected to happen after this function is
    /// called using, for example, the `unwind` function below.
    ///
    /// Note that this is a relatively low-level function and will panic if
    /// mis-used.
    ///
    /// # Panics
    ///
    /// Panics if unwind information has already been recorded as that should
    /// have been processed first.
    fn record_unwind(&self, store: &mut dyn VMStore, reason: UnwindReason) {
        // Debug-only check that no previous unwind is still pending.
        if cfg!(debug_assertions) {
            let prev = self.unwind.replace(UnwindState::None);
            assert!(prev.is_none());
        }

        // Avoid unused-variable warning in non-exceptions/GC build.
        let _ = store;

        let state = match reason {
            #[cfg(all(feature = "std", panic = "unwind"))]
            UnwindReason::Panic(err) => {
                // Panics don't need backtraces. There is nowhere to attach the
                // hypothetical backtrace to and it doesn't really make sense to try
                // in the first place since this is a Rust problem rather than a
                // Wasm problem.
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Panic(err),
                    backtrace: None,
                    coredump_stack: None,
                }
            }
            // An unwind due to an already-set pending exception
            // triggers the handler-search stack-walk. We store the
            // resolved handler if one exists. In either case, the
            // exception remains rooted in the Store until we actually
            // perform the unwind, and then gets taken and becomes the
            // payload at that point.
            #[cfg(feature = "gc")]
            UnwindReason::Trap(TrapReason::Exception) => {
                // SAFETY: we are invoking `compute_handler()` while
                // Wasm is on the stack and we have re-entered via a
                // trampoline, as required by its stack-walking logic.
                let handler = unsafe { compute_handler(store) };
                match handler {
                    Some(handler) => UnwindState::UnwindToWasm(handler),
                    None => UnwindState::UnwindToHost {
                        reason: UnwindReason::Trap(TrapReason::Exception),
                        backtrace: None,
                        coredump_stack: None,
                    },
                }
            }
            // And if we are just propagating an existing trap that already has
            // a backtrace attached to it, then there is no need to capture a
            // new backtrace either.
            UnwindReason::Trap(TrapReason::User(err))
                if err.downcast_ref::<WasmBacktrace>().is_some() =>
            {
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    backtrace: None,
                    coredump_stack: None,
                }
            }
            // Any other trap: capture backtrace/coredump now (subject to the
            // `capture_*` configuration snapshotted at activation creation).
            UnwindReason::Trap(trap) => {
                log::trace!("Capturing backtrace and coredump for {trap:?}");
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(trap),
                    backtrace: self.capture_backtrace(store.vm_store_context_mut(), None),
                    coredump_stack: self.capture_coredump(store.vm_store_context_mut(), None),
                }
            }
        };

        self.unwind.set(state);

        // Re-derive our VMStoreContext pointer for provenance.
        self.vm_store_context.set(store.vm_store_context_ptr());
    }
819
    /// Helper function to perform an actual unwinding operation.
    ///
    /// This must be preceded by a `record_unwind` operation above to be
    /// processed correctly on the other side.
    ///
    /// # Unsafety
    ///
    /// This function is not safe if a corresponding handler wasn't already
    /// setup in the entry trampoline. Additionally this isn't safe as it may
    /// skip all Rust destructors on the stack, if there are any, for native
    /// executors as `Handler::resume` will be used.
    unsafe fn unwind(&self, store: &mut dyn VMStore) {
        #[allow(unused_mut, reason = "only mutated in `debug` configuration")]
        let mut unwind = self.unwind.replace(UnwindState::None);

        // When the `debug` feature is enabled, give an attached debugger a
        // chance to observe the unwind before it is performed.
        #[cfg(feature = "debug")]
        {
            let result = match &unwind {
                #[cfg(feature = "gc")]
                UnwindState::UnwindToWasm(_) => {
                    use wasmtime_core::alloc::PanicOnOom;

                    assert!(store.as_store_opaque().has_pending_exception());
                    let exn = store
                        .as_store_opaque()
                        .pending_exception_owned_rooted()
                        // TODO(#12069): handle allocation failure here
                        .panic_on_oom()
                        .expect("exception should be set when we are throwing");
                    store.block_on_debug_handler(crate::DebugEvent::CaughtExceptionThrown(exn))
                }
                #[cfg(feature = "gc")]
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Exception),
                    ..
                } => {
                    use wasmtime_core::alloc::PanicOnOom;

                    let exn = store
                        .as_store_opaque()
                        .pending_exception_owned_rooted()
                        // TODO(#12069): handle allocation failure here
                        .panic_on_oom()
                        .expect("exception should be set when we are throwing");
                    store.block_on_debug_handler(crate::DebugEvent::UncaughtExceptionThrown(
                        exn.clone(),
                    ))
                }
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Wasm(trap)),
                    ..
                } => store.block_on_debug_handler(crate::DebugEvent::Trap(*trap)),
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    ..
                } => store.block_on_debug_handler(crate::DebugEvent::HostcallError(err)),

                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Jit { .. }),
                    ..
                } => {
                    // JIT traps not handled yet.
                    Ok(())
                }
                #[cfg(all(feature = "std", panic = "unwind"))]
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Panic(_),
                    ..
                } => {
                    // We don't invoke any debugger hook when we're
                    // unwinding due to a Rust (host-side) panic.
                    Ok(())
                }

                UnwindState::None => unreachable!(),
            };

            // If the debugger invocation itself resulted in an `Err`
            // (which can only come from the `block_on` hitting a
            // failure mode), we need to override our unwind as if we
            // were handling a host error.
            if let Err(err) = result {
                unwind = UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    backtrace: None,
                    coredump_stack: None,
                };
            }
        }

        match unwind {
            UnwindState::UnwindToHost { .. } => {
                // Put the state back so the host side (see `read_unwind`)
                // can retrieve it after the control transfer; the entry
                // trampoline's handler carries no payload.
                self.unwind.set(unwind);
                let handler = self.entry_trap_handler();
                let payload1 = 0;
                let payload2 = 0;
                unsafe {
                    self.resume_to_exception_handler(
                        store.executor(),
                        &handler,
                        payload1,
                        payload2,
                    );
                }
            }
            #[cfg(feature = "gc")]
            UnwindState::UnwindToWasm(handler) => {
                // Take the pending exception at this time and use it as payload.
                let payload1 = usize::try_from(
                    store
                        .take_pending_exception()
                        .unwrap()
                        .as_gc_ref()
                        .as_raw_u32(),
                )
                .expect("GC ref does not fit in usize");
                // We only use one of the payload words.
                let payload2 = 0;
                unsafe {
                    self.resume_to_exception_handler(
                        store.executor(),
                        &handler,
                        payload1,
                        payload2,
                    );
                }
            }
            UnwindState::None => {
                panic!("Attempting to unwind with no unwind state set.");
            }
        }
    }
952
953 pub(crate) fn entry_trap_handler(&self) -> Handler {
954 unsafe {
955 let vm_store_context = self.vm_store_context.get().as_ref();
956 let fp = *vm_store_context.last_wasm_entry_fp.get();
957 let sp = *vm_store_context.last_wasm_entry_sp.get();
958 let pc = *vm_store_context.last_wasm_entry_trap_handler.get();
959 Handler { pc, sp, fp }
960 }
961 }
962
    /// Transfers control to `handler` with `payload1`/`payload2` as the
    /// handler's payload words, dispatching on which executor is in use.
    ///
    /// # Safety
    ///
    /// `handler` must describe a valid handler frame set up on entry to wasm.
    /// On the native executor this resumes via `Handler::resume_tailcc`,
    /// which skips any intervening Rust destructors on the stack (see the
    /// safety notes on `unwind`).
    unsafe fn resume_to_exception_handler(
        &self,
        executor: ExecutorRef<'_>,
        handler: &Handler,
        payload1: usize,
        payload2: usize,
    ) {
        unsafe {
            match executor {
                // The interpreter performs the resumption itself through its
                // own state rather than a native stack transfer.
                ExecutorRef::Interpreter(mut r) => {
                    r.resume_to_exception_handler(handler, payload1, payload2)
                }
                #[cfg(has_host_compiler_backend)]
                ExecutorRef::Native => handler.resume_tailcc(payload1, payload2),
            }
        }
    }
980
981 fn capture_backtrace(
982 &self,
983 limits: *const VMStoreContext,
984 trap_pc_and_fp: Option<(usize, usize)>,
985 ) -> Option<Backtrace> {
986 if !self.capture_backtrace {
987 return None;
988 }
989
990 Some(unsafe { Backtrace::new_with_trap_state(limits, self.unwinder, self, trap_pc_and_fp) })
991 }
992
993 pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Self> + 'a {
994 let mut state = Some(self);
995 core::iter::from_fn(move || {
996 let this = state?;
997 state = unsafe { this.prev().as_ref() };
998 Some(this)
999 })
1000 }
1001
    /// Trap handler using our thread-local state.
    ///
    /// * `regs` - some special program registers at the time that the trap
    ///   happened, for example `pc`.
    /// * `faulting_addr` - the system-provided address that a fault, if
    ///   any, happened at. This is used when debug-asserting that all segfaults
    ///   are known to live within a `Store<T>` in a valid range.
    /// * `call_handler` - a closure used to invoke the platform-specific
    ///   signal handler for each instance, if available.
    ///
    /// Attempts to handle the trap if it's a wasm trap. Returns a `TrapTest`
    /// which indicates what this could be, such as:
    ///
    /// * `TrapTest::NotWasm` - not a wasm fault, this should get forwarded to
    ///   the next platform-specific fault handler.
    /// * `TrapTest::HandledByEmbedder` - the embedder `call_handler` handled
    ///   this signal, nothing else to do.
    /// * `TrapTest::Trap` - this is a wasm trap and the stack needs to be
    ///   unwound now.
    pub(crate) fn test_if_trap(
        &self,
        regs: TrapRegisters,
        faulting_addr: Option<usize>,
        call_handler: impl FnOnce(&SignalHandler) -> bool,
    ) -> TrapTest {
        // First up see if any instance registered has a custom trap handler,
        // in which case run them all. If anything handles the trap then we
        // return that the trap was handled.
        //
        // Note: the `let _ = ...` keeps `call_handler` "used" in
        // configurations where the cfg'd block below is compiled out.
        let _ = &call_handler;
        #[cfg(all(has_native_signals, not(miri)))]
        if let Some(handler) = self.signal_handler {
            if unsafe { call_handler(&*handler) } {
                return TrapTest::HandledByEmbedder;
            }
        }

        // If this fault wasn't in wasm code, then it's not our problem
        let Some((code, text_offset)) = lookup_code(regs.pc) else {
            return TrapTest::NotWasm;
        };

        // If the fault was at a location that was not marked as potentially
        // trapping, then that's a bug in Cranelift/Winch/etc. Don't try to
        // catch the trap and pretend this isn't wasm so the program likely
        // aborts.
        let Some(trap) = code.lookup_trap_code(text_offset) else {
            return TrapTest::NotWasm;
        };

        // If all that passed then this is indeed a wasm trap, so return the
        // `Handler` setup in the original wasm frame.
        self.set_jit_trap(regs, faulting_addr, trap);
        let entry_handler = self.entry_trap_handler();
        TrapTest::Trap(entry_handler)
    }
1057
1058 pub(crate) fn set_jit_trap(
1059 &self,
1060 TrapRegisters { pc, fp, .. }: TrapRegisters,
1061 faulting_addr: Option<usize>,
1062 trap: wasmtime_environ::Trap,
1063 ) {
1064 let backtrace =
1065 self.capture_backtrace(self.vm_store_context.get().as_ptr(), Some((pc, fp)));
1066 let coredump_stack =
1067 self.capture_coredump(self.vm_store_context.get().as_ptr(), Some((pc, fp)));
1068 self.unwind.set(UnwindState::UnwindToHost {
1069 reason: UnwindReason::Trap(TrapReason::Jit {
1070 pc,
1071 faulting_addr,
1072 trap,
1073 }),
1074 backtrace,
1075 coredump_stack,
1076 });
1077 }
1078}
1079
1080/// A private inner module managing the state of Wasmtime's thread-local storage
1081/// (TLS) state.
1082///
1083/// Wasmtime at this time has a single pointer of TLS. This single pointer of
1084/// TLS is the totality of all TLS required by Wasmtime. By keeping this as
1085/// small as possible it generally makes it easier to integrate with external
1086/// systems and implement features such as fiber context switches. This single
1087/// TLS pointer is declared in platform-specific modules to handle platform
1088/// differences, so this module here uses getters/setters which delegate to
1089/// platform-specific implementations.
1090///
1091/// The single TLS pointer used by Wasmtime is morally
1092/// `Option<&CallThreadState>` meaning that it's a possibly-present pointer to
1093/// some state. This pointer is a pointer to the most recent (youngest)
1094/// `CallThreadState` activation, or the most recent call into WebAssembly.
1095///
1096/// This TLS pointer is additionally the head of a linked list of activations
/// that are all stored on the stack for the current thread. Each time
/// WebAssembly is recursively invoked by an embedder, a new entry is pushed
/// onto this linked list. This singly-linked list is maintained with its head
/// in TLS, and its node pointers are stored in `CallThreadState::prev`.
1101///
1102/// An example stack might look like this:
1103///
1104/// ```text
1105/// ┌─────────────────────┐◄───── highest, or oldest, stack address
1106/// │ native stack frames │
1107/// │ ... │
1108/// │ ┌───────────────┐◄─┼──┐
1109/// │ │CallThreadState│ │ │
1110/// │ └───────────────┘ │ p
1111/// ├─────────────────────┤ r
1112/// │ wasm stack frames │ e
1113/// │ ... │ v
1114/// ├─────────────────────┤ │
1115/// │ native stack frames │ │
1116/// │ ... │ │
1117/// │ ┌───────────────┐◄─┼──┼── TLS pointer
1118/// │ │CallThreadState├──┼──┘
1119/// │ └───────────────┘ │
1120/// ├─────────────────────┤
1121/// │ wasm stack frames │
1122/// │ ... │
1123/// ├─────────────────────┤
1124/// │ native stack frames │
1125/// │ ... │
1126/// └─────────────────────┘◄───── smallest, or youngest, stack address
1127/// ```
1128///
1129/// # Fibers and async
1130///
1131/// Wasmtime supports stack-switching with fibers to implement async. This means
1132/// that Wasmtime will temporarily execute code on a separate stack and then
1133/// suspend from this stack back to the embedder for async operations. Doing
1134/// this safely requires manual management of the TLS pointer updated by
1135/// Wasmtime.
1136///
1137/// For example when a fiber is suspended that means that the TLS pointer needs
1138/// to be restored to whatever it was when the fiber was resumed. Additionally
1139/// this may need to pop multiple `CallThreadState` activations, one for each
1140/// one located on the fiber stack itself.
1141///
1142/// The `AsyncWasmCallState` and `PreviousAsyncWasmCallState` structures in this
1143/// module are used to manage this state, namely:
1144///
1145/// * The `AsyncWasmCallState` structure represents the state of a suspended
1146/// fiber. This is a linked list, in reverse order, from oldest activation on
1147/// the fiber to youngest activation on the fiber.
1148///
1149/// * The `PreviousAsyncWasmCallState` structure represents a pointer within our
1150/// thread's TLS linked list of activations when a fiber was resumed. This
1151/// pointer is used during fiber suspension to know when to stop popping
1152/// activations from the thread's linked list.
1153///
1154/// Note that this means that the directionality of linked list links is
1155/// opposite when stored in TLS vs when stored for a suspended fiber. The
1156/// thread's current list pointed to by TLS is youngest-to-oldest links, while a
1157/// suspended fiber stores oldest-to-youngest links.
1158pub(crate) mod tls {
1159 use super::CallThreadState;
1160
1161 pub use raw::Ptr;
1162
1163 // An even *more* inner module for dealing with TLS. This actually has the
1164 // thread local variable and has functions to access the variable.
1165 //
1166 // Note that this is specially done to fully encapsulate that the accessors
1167 // for tls may or may not be inlined. Wasmtime's async support employs stack
1168 // switching which can resume execution on different OS threads. This means
1169 // that borrows of our TLS pointer must never live across accesses because
1170 // otherwise the access may be split across two threads and cause unsafety.
1171 //
1172 // This also means that extra care is taken by the runtime to save/restore
1173 // these TLS values when the runtime may have crossed threads.
1174 //
1175 // Note, though, that if async support is disabled at compile time then
1176 // these functions are free to be inlined.
    pub(super) mod raw {
        use super::CallThreadState;

        pub type Ptr = *const CallThreadState;

        // Bit 0 of the stored pointer is used below as an "initialized" flag,
        // so `CallThreadState` must be aligned to at least 2 bytes for that
        // bit to always be free in a real pointer.
        const _: () = {
            assert!(core::mem::align_of::<CallThreadState>() > 1);
        };

        // Decodes the platform TLS word into the activation pointer and
        // whether this thread has already run `lazy_per_thread_init`.
        fn tls_get() -> (Ptr, bool) {
            let mut initialized = false;
            let p = crate::runtime::vm::sys::tls_get().map_addr(|a| {
                // Bit 0 carries the "initialized" flag; mask it off to
                // recover the real pointer.
                initialized = (a & 1) != 0;
                a & !1
            });
            (p.cast(), initialized)
        }

        // Packs `initialized` back into bit 0 of `ptr` and stores the result
        // in the platform TLS word.
        fn tls_set(ptr: Ptr, initialized: bool) {
            let encoded = ptr.map_addr(|a| a | usize::from(initialized));
            crate::runtime::vm::sys::tls_set(encoded.cast_mut().cast::<u8>());
        }

        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn replace(val: Ptr) -> Ptr {
            // When a new value is configured that means that we may be
            // entering WebAssembly so check to see if this thread has
            // performed per-thread initialization for traps.
            let (prev, initialized) = tls_get();
            if !initialized {
                super::super::lazy_per_thread_init();
            }
            tls_set(val, true);
            prev
        }

        /// Eagerly initialize thread-local runtime functionality. This will be performed
        /// lazily by the runtime if users do not perform it eagerly.
        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn initialize() {
            let (state, initialized) = tls_get();
            if initialized {
                return;
            }
            super::super::lazy_per_thread_init();
            tls_set(state, true);
        }

        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn get() -> Ptr {
            tls_get().0
        }
    }
1233
1234 pub use raw::initialize as tls_eager_initialize;
1235
    /// Opaque state used to persist the state of the `CallThreadState`
    /// activations associated with a fiber stack that's used as part of an
    /// async wasm call.
    #[cfg(feature = "async")]
    pub struct AsyncWasmCallState {
        // The head of a linked list of activations that are currently present
        // on an async call's fiber stack. This pointer points to the oldest
        // activation frame where the `prev` links internally link to younger
        // activation frames.
        //
        // When pushed onto a thread this linked list is traversed to get pushed
        // onto the current thread at the time.
        //
        // If this pointer is null then that means that the fiber this state is
        // associated with has no activations.
        //
        // Note that this oldest-to-youngest ordering is the reverse of the
        // thread's TLS list, which links youngest-to-oldest (see the module
        // docs above).
        state: raw::Ptr,
    }
1253
1254 // SAFETY: This is a relatively unsafe unsafe block and not really all that
1255 // well audited. The general idea is that the linked list of activations
1256 // owned by `self.state` are safe to send to other threads, but that relies
1257 // on everything internally being safe as well as stack variables and such.
1258 // This is more-or-less tied to the very large comment in `fiber.rs` about
1259 // `unsafe impl Send` there.
1260 #[cfg(feature = "async")]
1261 unsafe impl Send for AsyncWasmCallState {}
1262
    #[cfg(feature = "async")]
    impl AsyncWasmCallState {
        /// Creates new state that initially starts as null.
        pub fn new() -> AsyncWasmCallState {
            AsyncWasmCallState {
                state: core::ptr::null_mut(),
            }
        }

        /// Pushes the saved state of this wasm's call onto the current thread's
        /// state.
        ///
        /// This will iterate over the linked list of states stored within
        /// `self` and push them sequentially onto the current thread's
        /// activation list.
        ///
        /// The returned `PreviousAsyncWasmCallState` captures the state of this
        /// thread just before this operation, and it must have its `restore`
        /// method called to restore the state when the async wasm is suspended
        /// from.
        ///
        /// # Unsafety
        ///
        /// Must be carefully coordinated with
        /// `PreviousAsyncWasmCallState::restore` and fiber switches to ensure
        /// that this doesn't push stale data and the data is popped
        /// appropriately.
        pub unsafe fn push(self) -> PreviousAsyncWasmCallState {
            // First save the state of TLS as-is so when this state is popped
            // off later on we know where to stop.
            let ret = PreviousAsyncWasmCallState { state: raw::get() };

            // The oldest activation, if present, has various `VMStoreContext`
            // fields saved within it. These fields were the state for the
            // *youngest* activation when a suspension previously happened. By
            // swapping them back into the store this is an O(1) way of
            // restoring the state of a store's metadata fields at the time of
            // the suspension.
            //
            // The store's previous values before this function will all get
            // saved in the oldest activation's state on the stack. The store's
            // current state then describes the youngest activation which is
            // restored via the loop below.
            unsafe {
                if let Some(state) = self.state.as_ref() {
                    state.swap();
                }
            }

            // Our `state` pointer is a linked list of oldest-to-youngest so by
            // pushing in order of the list we restore the youngest-to-oldest
            // list as stored in the state of this current thread.
            let mut ptr = self.state;
            unsafe {
                while let Some(state) = ptr.as_ref() {
                    // Sever this node's saved link before handing the node
                    // back to the thread's TLS list via `push`.
                    ptr = state.prev.replace(core::ptr::null_mut());
                    state.push();
                }
            }
            ret
        }

        /// Performs a runtime check that this state is indeed null.
        pub fn assert_null(&self) {
            assert!(self.state.is_null());
        }

        /// Asserts that the current CallThreadState pointer, if present, is not
        /// in the `range` specified.
        ///
        /// This is used when exiting a future in Wasmtime to assert that the
        /// current CallThreadState pointer does not point within the stack
        /// we're leaving (e.g. allocated for a fiber).
        pub fn assert_current_state_not_in_range(range: core::ops::Range<usize>) {
            let p = raw::get() as usize;
            assert!(p < range.start || range.end < p);
        }
    }
1341
    /// Opaque state used to help control TLS state across stack switches for
    /// async support.
    ///
    /// This structure is returned from [`AsyncWasmCallState::push`] and
    /// represents the state of this thread's TLS variable prior to the push
    /// operation.
    ///
    /// Note that this type's `Drop` implementation panics: it must be
    /// consumed with [`PreviousAsyncWasmCallState::restore`].
    #[cfg(feature = "async")]
    pub struct PreviousAsyncWasmCallState {
        // The raw value of this thread's TLS pointer when this structure was
        // created. This is not dereferenced or inspected but is used to halt
        // linked list traversal in [`PreviousAsyncWasmCallState::restore`].
        state: raw::Ptr,
    }
1355
    #[cfg(feature = "async")]
    impl PreviousAsyncWasmCallState {
        /// Pops a fiber's linked list of activations and stores them in
        /// `AsyncWasmCallState`.
        ///
        /// This will pop the top activation of this current thread continuously
        /// until it reaches whatever the current activation was when
        /// [`AsyncWasmCallState::push`] was originally called.
        ///
        /// # Unsafety
        ///
        /// Must be paired with a `push` and only performed at a time when a
        /// fiber is being suspended.
        pub unsafe fn restore(self) -> AsyncWasmCallState {
            let thread_head = self.state;
            // Disarm the panicking `Drop` implementation now that this value
            // is being consumed as intended.
            core::mem::forget(self);
            let mut ret = AsyncWasmCallState::new();
            loop {
                // If the current TLS state is as we originally found it, then
                // this loop is finished.
                //
                // Note, though, that before exiting, if the oldest
                // `CallThreadState` is present, the current state of
                // `VMStoreContext` is saved off within it. This will save the
                // current state, before this function, of `VMStoreContext`
                // into the `EntryStoreContext` stored with the oldest
                // activation. This is a bit counter-intuitive where the state
                // for the youngest activation is stored in the "old" state
                // of the oldest activation.
                //
                // What this does is restores the state of the store to just
                // before this async fiber was started. The fiber's state will
                // be entirely self-contained in the fiber itself and the
                // returned `AsyncWasmCallState`. Resumption above in
                // `AsyncWasmCallState::push` will perform the swap back into
                // the store to hook things up again.
                let ptr = raw::get();
                if ptr == thread_head {
                    unsafe {
                        if let Some(state) = ret.state.as_ref() {
                            state.swap();
                        }
                    }

                    break ret;
                }

                // Pop this activation from the current thread's TLS state, and
                // then afterwards push it onto our own linked list within this
                // `AsyncWasmCallState`. Note that the linked list in
                // `AsyncWasmCallState` is stored in reverse order so a
                // subsequent `push` later on pushes everything in the right
                // order.
                unsafe {
                    (*ptr).pop();
                    if let Some(state) = ret.state.as_ref() {
                        (*ptr).prev.set(state);
                    }
                }
                ret.state = ptr;
            }
        }
    }
1419
    #[cfg(feature = "async")]
    impl Drop for PreviousAsyncWasmCallState {
        fn drop(&mut self) {
            // `restore` consumes `self` via `core::mem::forget`, so reaching
            // this destructor means `restore` was never called and the
            // thread's TLS activation list was never popped back.
            panic!("must be consumed with `restore`");
        }
    }
1426
    /// Configures thread local state such that for the duration of the
    /// execution of `closure` any call to `with` will yield `state`, unless
    /// this is recursively called again.
    #[inline]
    pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
        // RAII guard which pops `state` from the TLS activation list when
        // dropped, including when `closure` unwinds via a panic.
        struct Reset<'a> {
            state: &'a CallThreadState,
        }

        impl Drop for Reset<'_> {
            #[inline]
            fn drop(&mut self) {
                unsafe {
                    self.state.pop();
                }
            }
        }

        unsafe {
            // Push the activation, then let `Reset` guarantee the matching
            // pop regardless of how `closure` exits.
            state.push();
            let reset = Reset { state };
            closure(reset.state)
        }
    }
1451
1452 /// Returns the last pointer configured with `set` above, if any.
1453 pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
1454 let p = raw::get();
1455 unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
1456 }
1457}