wasmtime/runtime/vm/traphandlers.rs
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
3
4mod backtrace;
5
6#[cfg(feature = "coredump")]
7#[path = "traphandlers/coredump_enabled.rs"]
8mod coredump;
9#[cfg(not(feature = "coredump"))]
10#[path = "traphandlers/coredump_disabled.rs"]
11mod coredump;
12
13#[cfg(all(has_native_signals))]
14mod signals;
15#[cfg(all(has_native_signals))]
16pub use self::signals::*;
17
18#[cfg(feature = "gc")]
19use crate::ThrownException;
20use crate::runtime::module::lookup_code;
21use crate::runtime::store::{ExecutorRef, StoreOpaque};
22use crate::runtime::vm::sys::traphandlers;
23use crate::runtime::vm::{InterpreterRef, VMContext, VMStore, VMStoreContext, f32x4, f64x2, i8x16};
24#[cfg(all(feature = "debug", feature = "gc"))]
25use crate::store::AsStoreOpaque;
26use crate::{EntryStoreContext, prelude::*};
27use crate::{StoreContextMut, WasmBacktrace};
28use core::cell::Cell;
29use core::num::NonZeroU32;
30use core::ptr::{self, NonNull};
31use wasmtime_unwinder::Handler;
32
33pub use self::backtrace::Backtrace;
34#[cfg(feature = "debug")]
35pub(crate) use self::backtrace::{FrameOrHostCode, StoreBacktrace};
36#[cfg(feature = "gc")]
37pub use wasmtime_unwinder::Frame;
38
39pub use self::coredump::CoreDumpStack;
40pub use self::tls::tls_eager_initialize;
41#[cfg(feature = "async")]
42pub use self::tls::{AsyncWasmCallState, PreviousAsyncWasmCallState};
43
44pub use traphandlers::SignalHandler;
45
/// Register state captured at the site of a trap.
///
/// Used by the signal/trap-handling machinery to identify the trapping wasm
/// frame; only the two registers needed for that are recorded.
pub(crate) struct TrapRegisters {
    /// Program counter at the point of the trap.
    pub pc: usize,
    /// Frame pointer at the point of the trap.
    pub fp: usize,
}
50
/// Return value from `test_if_trap`.
///
/// Classifies a caught signal/fault as either unrelated to wasm, already
/// handled by an embedder-installed handler, or a genuine wasm trap.
pub(crate) enum TrapTest {
    /// Not a wasm trap, need to delegate to whatever process handler is next.
    NotWasm,
    /// This trap was handled by the embedder via custom embedding APIs.
    #[cfg(has_host_compiler_backend)]
    #[cfg_attr(miri, expect(dead_code, reason = "using #[cfg] too unergonomic"))]
    HandledByEmbedder,
    /// This is a wasm trap, it needs to be handled. The carried `Handler`
    /// describes where control should resume to process the trap.
    Trap(Handler),
}
62
/// Performs lazy per-thread initialization of trap handling by delegating to
/// the platform-specific (`sys::traphandlers`) implementation.
fn lazy_per_thread_init() {
    traphandlers::lazy_per_thread_init();
}
66
/// Raises a preexisting trap or exception and unwinds.
///
/// If the preexisting state has registered a trap, this function will execute
/// the `Handler::resume` to make its way back to the original exception
/// handler created when Wasm was entered. If the state has registered an
/// exception, this function will perform the unwind action registered: either
/// resetting PC, FP, and SP to the handler in the middle of the Wasm
/// activation on the stack, or the entry trampoline back to the host, if
/// the exception is uncaught.
///
/// This is currently only called from the `raise` builtin of
/// Wasmtime. This builtin is only used when the host returns back to
/// wasm and indicates that a trap or exception should be raised. In
/// this situation the host has already stored trap or exception
/// information within the `CallThreadState` and this is the low-level
/// operation to actually perform an unwind.
///
/// Note that this function is used both for Pulley and for native execution.
/// For Pulley this function will return and the interpreter will be
/// responsible for handling the control-flow transfer. For native this
/// function will not return as the control flow transfer will be handled
/// internally.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub(super) unsafe fn raise_preexisting_trap(store: &mut dyn VMStore) {
    // `unwrap` is justified because wasm is on the stack, so an activation
    // must be registered in TLS (see safety contract above).
    tls::with(|info| unsafe { info.unwrap().unwind(store) })
}
98
/// Invokes the closure `f` and handles any error/panic/trap that happens
/// within.
///
/// This will invoke the closure `f` with the provided `store` and the closure
/// will return a value that implements `HostResult`. This trait abstracts over
/// how host values are translated to ABI values when going back into wasm.
/// Some examples are:
///
/// * `T` - bare return types (not results) are simply returned as-is. No
///   `catch_unwind` happens as if a trap can't happen then the host shouldn't
///   be panicking or invoking user code.
///
/// * `Result<(), E>` - this represents an ABI return value of `bool` which
///   indicates whether the call succeeded. This return value will catch panics
///   and record trap information as `E`.
///
/// * `Result<u32, E>` - the ABI return value here is `u64` where on success
///   the 32-bit result is zero-extended and `u64::MAX` as a return value
///   indicates that a trap or panic happened.
///
/// This is primarily used in conjunction with the Cranelift-and-host boundary.
/// This function acts as a bridge between the two to appropriately handle
/// encoding host values to Cranelift-understood ABIs via the `HostResult`
/// trait.
pub fn catch_unwind_and_record_trap<R>(
    store: &mut dyn VMStore,
    f: impl FnOnce(&mut dyn VMStore) -> R,
) -> R::Abi
where
    R: HostResult,
{
    // Invoke the closure `f`, optionally catching unwinds depending on `R`. The
    // return value is always provided and if unwind information is provided
    // (e.g. `ret` is a "false"-y value) then it's recorded in TLS for the
    // unwind operation that's about to happen from Cranelift-generated code.
    let (ret, unwind) = R::maybe_catch_unwind(store, |store| f(store));
    if let Some(unwind) = unwind {
        // `unwrap` asserts an activation is registered in TLS; this path is
        // only reached from host code invoked with wasm on the stack.
        tls::with(|info| info.unwrap().record_unwind(store, unwind));
    }
    ret
}
140
/// A trait used in conjunction with `catch_unwind_and_record_trap` to convert a
/// Rust-based type to a specific ABI while handling traps/unwinds.
///
/// This type is implemented for return values from host function calls and
/// libcalls. The `Abi` value of this trait represents either a successful
/// execution with some payload state or that a failed execution happened. In
/// the event of a failed execution the state of the failure itself is stored
/// within `CallThreadState::unwind`. Cranelift-compiled code is expected to
/// test for this failure sentinel and process it accordingly.
///
/// See `catch_unwind_and_record_trap` for some more information as well.
pub trait HostResult {
    /// The type of the value that's returned to Cranelift-compiled code. Needs
    /// to be ABI-safe to pass through an `extern "C"` return value.
    type Abi: Copy;

    /// Executes `f` and returns the ABI/unwind information as a result.
    ///
    /// This may optionally catch unwinds during execution depending on this
    /// implementation. The ABI return value is unconditionally provided. If an
    /// unwind was detected (e.g. a host panic or a wasm trap) then that's
    /// additionally returned as well.
    ///
    /// If an unwind is returned then it's expected that when the host returns
    /// back to wasm (which should be soon after calling this through
    /// `catch_unwind_and_record_trap`) then wasm will very quickly turn around
    /// and initiate an unwind (currently through `raise_preexisting_trap`).
    fn maybe_catch_unwind(
        store: &mut dyn VMStore,
        f: impl FnOnce(&mut dyn VMStore) -> Self,
    ) -> (Self::Abi, Option<UnwindReason>);
}
173
// Base case implementations that do not catch unwinds. These are for libcalls
// that neither trap nor execute user code. The raw value is the ABI itself.
//
// Panics in these libcalls will result in a process abort as unwinding is not
// allowed via Rust through `extern "C"` function boundaries.
macro_rules! host_result_no_catch {
    ($($t:ty,)*) => {
        $(
            // Identity implementation: the value is its own ABI and no
            // unwind information is ever produced.
            impl HostResult for $t {
                type Abi = $t;
                #[allow(unreachable_code, reason = "some types uninhabited on some platforms")]
                fn maybe_catch_unwind(
                    store: &mut dyn VMStore,
                    f: impl FnOnce(&mut dyn VMStore) -> $t,
                ) -> ($t, Option<UnwindReason>) {
                    (f(store), None)
                }
            }
        )*
    }
}

// Scalar and SIMD-vector types used directly as libcall return ABIs.
host_result_no_catch! {
    (),
    bool,
    u32,
    *mut u8,
    u64,
    f32,
    f64,
    i8x16,
    f32x4,
    f64x2,
}
208
/// `NonNull<u8>` is returned through the ABI as a raw `*mut u8` pointer; no
/// unwinds are caught (mirroring the `host_result_no_catch!` cases above).
impl HostResult for NonNull<u8> {
    type Abi = *mut u8;
    fn maybe_catch_unwind(
        store: &mut dyn VMStore,
        f: impl FnOnce(&mut dyn VMStore) -> Self,
    ) -> (*mut u8, Option<UnwindReason>) {
        (f(store).as_ptr(), None)
    }
}
218
/// Implementation of `HostResult` for `Result<T, E>`.
///
/// This is where things get interesting for `HostResult`. This is generically
/// defined to allow many shapes of the `Result` type to be returned from host
/// calls or libcalls. To do this an extra trait requirement is placed on the
/// successful result `T`: `HostResultHasUnwindSentinel`.
///
/// The general requirement is that `T` says what ABI it has, and the ABI must
/// have a sentinel value which indicates that an unwind in wasm should happen.
/// For example if `T = ()` then `true` means that the call succeeded and
/// `false` means that an unwind happened. Here the sentinel is `false` and the
/// ABI is `bool`.
///
/// This is the only implementation of `HostResult` which actually catches
/// unwinds as there's a sentinel to encode.
impl<T, E> HostResult for Result<T, E>
where
    T: HostResultHasUnwindSentinel,
    E: Into<TrapReason>,
{
    type Abi = T::Abi;

    fn maybe_catch_unwind(
        store: &mut dyn VMStore,
        f: impl FnOnce(&mut dyn VMStore) -> Result<T, E>,
    ) -> (T::Abi, Option<UnwindReason>) {
        // First prepare the closure `f` as something that'll be invoked to
        // generate the return value of this function. This is then,
        // conditionally below, passed to `catch_unwind`.
        let f = move || match f(store) {
            Ok(ret) => (ret.into_abi(), None),
            Err(reason) => (T::SENTINEL, Some(UnwindReason::Trap(reason.into()))),
        };

        // With `panic=unwind` use `std::panic::catch_unwind` to catch possible
        // panics to rethrow.
        #[cfg(all(feature = "std", panic = "unwind"))]
        {
            match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
                Ok(result) => result,
                Err(err) => (T::SENTINEL, Some(UnwindReason::Panic(err))),
            }
        }

        // With `panic=abort` there's no use in using `std::panic::catch_unwind`
        // since it won't actually catch anything. Note that
        // `std::panic::catch_unwind` will technically optimize to this but having
        // this branch avoids using the `std::panic` module entirely.
        #[cfg(not(all(feature = "std", panic = "unwind")))]
        {
            f()
        }
    }
}
273
/// Trait used in conjunction with `HostResult for Result<T, E>` where this is
/// the trait bound on `T`.
///
/// This is for values in the "ok" position of a `Result` return value. Each
/// value can have a separate ABI from itself (e.g. `type Abi`) and must be
/// convertible to the ABI. Additionally all implementations of this trait have
/// a "sentinel value" which indicates that an unwind happened. This means that
/// no valid instance of `Self` should generate the `SENTINEL` via the
/// `into_abi` function.
pub unsafe trait HostResultHasUnwindSentinel {
    /// The Cranelift-understood ABI of this value (should not be `Self`).
    type Abi: Copy;

    /// A value that indicates that an unwind should happen and is tested for in
    /// Cranelift-generated code.
    const SENTINEL: Self::Abi;

    /// Converts this value into the ABI representation. Should never return
    /// the `SENTINEL` value.
    fn into_abi(self) -> Self::Abi;
}
295
/// No return value from the host is represented as a `bool` in the ABI. Here
/// `true` means that execution succeeded while `false` is the sentinel used to
/// indicate an unwind.
unsafe impl HostResultHasUnwindSentinel for () {
    type Abi = bool;
    const SENTINEL: bool = false;
    fn into_abi(self) -> bool {
        // The only value `()` ever maps to is "success".
        true
    }
}
306
/// `NonZeroU32` uses `0` as the sentinel: by the type's invariant no valid
/// value can ever produce `0` through `into_abi`.
unsafe impl HostResultHasUnwindSentinel for NonZeroU32 {
    type Abi = u32;
    const SENTINEL: Self::Abi = 0;
    fn into_abi(self) -> Self::Abi {
        self.get()
    }
}
314
315/// A 32-bit return value can be inflated to a 64-bit return value in the ABI.
316/// In this manner a successful result is a zero-extended 32-bit value and the
317/// failure sentinel is `u64::MAX` or -1 as a signed integer.
318unsafe impl HostResultHasUnwindSentinel for u32 {
319 type Abi = u64;
320 const SENTINEL: u64 = u64::MAX;
321 fn into_abi(self) -> u64 {
322 self.into()
323 }
324}
325
/// If there is no actual successful result (e.g. an empty enum) then the ABI
/// can be `()`, or nothing, because there's no successful result and it's
/// always a failure.
unsafe impl HostResultHasUnwindSentinel for core::convert::Infallible {
    type Abi = ();
    const SENTINEL: () = ();
    fn into_abi(self) {
        // `Infallible` has no values, so this is statically unreachable.
        match self {}
    }
}
336
337unsafe impl HostResultHasUnwindSentinel for bool {
338 type Abi = u32;
339 const SENTINEL: Self::Abi = u32::MAX;
340 fn into_abi(self) -> Self::Abi {
341 u32::from(self)
342 }
343}
344
/// A raised trap: the reason it occurred paired with any diagnostics
/// (backtrace, core dump) captured at the trap site.
#[derive(Debug)]
pub struct Trap {
    /// Original reason from where this trap originated.
    pub reason: TrapReason,
    /// Wasm backtrace of the trap, if any.
    pub backtrace: Option<Backtrace>,
    /// The Wasm Coredump, if any.
    pub coredumpstack: Option<CoreDumpStack>,
}
355
/// Enumeration of different methods of raising a trap (or a sentinel
/// for an exception).
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised trap through `raise_user_trap`.
    User(Error),

    /// A trap raised from Cranelift-generated code.
    Jit {
        /// The program counter where this trap originated.
        ///
        /// This is later used with side tables from compilation to translate
        /// the trapping address to a trap code.
        pc: usize,

        /// If the trap was a memory-related trap such as SIGSEGV then this
        /// field will contain the address of the inaccessible data.
        ///
        /// Note that wasm loads/stores are not guaranteed to fill in this
        /// information. Dynamically-bounds-checked memories, for example, will
        /// not access an invalid address but may instead load from NULL or may
        /// explicitly jump to a `ud2` instruction. This is only available for
        /// fault-based traps which are one of the main ways, but not the only
        /// way, to run wasm.
        faulting_addr: Option<usize>,

        /// The trap code associated with this trap.
        trap: wasmtime_environ::Trap,
    },

    /// A trap raised from a wasm libcall
    Wasm(wasmtime_environ::Trap),

    /// An exception.
    ///
    /// Note that internally, exceptions are rooted on the Store, while
    /// when crossing the public API, exceptions are held in a
    /// `wasmtime::Exception` which contains a boxed root and implements
    /// `Error`. This choice is intentional, to keep the internal
    /// implementation lightweight and ensure the types represent only
    /// allowable states.
    #[cfg(feature = "gc")]
    Exception,
}
400
/// Converts a generic `Error` into a `TrapReason`, special-casing thrown
/// exceptions (which are tracked as a sentinel rather than carrying the
/// error; the exception itself stays rooted on the store).
impl From<Error> for TrapReason {
    fn from(error: Error) -> Self {
        // A `ThrownException` marker means this "error" is really a wasm
        // exception in flight, not a user trap.
        #[cfg(feature = "gc")]
        if error.is::<ThrownException>() {
            return TrapReason::Exception;
        }

        TrapReason::User(error)
    }
}
411
/// A bare trap code from a libcall maps directly to the `Wasm` variant.
impl From<wasmtime_environ::Trap> for TrapReason {
    fn from(code: wasmtime_environ::Trap) -> Self {
        TrapReason::Wasm(code)
    }
}
417
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// A `CallThreadState` activation is pushed for the duration of `closure`,
/// which is invoked with the store's default caller `VMContext` plus an
/// interpreter handle when executing under Pulley. The closure's `bool`
/// return indicates success; on failure the recorded `UnwindState` is
/// translated into an `Err`.
pub fn catch_traps<T, F>(
    store: &mut StoreContextMut<'_, T>,
    old_state: &mut EntryStoreContext,
    mut closure: F,
) -> Result<()>
where
    F: FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
{
    let caller = store.0.default_caller();

    let result = CallThreadState::new(store.0, old_state).with(|_cx| match store.0.executor() {
        ExecutorRef::Interpreter(r) => closure(caller, Some(r)),
        #[cfg(has_host_compiler_backend)]
        ExecutorRef::Native => closure(caller, None),
    });

    match result {
        Ok(x) => Ok(x),
        // An uncaught exception is surfaced via the `ThrownException` marker
        // error; the exception itself remains rooted on the store.
        #[cfg(feature = "gc")]
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Trap(TrapReason::Exception),
            backtrace: _,
            coredump_stack: _,
        }) => Err(ThrownException.into()),
        // A trap is packaged up with its captured diagnostics into a
        // `Trap`-based error.
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Trap(reason),
            backtrace,
            coredump_stack,
        }) => Err(crate::trap::from_runtime_box(
            store.0,
            Box::new(Trap {
                reason,
                backtrace,
                coredumpstack: coredump_stack,
            }),
        )),
        // A host panic that crossed the wasm frames is re-raised here.
        #[cfg(all(feature = "std", panic = "unwind"))]
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Panic(panic),
            ..
        }) => std::panic::resume_unwind(panic),
        #[cfg(feature = "gc")]
        Err(UnwindState::UnwindToWasm { .. }) => {
            unreachable!("We should not have returned to the host with an UnwindToWasm state");
        }
        Err(UnwindState::None) => {
            unreachable!("We should not have gotten an error with no unwind state");
        }
    }
}
470
471// Module to hide visibility of the `CallThreadState::prev` field and force
472// usage of its accessor methods.
473mod call_thread_state {
474 use super::*;
475 use crate::EntryStoreContext;
476 use crate::runtime::vm::{Unwind, VMStackChain};
477
    /// Queued-up unwinding on the CallThreadState, ready to be
    /// enacted by `unwind()`.
    ///
    /// This represents either a request to unwind to the entry point
    /// from host, with associated data; or a request to
    /// unwind into the middle of the Wasm action, e.g. when an
    /// exception is caught.
    pub enum UnwindState {
        /// Unwind all the way to the entry from host to Wasm, using
        /// the handler configured in the entry trampoline.
        UnwindToHost {
            /// Why the unwind is happening (trap, panic, or exception).
            reason: UnwindReason,
            /// Wasm backtrace captured at the unwind site, if enabled.
            backtrace: Option<Backtrace>,
            /// Core dump captured at the unwind site, if enabled.
            coredump_stack: Option<CoreDumpStack>,
        },
        /// Unwind into Wasm. The exception destination has been
        /// resolved. Note that the payload value is still not
        /// specified, because it must remain rooted on the Store
        /// until `unwind()` actually takes the value. The first
        /// payload word in the underlying exception ABI is used to
        /// send the raw `VMExnRef`.
        #[cfg(feature = "gc")]
        UnwindToWasm(Handler),
        /// Do not unwind.
        None,
    }
504
505 impl UnwindState {
506 pub(super) fn is_none(&self) -> bool {
507 match self {
508 Self::None => true,
509 _ => false,
510 }
511 }
512 }
513
    /// Temporary state stored on the stack which is registered in the `tls`
    /// module below for calls into wasm.
    ///
    /// This structure is stored on the stack and allocated during the
    /// `catch_traps` function above. The purpose of this structure is to track
    /// the state of an "activation" or a sequence of 0-or-more contiguous
    /// WebAssembly call frames. A `CallThreadState` always lives on the stack
    /// and additionally maintains pointers to previous states to form a linked
    /// list of activations.
    ///
    /// One of the primary goals of `CallThreadState` is to store the state of
    /// various fields in `VMStoreContext` when it was created. This is done
    /// because calling WebAssembly will clobber these fields otherwise.
    ///
    /// Another major purpose of `CallThreadState` is to assist with unwinding
    /// and track state necessary when an unwind happens for the original
    /// creator of `CallThreadState` to determine why the unwind happened.
    ///
    /// Note that this structure is pointed-to from TLS, hence liberal usage of
    /// interior mutability here since that only gives access to
    /// `&CallThreadState`.
    pub struct CallThreadState {
        /// Unwind state set when initiating an unwind and read when
        /// the control transfer occurs (after the `raise` point is
        /// reached for host-code destinations and right when
        /// performing the jump for Wasm-code destinations).
        pub(super) unwind: Cell<UnwindState>,
        /// Embedder-provided signal handler, if any (from
        /// `store.signal_handler()`).
        #[cfg(all(has_native_signals))]
        pub(super) signal_handler: Option<*const SignalHandler>,
        /// Whether a wasm backtrace should be captured on unwind
        /// (from the engine's `wasm_backtrace` configuration).
        pub(super) capture_backtrace: bool,
        /// Whether a core dump should be captured on unwind
        /// (from the engine's `coredump_on_trap` configuration).
        #[cfg(feature = "coredump")]
        pub(super) capture_coredump: bool,

        /// Pointer to the store's `VMStoreContext` for this activation.
        pub(crate) vm_store_context: NonNull<VMStoreContext>,
        /// Unwinder implementation for this store (from `store.unwinder()`).
        pub(crate) unwinder: &'static dyn Unwind,

        /// Previous activation in the TLS-managed linked list; see
        /// `push`/`pop` below.
        pub(super) prev: Cell<tls::Ptr>,

        // The state of the runtime for the *previous* `CallThreadState` for
        // this same store. Our *current* state is saved in `self.vm_store_context`,
        // etc. We need access to the old values of these
        // fields because the `VMStoreContext` typically doesn't change across
        // nested calls into Wasm (i.e. they are typically calls back into the
        // same store and `self.vm_store_context == self.prev.vm_store_context`) and we must
        // maintain the list of contiguous-Wasm-frames stack regions for
        // backtracing purposes.
        old_state: *mut EntryStoreContext,
    }
562
    impl Drop for CallThreadState {
        fn drop(&mut self) {
            // Unwind information should not be present as it should have
            // already been processed.
            debug_assert!(self.unwind.replace(UnwindState::None).is_none());
        }
    }
570
    impl CallThreadState {
        /// Creates a new activation record, snapshotting the relevant
        /// configuration from `store` and recording a pointer to the
        /// previously-saved entry state.
        #[inline]
        pub(super) fn new(
            store: &mut StoreOpaque,
            old_state: *mut EntryStoreContext,
        ) -> CallThreadState {
            CallThreadState {
                unwind: Cell::new(UnwindState::None),
                unwinder: store.unwinder(),
                #[cfg(all(has_native_signals))]
                signal_handler: store.signal_handler(),
                capture_backtrace: store.engine().config().wasm_backtrace,
                #[cfg(feature = "coredump")]
                capture_coredump: store.engine().config().coredump_on_trap,
                vm_store_context: store.vm_store_context_ptr(),
                prev: Cell::new(ptr::null()),
                old_state,
            }
        }

        /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
        ///
        /// # Safety
        ///
        /// Requires that the saved last Wasm trampoline FP points to
        /// a valid trampoline frame, or is null.
        pub unsafe fn old_last_wasm_exit_fp(&self) -> usize {
            let trampoline_fp = unsafe { (&*self.old_state).last_wasm_exit_trampoline_fp };
            // SAFETY: `trampoline_fp` is either a valid FP from an
            // active trampoline frame or is null.
            unsafe { VMStoreContext::wasm_exit_fp_from_trampoline_fp(trampoline_fp) }
        }

        /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
        pub unsafe fn old_last_wasm_exit_pc(&self) -> usize {
            unsafe { (&*self.old_state).last_wasm_exit_pc }
        }

        /// Get the saved FP upon entry into Wasm for the previous `CallThreadState`.
        pub unsafe fn old_last_wasm_entry_fp(&self) -> usize {
            unsafe { (&*self.old_state).last_wasm_entry_fp }
        }

        /// Get the saved `VMStackChain` for the previous `CallThreadState`.
        pub unsafe fn old_stack_chain(&self) -> VMStackChain {
            unsafe { (&*self.old_state).stack_chain.clone() }
        }

        /// Get the previous `CallThreadState`.
        pub fn prev(&self) -> tls::Ptr {
            self.prev.get()
        }

        /// Pushes this `CallThreadState` activation on to the linked list
        /// stored in TLS.
        ///
        /// This method will take the current head of the linked list, stored in
        /// our TLS pointer, and move it into `prev`. The TLS pointer is then
        /// updated to `self`.
        ///
        /// # Panics
        ///
        /// Panics if this activation is already in a linked list (e.g.
        /// `self.prev` is set).
        #[inline]
        pub(crate) unsafe fn push(&self) {
            assert!(self.prev.get().is_null());
            self.prev.set(tls::raw::replace(self));
        }

        /// Pops this `CallThreadState` from the linked list stored in TLS.
        ///
        /// This method will restore `self.prev` into the head of the linked
        /// list stored in TLS and will additionally null-out `self.prev`.
        ///
        /// # Panics
        ///
        /// Panics if this activation isn't the head of the list.
        #[inline]
        pub(crate) unsafe fn pop(&self) {
            let prev = self.prev.replace(ptr::null());
            let head = tls::raw::replace(prev);
            assert!(core::ptr::eq(head, self));
        }

        /// Swaps the state in this `CallThreadState`'s `VMStoreContext` with
        /// the state in `EntryStoreContext` that was saved when this
        /// activation was created.
        ///
        /// This method is used during suspension of a fiber to restore the
        /// store back to what it originally was and prepare it to be resumed
        /// later on. This takes various fields of `VMStoreContext` and swaps
        /// them with what was saved in `EntryStoreContext`. That restores
        /// a store to just before this activation was called but saves off the
        /// fields of this activation to get restored/resumed at a later time.
        #[cfg(feature = "async")]
        pub(super) unsafe fn swap(&self) {
            // Helper swapping an `UnsafeCell`'s contents with the saved copy.
            unsafe fn swap<T>(a: &core::cell::UnsafeCell<T>, b: &mut T) {
                unsafe { core::mem::swap(&mut *a.get(), b) }
            }

            unsafe {
                let cx = self.vm_store_context.as_ref();
                swap(
                    &cx.last_wasm_exit_trampoline_fp,
                    &mut (*self.old_state).last_wasm_exit_trampoline_fp,
                );
                swap(
                    &cx.last_wasm_exit_pc,
                    &mut (*self.old_state).last_wasm_exit_pc,
                );
                swap(
                    &cx.last_wasm_entry_fp,
                    &mut (*self.old_state).last_wasm_entry_fp,
                );
                swap(
                    &cx.last_wasm_entry_sp,
                    &mut (*self.old_state).last_wasm_entry_sp,
                );
                swap(
                    &cx.last_wasm_entry_trap_handler,
                    &mut (*self.old_state).last_wasm_entry_trap_handler,
                );
                swap(&cx.stack_chain, &mut (*self.old_state).stack_chain);
            }
        }
    }
698}
699pub use call_thread_state::*;
700
701#[cfg(feature = "gc")]
702use super::compute_handler;
703
/// Why an unwind back through wasm frames is being initiated.
pub enum UnwindReason {
    /// A host (Rust) panic was caught and must be resumed once the wasm
    /// frames have been unwound; only possible with `std` and `panic=unwind`.
    #[cfg(all(feature = "std", panic = "unwind"))]
    Panic(Box<dyn std::any::Any + Send>),
    /// A wasm trap (or exception sentinel) is being raised.
    Trap(TrapReason),
}
709
/// Anything convertible into a `TrapReason` is also an `UnwindReason`.
impl<E> From<E> for UnwindReason
where
    E: Into<TrapReason>,
{
    fn from(value: E) -> UnwindReason {
        UnwindReason::Trap(value.into())
    }
}
718
719impl CallThreadState {
    /// Registers this activation in TLS for the duration of `closure`.
    ///
    /// The closure's `bool` return indicates success; on failure the unwind
    /// state recorded on this activation is taken and returned as the error.
    #[inline]
    fn with(mut self, closure: impl FnOnce(&CallThreadState) -> bool) -> Result<(), UnwindState> {
        let succeeded = tls::set(&mut self, |me| closure(me));
        if succeeded {
            Ok(())
        } else {
            Err(self.read_unwind())
        }
    }
729
    /// Takes the recorded unwind state, leaving `UnwindState::None` behind.
    /// Marked `#[cold]` as this is only reached on the failure path.
    #[cold]
    fn read_unwind(&self) -> UnwindState {
        self.unwind.replace(UnwindState::None)
    }
734
    /// Records the unwind information provided within this `CallThreadState`,
    /// optionally capturing a backtrace at this time.
    ///
    /// This function is used to stash metadata for why an unwind is about to
    /// happen. The actual unwind is expected to happen after this function is
    /// called using, for example, the `unwind` function below.
    ///
    /// Note that this is a relatively low-level function and will panic if
    /// mis-used.
    ///
    /// # Panics
    ///
    /// Panics if unwind information has already been recorded as that should
    /// have been processed first.
    fn record_unwind(&self, store: &mut dyn VMStore, reason: UnwindReason) {
        // Debug-only check that no unrecorded unwind state is being clobbered.
        if cfg!(debug_assertions) {
            let prev = self.unwind.replace(UnwindState::None);
            assert!(prev.is_none());
        }
        let state = match reason {
            #[cfg(all(feature = "std", panic = "unwind"))]
            UnwindReason::Panic(err) => {
                // Panics don't need backtraces. There is nowhere to attach the
                // hypothetical backtrace to and it doesn't really make sense to try
                // in the first place since this is a Rust problem rather than a
                // Wasm problem.
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Panic(err),
                    backtrace: None,
                    coredump_stack: None,
                }
            }
            // An unwind due to an already-set pending exception
            // triggers the handler-search stack-walk. We store the
            // resolved handler if one exists. In either case, the
            // exception remains rooted in the Store until we actually
            // perform the unwind, and then gets taken and becomes the
            // payload at that point.
            #[cfg(feature = "gc")]
            UnwindReason::Trap(TrapReason::Exception) => {
                // SAFETY: we are invoking `compute_handler()` while
                // Wasm is on the stack and we have re-entered via a
                // trampoline, as required by its stack-walking logic.
                let handler = unsafe { compute_handler(store) };
                match handler {
                    Some(handler) => UnwindState::UnwindToWasm(handler),
                    None => UnwindState::UnwindToHost {
                        reason: UnwindReason::Trap(TrapReason::Exception),
                        backtrace: None,
                        coredump_stack: None,
                    },
                }
            }
            // And if we are just propagating an existing trap that already has
            // a backtrace attached to it, then there is no need to capture a
            // new backtrace either.
            UnwindReason::Trap(TrapReason::User(err))
                if err.downcast_ref::<WasmBacktrace>().is_some() =>
            {
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    backtrace: None,
                    coredump_stack: None,
                }
            }
            // All other traps: capture diagnostics (as configured) now.
            UnwindReason::Trap(trap) => {
                log::trace!("Capturing backtrace and coredump for {trap:?}");
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(trap),
                    backtrace: self.capture_backtrace(store.vm_store_context_mut(), None),
                    coredump_stack: self.capture_coredump(store.vm_store_context_mut(), None),
                }
            }
        };

        // Avoid unused-variable warning in non-exceptions/GC build.
        let _ = store;

        self.unwind.set(state);
    }
815
    /// Helper function to perform an actual unwinding operation.
    ///
    /// This must be preceded by a `record_unwind` operation above to be
    /// processed correctly on the other side.
    ///
    /// # Unsafety
    ///
    /// This function is not safe if a corresponding handler wasn't already
    /// setup in the entry trampoline. Additionally this isn't safe as it may
    /// skip all Rust destructors on the stack, if there are any, for native
    /// executors as `Handler::resume` will be used.
    unsafe fn unwind(&self, store: &mut dyn VMStore) {
        #[allow(unused_mut, reason = "only mutated in `debug` configuration")]
        let mut unwind = self.unwind.replace(UnwindState::None);

        // With the `debug` feature, notify any attached debugger about the
        // unwind before performing it. The debugger hook itself may fail, in
        // which case the unwind reason is overridden below.
        #[cfg(feature = "debug")]
        {
            let result = match &unwind {
                #[cfg(feature = "gc")]
                UnwindState::UnwindToWasm(_) => {
                    assert!(store.as_store_opaque().has_pending_exception());
                    let exn = store
                        .as_store_opaque()
                        .pending_exception_owned_rooted()
                        .expect("exception should be set when we are throwing");
                    store.block_on_debug_handler(crate::DebugEvent::CaughtExceptionThrown(exn))
                }
                #[cfg(feature = "gc")]
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Exception),
                    ..
                } => {
                    let exn = store
                        .as_store_opaque()
                        .pending_exception_owned_rooted()
                        .expect("exception should be set when we are throwing");
                    store.block_on_debug_handler(crate::DebugEvent::UncaughtExceptionThrown(
                        exn.clone(),
                    ))
                }
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Wasm(trap)),
                    ..
                } => store.block_on_debug_handler(crate::DebugEvent::Trap(*trap)),
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    ..
                } => store.block_on_debug_handler(crate::DebugEvent::HostcallError(err)),

                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Jit { .. }),
                    ..
                } => {
                    // JIT traps not handled yet.
                    Ok(())
                }
                #[cfg(all(feature = "std", panic = "unwind"))]
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Panic(_),
                    ..
                } => {
                    // We don't invoke any debugger hook when we're
                    // unwinding due to a Rust (host-side) panic.
                    Ok(())
                }

                UnwindState::None => unreachable!(),
            };

            // If the debugger invocation itself resulted in an `Err`
            // (which can only come from the `block_on` hitting a
            // failure mode), we need to override our unwind as-if
            // were handling a host error.
            if let Err(err) = result {
                unwind = UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    backtrace: None,
                    coredump_stack: None,
                };
            }
        }

        match unwind {
            // Host-bound unwinds: re-stash the state (it is read again after
            // the control transfer) and resume at the entry trampoline's
            // handler with empty payloads.
            UnwindState::UnwindToHost { .. } => {
                self.unwind.set(unwind);
                let handler = self.entry_trap_handler();
                let payload1 = 0;
                let payload2 = 0;
                unsafe {
                    self.resume_to_exception_handler(
                        store.executor(),
                        &handler,
                        payload1,
                        payload2,
                    );
                }
            }
            #[cfg(feature = "gc")]
            UnwindState::UnwindToWasm(handler) => {
                // Take the pending exception at this time and use it as payload.
                let payload1 = usize::try_from(
                    store
                        .take_pending_exception()
                        .unwrap()
                        .as_gc_ref()
                        .as_raw_u32(),
                )
                .expect("GC ref does not fit in usize");
                // We only use one of the payload words.
                let payload2 = 0;
                unsafe {
                    self.resume_to_exception_handler(
                        store.executor(),
                        &handler,
                        payload1,
                        payload2,
                    );
                }
            }
            UnwindState::None => {
                panic!("Attempting to unwind with no unwind state set.");
            }
        }
    }
940
941 pub(crate) fn entry_trap_handler(&self) -> Handler {
942 unsafe {
943 let vm_store_context = self.vm_store_context.as_ref();
944 let fp = *vm_store_context.last_wasm_entry_fp.get();
945 let sp = *vm_store_context.last_wasm_entry_sp.get();
946 let pc = *vm_store_context.last_wasm_entry_trap_handler.get();
947 Handler { pc, sp, fp }
948 }
949 }
950
    /// Transfers control to `handler` with the two given payload words,
    /// never returning here through normal control flow.
    ///
    /// # Safety
    ///
    /// `handler` must describe a valid handler frame for the current
    /// activation. For the native executor this skips any intervening Rust
    /// frames (and their destructors) on the stack.
    unsafe fn resume_to_exception_handler(
        &self,
        executor: ExecutorRef<'_>,
        handler: &Handler,
        payload1: usize,
        payload2: usize,
    ) {
        unsafe {
            match executor {
                // Delegate to the interpreter's own resume implementation.
                ExecutorRef::Interpreter(mut r) => {
                    r.resume_to_exception_handler(handler, payload1, payload2)
                }
                // Natively-compiled code: jump to the handler's recorded
                // pc/sp/fp directly.
                #[cfg(has_host_compiler_backend)]
                ExecutorRef::Native => handler.resume_tailcc(payload1, payload2),
            }
        }
    }
968
969 fn capture_backtrace(
970 &self,
971 limits: *const VMStoreContext,
972 trap_pc_and_fp: Option<(usize, usize)>,
973 ) -> Option<Backtrace> {
974 if !self.capture_backtrace {
975 return None;
976 }
977
978 Some(unsafe { Backtrace::new_with_trap_state(limits, self.unwinder, self, trap_pc_and_fp) })
979 }
980
981 pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Self> + 'a {
982 let mut state = Some(self);
983 core::iter::from_fn(move || {
984 let this = state?;
985 state = unsafe { this.prev().as_ref() };
986 Some(this)
987 })
988 }
989
990 /// Trap handler using our thread-local state.
991 ///
992 /// * `regs` - some special program registers at the time that the trap
993 /// happened, for example `pc`.
    /// * `faulting_addr` - the system-provided address that a fault, if
995 /// any, happened at. This is used when debug-asserting that all segfaults
996 /// are known to live within a `Store<T>` in a valid range.
997 /// * `call_handler` - a closure used to invoke the platform-specific
998 /// signal handler for each instance, if available.
999 ///
1000 /// Attempts to handle the trap if it's a wasm trap. Returns a `TrapTest`
1001 /// which indicates what this could be, such as:
1002 ///
1003 /// * `TrapTest::NotWasm` - not a wasm fault, this should get forwarded to
1004 /// the next platform-specific fault handler.
1005 /// * `TrapTest::HandledByEmbedder` - the embedder `call_handler` handled
1006 /// this signal, nothing else to do.
    /// * `TrapTest::Trap` - this is a wasm trap and the stack needs to be
1008 /// unwound now.
    pub(crate) fn test_if_trap(
        &self,
        regs: TrapRegisters,
        faulting_addr: Option<usize>,
        call_handler: impl FnOnce(&SignalHandler) -> bool,
    ) -> TrapTest {
        // First up see if any instance registered has a custom trap handler,
        // in which case run them all. If anything handles the trap then we
        // return that the trap was handled.
        //
        // The explicit borrow below keeps `call_handler` "used" even in
        // configurations (miri, no native signals) where the `#[cfg]`'d
        // block is compiled out.
        let _ = &call_handler;
        #[cfg(all(has_native_signals, not(miri)))]
        if let Some(handler) = self.signal_handler {
            // SAFETY: assumes the registered `signal_handler` pointer is
            // still valid here — presumably guaranteed at registration time;
            // TODO(review) confirm against where `signal_handler` is set.
            if unsafe { call_handler(&*handler) } {
                return TrapTest::HandledByEmbedder;
            }
        }

        // If this fault wasn't in wasm code, then it's not our problem
        let Some((code, text_offset)) = lookup_code(regs.pc) else {
            return TrapTest::NotWasm;
        };

        // If the fault was at a location that was not marked as potentially
        // trapping, then that's a bug in Cranelift/Winch/etc. Don't try to
        // catch the trap and pretend this isn't wasm so the program likely
        // aborts.
        let Some(trap) = code.lookup_trap_code(text_offset) else {
            return TrapTest::NotWasm;
        };

        // If all that passed then this is indeed a wasm trap, so return the
        // `Handler` setup in the original wasm frame.
        self.set_jit_trap(regs, faulting_addr, trap);
        let entry_handler = self.entry_trap_handler();
        TrapTest::Trap(entry_handler)
    }
1045
1046 pub(crate) fn set_jit_trap(
1047 &self,
1048 TrapRegisters { pc, fp, .. }: TrapRegisters,
1049 faulting_addr: Option<usize>,
1050 trap: wasmtime_environ::Trap,
1051 ) {
1052 let backtrace = self.capture_backtrace(self.vm_store_context.as_ptr(), Some((pc, fp)));
1053 let coredump_stack = self.capture_coredump(self.vm_store_context.as_ptr(), Some((pc, fp)));
1054 self.unwind.set(UnwindState::UnwindToHost {
1055 reason: UnwindReason::Trap(TrapReason::Jit {
1056 pc,
1057 faulting_addr,
1058 trap,
1059 }),
1060 backtrace,
1061 coredump_stack,
1062 });
1063 }
1064}
1065
1066/// A private inner module managing the state of Wasmtime's thread-local storage
1067/// (TLS) state.
1068///
1069/// Wasmtime at this time has a single pointer of TLS. This single pointer of
1070/// TLS is the totality of all TLS required by Wasmtime. By keeping this as
1071/// small as possible it generally makes it easier to integrate with external
1072/// systems and implement features such as fiber context switches. This single
1073/// TLS pointer is declared in platform-specific modules to handle platform
1074/// differences, so this module here uses getters/setters which delegate to
1075/// platform-specific implementations.
1076///
1077/// The single TLS pointer used by Wasmtime is morally
1078/// `Option<&CallThreadState>` meaning that it's a possibly-present pointer to
1079/// some state. This pointer is a pointer to the most recent (youngest)
1080/// `CallThreadState` activation, or the most recent call into WebAssembly.
1081///
1082/// This TLS pointer is additionally the head of a linked list of activations
/// that are all stored on the stack for the current thread. Each time
/// WebAssembly is recursively invoked by an embedder a new entry is pushed
/// onto this linked list. This singly-linked list is maintained with its head
/// in TLS and node pointers are stored in `CallThreadState::prev`.
1087///
1088/// An example stack might look like this:
1089///
1090/// ```text
1091/// ┌─────────────────────┐◄───── highest, or oldest, stack address
1092/// │ native stack frames │
1093/// │ ... │
1094/// │ ┌───────────────┐◄─┼──┐
1095/// │ │CallThreadState│ │ │
1096/// │ └───────────────┘ │ p
1097/// ├─────────────────────┤ r
1098/// │ wasm stack frames │ e
1099/// │ ... │ v
1100/// ├─────────────────────┤ │
1101/// │ native stack frames │ │
1102/// │ ... │ │
1103/// │ ┌───────────────┐◄─┼──┼── TLS pointer
1104/// │ │CallThreadState├──┼──┘
1105/// │ └───────────────┘ │
1106/// ├─────────────────────┤
1107/// │ wasm stack frames │
1108/// │ ... │
1109/// ├─────────────────────┤
1110/// │ native stack frames │
1111/// │ ... │
1112/// └─────────────────────┘◄───── smallest, or youngest, stack address
1113/// ```
1114///
1115/// # Fibers and async
1116///
1117/// Wasmtime supports stack-switching with fibers to implement async. This means
1118/// that Wasmtime will temporarily execute code on a separate stack and then
1119/// suspend from this stack back to the embedder for async operations. Doing
1120/// this safely requires manual management of the TLS pointer updated by
1121/// Wasmtime.
1122///
1123/// For example when a fiber is suspended that means that the TLS pointer needs
1124/// to be restored to whatever it was when the fiber was resumed. Additionally
1125/// this may need to pop multiple `CallThreadState` activations, one for each
1126/// one located on the fiber stack itself.
1127///
1128/// The `AsyncWasmCallState` and `PreviousAsyncWasmCallState` structures in this
1129/// module are used to manage this state, namely:
1130///
1131/// * The `AsyncWasmCallState` structure represents the state of a suspended
1132/// fiber. This is a linked list, in reverse order, from oldest activation on
1133/// the fiber to youngest activation on the fiber.
1134///
1135/// * The `PreviousAsyncWasmCallState` structure represents a pointer within our
1136/// thread's TLS linked list of activations when a fiber was resumed. This
1137/// pointer is used during fiber suspension to know when to stop popping
1138/// activations from the thread's linked list.
1139///
1140/// Note that this means that the directionality of linked list links is
1141/// opposite when stored in TLS vs when stored for a suspended fiber. The
1142/// thread's current list pointed to by TLS is youngest-to-oldest links, while a
1143/// suspended fiber stores oldest-to-youngest links.
1144pub(crate) mod tls {
1145 use super::CallThreadState;
1146
1147 pub use raw::Ptr;
1148
1149 // An even *more* inner module for dealing with TLS. This actually has the
1150 // thread local variable and has functions to access the variable.
1151 //
1152 // Note that this is specially done to fully encapsulate that the accessors
1153 // for tls may or may not be inlined. Wasmtime's async support employs stack
1154 // switching which can resume execution on different OS threads. This means
1155 // that borrows of our TLS pointer must never live across accesses because
1156 // otherwise the access may be split across two threads and cause unsafety.
1157 //
1158 // This also means that extra care is taken by the runtime to save/restore
1159 // these TLS values when the runtime may have crossed threads.
1160 //
1161 // Note, though, that if async support is disabled at compile time then
1162 // these functions are free to be inlined.
1163 pub(super) mod raw {
1164 use super::CallThreadState;
1165
1166 pub type Ptr = *const CallThreadState;
1167
1168 const _: () = {
1169 assert!(core::mem::align_of::<CallThreadState>() > 1);
1170 };
1171
1172 fn tls_get() -> (Ptr, bool) {
1173 let mut initialized = false;
1174 let p = crate::runtime::vm::sys::tls_get().map_addr(|a| {
1175 initialized = (a & 1) != 0;
1176 a & !1
1177 });
1178 (p.cast(), initialized)
1179 }
1180
1181 fn tls_set(ptr: Ptr, initialized: bool) {
1182 let encoded = ptr.map_addr(|a| a | usize::from(initialized));
1183 crate::runtime::vm::sys::tls_set(encoded.cast_mut().cast::<u8>());
1184 }
1185
1186 #[cfg_attr(feature = "async", inline(never))] // see module docs
1187 #[cfg_attr(not(feature = "async"), inline)]
1188 pub fn replace(val: Ptr) -> Ptr {
1189 // When a new value is configured that means that we may be
1190 // entering WebAssembly so check to see if this thread has
1191 // performed per-thread initialization for traps.
1192 let (prev, initialized) = tls_get();
1193 if !initialized {
1194 super::super::lazy_per_thread_init();
1195 }
1196 tls_set(val, true);
1197 prev
1198 }
1199
1200 /// Eagerly initialize thread-local runtime functionality. This will be performed
1201 /// lazily by the runtime if users do not perform it eagerly.
1202 #[cfg_attr(feature = "async", inline(never))] // see module docs
1203 #[cfg_attr(not(feature = "async"), inline)]
1204 pub fn initialize() {
1205 let (state, initialized) = tls_get();
1206 if initialized {
1207 return;
1208 }
1209 super::super::lazy_per_thread_init();
1210 tls_set(state, true);
1211 }
1212
1213 #[cfg_attr(feature = "async", inline(never))] // see module docs
1214 #[cfg_attr(not(feature = "async"), inline)]
1215 pub fn get() -> Ptr {
1216 tls_get().0
1217 }
1218 }
1219
1220 pub use raw::initialize as tls_eager_initialize;
1221
1222 /// Opaque state used to persist the state of the `CallThreadState`
1223 /// activations associated with a fiber stack that's used as part of an
1224 /// async wasm call.
    #[cfg(feature = "async")]
    pub struct AsyncWasmCallState {
        // The head of a linked list of activations that are currently present
        // on an async call's fiber stack. This pointer points to the oldest
        // activation frame where the `prev` links internally link to younger
        // activation frames.
        //
        // When pushed onto a thread this linked list is traversed to get pushed
        // onto the current thread at the time.
        //
        // If this pointer is null then that means that the fiber this state is
        // associated with has no activations.
        //
        // Note that this oldest-to-youngest ordering is the reverse of the
        // thread's TLS list; see `PreviousAsyncWasmCallState::restore` which
        // builds this list in reverse while popping activations.
        state: raw::Ptr,
    }
1239
    // SAFETY: This is a relatively unsafe impl and not really all that
    // well audited. The general idea is that the linked list of activations
    // owned by `self.state` are safe to send to other threads, but that relies
    // on everything internally being safe as well as stack variables and such.
    // This is more-or-less tied to the very large comment in `fiber.rs` about
    // `unsafe impl Send` there.
    #[cfg(feature = "async")]
    unsafe impl Send for AsyncWasmCallState {}
1248
    #[cfg(feature = "async")]
    impl AsyncWasmCallState {
        /// Creates new state that initially starts as null.
        pub fn new() -> AsyncWasmCallState {
            AsyncWasmCallState {
                state: core::ptr::null_mut(),
            }
        }

        /// Pushes the saved state of this wasm's call onto the current thread's
        /// state.
        ///
        /// This will iterate over the linked list of states stored within
        /// `self` and push them sequentially onto the current thread's
        /// activation list.
        ///
        /// The returned `PreviousAsyncWasmCallState` captures the state of this
        /// thread just before this operation, and it must have its `restore`
        /// method called to restore the state when the async wasm is suspended
        /// from.
        ///
        /// # Unsafety
        ///
        /// Must be carefully coordinated with
        /// `PreviousAsyncWasmCallState::restore` and fiber switches to ensure
        /// that this doesn't push stale data and the data is popped
        /// appropriately.
        pub unsafe fn push(self) -> PreviousAsyncWasmCallState {
            // First save the state of TLS as-is so when this state is popped
            // off later on we know where to stop.
            let ret = PreviousAsyncWasmCallState { state: raw::get() };

            // The oldest activation, if present, has various `VMStoreContext`
            // fields saved within it. These fields were the state for the
            // *youngest* activation when a suspension previously happened. By
            // swapping them back into the store this is an O(1) way of
            // restoring the state of a store's metadata fields at the time of
            // the suspension.
            //
            // The store's previous values before this function will all get
            // saved in the oldest activation's state on the stack. The store's
            // current state then describes the youngest activation which is
            // restored via the loop below.
            //
            // SAFETY: `self.state`, when non-null, points to a live
            // `CallThreadState` residing on the (suspended) fiber stack.
            unsafe {
                if let Some(state) = self.state.as_ref() {
                    state.swap();
                }
            }

            // Our `state` pointer is a linked list of oldest-to-youngest so by
            // pushing in order of the list we restore the youngest-to-oldest
            // list as stored in the state of this current thread.
            //
            // Note that each node's `prev` link is cleared here; `push` below
            // re-links the node into the thread's TLS list.
            let mut ptr = self.state;
            unsafe {
                while let Some(state) = ptr.as_ref() {
                    ptr = state.prev.replace(core::ptr::null_mut());
                    state.push();
                }
            }
            ret
        }

        /// Performs a runtime check that this state is indeed null.
        pub fn assert_null(&self) {
            assert!(self.state.is_null());
        }

        /// Asserts that the current CallThreadState pointer, if present, is not
        /// in the `range` specified.
        ///
        /// This is used when exiting a future in Wasmtime to assert that the
        /// current CallThreadState pointer does not point within the stack
        /// we're leaving (e.g. allocated for a fiber).
        pub fn assert_current_state_not_in_range(range: core::ops::Range<usize>) {
            let p = raw::get() as usize;
            assert!(p < range.start || range.end < p);
        }
    }
1327
    /// Opaque state used to help control TLS state across stack switches for
    /// async support.
    ///
    /// This structure is returned from [`AsyncWasmCallState::push`] and
    /// represents the state of this thread's TLS variable prior to the push
    /// operation. Dropping it without calling `restore` panics (see the
    /// `Drop` impl below).
    #[cfg(feature = "async")]
    pub struct PreviousAsyncWasmCallState {
        // The raw value of this thread's TLS pointer when this structure was
        // created. This is not dereferenced or inspected but is used to halt
        // linked list traversal in [`PreviousAsyncWasmCallState::restore`].
        state: raw::Ptr,
    }
1341
    #[cfg(feature = "async")]
    impl PreviousAsyncWasmCallState {
        /// Pops a fiber's linked list of activations and stores them in
        /// `AsyncWasmCallState`.
        ///
        /// This will pop the top activation of this current thread continuously
        /// until it reaches whatever the current activation was when
        /// [`AsyncWasmCallState::push`] was originally called.
        ///
        /// # Unsafety
        ///
        /// Must be paired with a `push` and only performed at a time when a
        /// fiber is being suspended.
        pub unsafe fn restore(self) -> AsyncWasmCallState {
            let thread_head = self.state;
            // Forget `self` so its `Drop` impl (which panics to flag a missed
            // `restore`) does not run now that restoration is happening.
            core::mem::forget(self);
            let mut ret = AsyncWasmCallState::new();
            loop {
                // If the current TLS state is as we originally found it, then
                // this loop is finished.
                //
                // Note, though, that before exiting, if the oldest
                // `CallThreadState` is present, the current state of
                // `VMStoreContext` is saved off within it. This will save the
                // current state, before this function, of `VMStoreContext`
                // into the `EntryStoreContext` stored with the oldest
                // activation. This is a bit counter-intuitive where the state
                // for the youngest activation is stored in the "old" state
                // of the oldest activation.
                //
                // What this does is restores the state of the store to just
                // before this async fiber was started. The fiber's state will
                // be entirely self-contained in the fiber itself and the
                // returned `AsyncWasmCallState`. Resumption above in
                // `AsyncWasmCallState::push` will perform the swap back into
                // the store to hook things up again.
                let ptr = raw::get();
                if ptr == thread_head {
                    // SAFETY: `ret.state`, when non-null, points to a live
                    // activation popped below on this same stack.
                    unsafe {
                        if let Some(state) = ret.state.as_ref() {
                            state.swap();
                        }
                    }

                    break ret;
                }

                // Pop this activation from the current thread's TLS state, and
                // then afterwards push it onto our own linked list within this
                // `AsyncWasmCallState`. Note that the linked list in
                // `AsyncWasmCallState` is stored in reverse order so a
                // subsequent `push` later on pushes everything in the right
                // order.
                unsafe {
                    (*ptr).pop();
                    if let Some(state) = ret.state.as_ref() {
                        (*ptr).prev.set(state);
                    }
                }
                ret.state = ptr;
            }
        }
    }
1405
    #[cfg(feature = "async")]
    impl Drop for PreviousAsyncWasmCallState {
        fn drop(&mut self) {
            // Dropping this without `restore` would leave the thread's TLS
            // activation list out of sync with the fiber that pushed onto it,
            // so treat it as a programmer error caught at runtime.
            panic!("must be consumed with `restore`");
        }
    }
1412
1413 /// Configures thread local state such that for the duration of the
1414 /// execution of `closure` any call to `with` will yield `state`, unless
1415 /// this is recursively called again.
1416 #[inline]
1417 pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
1418 struct Reset<'a> {
1419 state: &'a CallThreadState,
1420 }
1421
1422 impl Drop for Reset<'_> {
1423 #[inline]
1424 fn drop(&mut self) {
1425 unsafe {
1426 self.state.pop();
1427 }
1428 }
1429 }
1430
1431 unsafe {
1432 state.push();
1433 let reset = Reset { state };
1434 closure(reset.state)
1435 }
1436 }
1437
1438 /// Returns the last pointer configured with `set` above, if any.
1439 pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
1440 let p = raw::get();
1441 unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
1442 }
1443}