// wasmtime/runtime/vm/traphandlers.rs
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.
3
4mod backtrace;
5
6#[cfg(feature = "coredump")]
7#[path = "traphandlers/coredump_enabled.rs"]
8mod coredump;
9#[cfg(not(feature = "coredump"))]
10#[path = "traphandlers/coredump_disabled.rs"]
11mod coredump;
12
13#[cfg(all(has_native_signals))]
14mod signals;
15#[cfg(all(has_native_signals))]
16pub use self::signals::*;
17
18#[cfg(feature = "gc")]
19use crate::ThrownException;
20use crate::runtime::module::lookup_code;
21use crate::runtime::store::{ExecutorRef, StoreOpaque};
22use crate::runtime::vm::sys::traphandlers;
23use crate::runtime::vm::{InterpreterRef, VMContext, VMStore, VMStoreContext, f32x4, f64x2, i8x16};
24#[cfg(feature = "debug")]
25use crate::store::AsStoreOpaque;
26use crate::{EntryStoreContext, prelude::*};
27use crate::{StoreContextMut, WasmBacktrace};
28use core::cell::Cell;
29use core::num::NonZeroU32;
30use core::ptr::{self, NonNull};
31use wasmtime_unwinder::Handler;
32
33pub use self::backtrace::Backtrace;
34#[cfg(feature = "debug")]
35pub(crate) use self::backtrace::CurrentActivationBacktrace;
36#[cfg(feature = "gc")]
37pub use wasmtime_unwinder::Frame;
38
39pub use self::coredump::CoreDumpStack;
40pub use self::tls::tls_eager_initialize;
41#[cfg(feature = "async")]
42pub use self::tls::{AsyncWasmCallState, PreviousAsyncWasmCallState};
43
44pub use traphandlers::SignalHandler;
45
/// Register state captured at the point a trap was raised.
///
/// Filled in by the platform signal/fault handling machinery so that the
/// trapping location can be mapped back to wasm code.
pub(crate) struct TrapRegisters {
    /// Program counter at the trap site.
    pub pc: usize,
    /// Frame pointer at the trap site.
    pub fp: usize,
}
50
/// Return value from `test_if_trap`.
pub(crate) enum TrapTest {
    /// Not a wasm trap, need to delegate to whatever process handler is next.
    NotWasm,
    /// This trap was handled by the embedder via custom embedding APIs.
    #[cfg(has_host_compiler_backend)]
    #[cfg_attr(miri, expect(dead_code, reason = "using #[cfg] too unergonomic"))]
    HandledByEmbedder,
    /// This is a wasm trap, it needs to be handled. Carries the resolved
    /// `Handler` describing where execution should resume to perform the
    /// unwind.
    Trap(Handler),
}
62
/// Performs lazy per-thread initialization by delegating to the
/// platform-specific `sys::traphandlers` implementation.
fn lazy_per_thread_init() {
    traphandlers::lazy_per_thread_init();
}
66
/// Raises a preexisting trap or exception and unwinds.
///
/// If the preexisting state has registered a trap, this function will execute
/// the `Handler::resume` to make its way back to the original exception
/// handler created when Wasm was entered. If the state has registered an
/// exception, this function will perform the unwind action registered: either
/// resetting PC, FP, and SP to the handler in the middle of the Wasm
/// activation on the stack, or the entry trampoline back to the host, if
/// the exception is uncaught.
///
/// This is currently only called from the `raise` builtin of
/// Wasmtime. This builtin is only used when the host returns back to
/// wasm and indicates that a trap or exception should be raised. In
/// this situation the host has already stored trap or exception
/// information within the `CallThreadState` and this is the low-level
/// operation to actually perform an unwind.
///
/// Note that this function is used both for Pulley and for native execution.
/// For Pulley this function will return and the interpreter will be
/// responsible for handling the control-flow transfer. For native this
/// function will not return as the control flow transfer will be handled
/// internally.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub(super) unsafe fn raise_preexisting_trap(store: &mut dyn VMStore) {
    // The `unwrap` is justified by the safety contract above: wasm is on the
    // stack, so TLS must hold a current activation.
    tls::with(|info| unsafe { info.unwrap().unwind(store) })
}
98
99/// Invokes the closure `f` and handles any error/panic/trap that happens
100/// within.
101///
102/// This will invoke the closure `f` with the provided `store` and the closure
103/// will return a value that implements `HostResult`. This trait abstracts over
104/// how host values are translated to ABI values when going back into wasm.
105/// Some examples are:
106///
107/// * `T` - bare return types (not results) are simply returned as-is. No
108/// `catch_unwind` happens as if a trap can't happen then the host shouldn't
109/// be panicking or invoking user code.
110///
111/// * `Result<(), E>` - this represents an ABI return value of `bool` which
112/// indicates whether the call succeeded. This return value will catch panics
113/// and record trap information as `E`.
114///
115/// * `Result<u32, E>` - the ABI return value here is `u64` where on success
116/// the 32-bit result is zero-extended and `u64::MAX` as a return value
117/// indicates that a trap or panic happened.
118///
119/// This is primarily used in conjunction with the Cranelift-and-host boundary.
120/// This function acts as a bridge between the two to appropriately handle
121/// encoding host values to Cranelift-understood ABIs via the `HostResult`
122/// trait.
123pub fn catch_unwind_and_record_trap<R>(
124 store: &mut dyn VMStore,
125 f: impl FnOnce(&mut dyn VMStore) -> R,
126) -> R::Abi
127where
128 R: HostResult,
129{
130 // Invoke the closure `f`, optionally catching unwinds depending on `R`. The
131 // return value is always provided and if unwind information is provided
132 // (e.g. `ret` is a "false"-y value) then it's recorded in TLS for the
133 // unwind operation that's about to happen from Cranelift-generated code.
134 let (ret, unwind) = R::maybe_catch_unwind(store, |store| f(store));
135 if let Some(unwind) = unwind {
136 tls::with(|info| info.unwrap().record_unwind(store, unwind));
137 }
138 ret
139}
140
/// A trait used in conjunction with `catch_unwind_and_record_trap` to convert a
/// Rust-based type to a specific ABI while handling traps/unwinds.
///
/// This type is implemented for return values from host function calls and
/// libcalls. The `Abi` value of this trait represents either a successful
/// execution with some payload state or that a failed execution happened. In
/// the event of a failed execution the state of the failure itself is stored
/// within `CallThreadState::unwind`. Cranelift-compiled code is expected to
/// test for this failure sentinel and process it accordingly.
///
/// See `catch_unwind_and_record_trap` for some more information as well.
pub trait HostResult {
    /// The type of the value that's returned to Cranelift-compiled code. Needs
    /// to be ABI-safe to pass through an `extern "C"` return value (e.g. a
    /// primitive, raw pointer, or SIMD vector).
    type Abi: Copy;

    /// Executes `f` and returns the ABI/unwind information as a result.
    ///
    /// This may optionally catch unwinds during execution depending on this
    /// implementation. The ABI return value is unconditionally provided. If an
    /// unwind was detected (e.g. a host panic or a wasm trap) then that's
    /// additionally returned as well.
    ///
    /// If an unwind is returned then it's expected that when the host returns
    /// back to wasm (which should be soon after calling this through
    /// `catch_unwind_and_record_trap`) then wasm will very quickly turn around
    /// and initiate an unwind (currently through `raise_preexisting_trap`).
    fn maybe_catch_unwind(
        store: &mut dyn VMStore,
        f: impl FnOnce(&mut dyn VMStore) -> Self,
    ) -> (Self::Abi, Option<UnwindReason>);
}
173
// Base case implementations that do not catch unwinds. These are for libcalls
// that neither trap nor execute user code. The raw value is the ABI itself.
//
// Panics in these libcalls will result in a process abort as unwinding is not
// allowed via Rust through `extern "C"` function boundaries.
macro_rules! host_result_no_catch {
    ($($t:ty,)*) => {
        $(
            // Each listed type is its own ABI: the value is returned
            // unchanged and no unwind information is ever produced.
            impl HostResult for $t {
                type Abi = $t;
                fn maybe_catch_unwind(
                    store: &mut dyn VMStore,
                    f: impl FnOnce(&mut dyn VMStore) -> $t,
                ) -> ($t, Option<UnwindReason>) {
                    (f(store), None)
                }
            }
        )*
    }
}
194
// Primitive numeric types, raw pointers, and SIMD vector types all pass
// through unchanged and never catch unwinds.
host_result_no_catch! {
    (),
    bool,
    u32,
    *mut u8,
    u64,
    f32,
    f64,
    i8x16,
    f32x4,
    f64x2,
}
207
208impl HostResult for NonNull<u8> {
209 type Abi = *mut u8;
210 fn maybe_catch_unwind(
211 store: &mut dyn VMStore,
212 f: impl FnOnce(&mut dyn VMStore) -> Self,
213 ) -> (*mut u8, Option<UnwindReason>) {
214 (f(store).as_ptr(), None)
215 }
216}
217
/// Implementation of `HostResult` for `Result<T, E>`.
///
/// This is where things get interesting for `HostResult`. This is generically
/// defined to allow many shapes of the `Result` type to be returned from host
/// calls or libcalls. To do this an extra trait requirement is placed on the
/// successful result `T`: `HostResultHasUnwindSentinel`.
///
/// The general requirement is that `T` says what ABI it has, and the ABI must
/// have a sentinel value which indicates that an unwind in wasm should happen.
/// For example if `T = ()` then `true` means that the call succeeded and
/// `false` means that an unwind happened. Here the sentinel is `false` and the
/// ABI is `bool`.
///
/// This is the only implementation of `HostResult` which actually catches
/// unwinds as there's a sentinel to encode.
impl<T, E> HostResult for Result<T, E>
where
    T: HostResultHasUnwindSentinel,
    E: Into<TrapReason>,
{
    type Abi = T::Abi;

    fn maybe_catch_unwind(
        store: &mut dyn VMStore,
        f: impl FnOnce(&mut dyn VMStore) -> Result<T, E>,
    ) -> (T::Abi, Option<UnwindReason>) {
        // First prepare the closure `f` as something that'll be invoked to
        // generate the return value of this function. This is then,
        // conditionally below, passed to `catch_unwind`.
        let f = move || match f(store) {
            Ok(ret) => (ret.into_abi(), None),
            Err(reason) => (T::SENTINEL, Some(UnwindReason::Trap(reason.into()))),
        };

        // With `panic=unwind` use `std::panic::catch_unwind` to catch possible
        // panics to rethrow.
        #[cfg(all(feature = "std", panic = "unwind"))]
        {
            match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
                Ok(result) => result,
                Err(err) => (T::SENTINEL, Some(UnwindReason::Panic(err))),
            }
        }

        // With `panic=abort` there's no use in using `std::panic::catch_unwind`
        // since it won't actually catch anything. Note that
        // `std::panic::catch_unwind` will technically optimize to this but having
        // this branch avoids using the `std::panic` module entirely.
        #[cfg(not(all(feature = "std", panic = "unwind")))]
        {
            f()
        }
    }
}
272
/// Trait used in conjunction with `HostResult for Result<T, E>` where this is
/// the trait bound on `T`.
///
/// This is for values in the "ok" position of a `Result` return value. Each
/// value can have a separate ABI from itself (e.g. `type Abi`) and must be
/// convertible to the ABI. Additionally all implementations of this trait have
/// a "sentinel value" which indicates that an unwind happened. This means that
/// no valid instance of `Self` should generate the `SENTINEL` via the
/// `into_abi` function.
///
/// # Safety
///
/// Implementations must guarantee that `into_abi` never produces the
/// `SENTINEL` value, as Cranelift-generated code relies on the sentinel to
/// detect unwinds.
pub unsafe trait HostResultHasUnwindSentinel {
    /// The Cranelift-understood ABI of this value (should not be `Self`).
    type Abi: Copy;

    /// A value that indicates that an unwind should happen and is tested for in
    /// Cranelift-generated code.
    const SENTINEL: Self::Abi;

    /// Converts this value into the ABI representation. Should never return
    /// the `SENTINEL` value.
    fn into_abi(self) -> Self::Abi;
}
294
/// No return value from the host is represented as a `bool` in the ABI. Here
/// `true` means that execution succeeded while `false` is the sentinel used to
/// indicate an unwind.
unsafe impl HostResultHasUnwindSentinel for () {
    type Abi = bool;
    const SENTINEL: bool = false;
    fn into_abi(self) -> bool {
        // Success is always `true`; `false` is reserved for the sentinel.
        true
    }
}
305
/// A `NonZeroU32` is returned as a plain `u32`: by construction a successful
/// value can never be zero, so zero is free to act as the unwind sentinel.
unsafe impl HostResultHasUnwindSentinel for NonZeroU32 {
    type Abi = u32;
    const SENTINEL: Self::Abi = 0;
    fn into_abi(self) -> Self::Abi {
        self.get()
    }
}
313
314/// A 32-bit return value can be inflated to a 64-bit return value in the ABI.
315/// In this manner a successful result is a zero-extended 32-bit value and the
316/// failure sentinel is `u64::MAX` or -1 as a signed integer.
317unsafe impl HostResultHasUnwindSentinel for u32 {
318 type Abi = u64;
319 const SENTINEL: u64 = u64::MAX;
320 fn into_abi(self) -> u64 {
321 self.into()
322 }
323}
324
/// If there is no actual successful result (e.g. an empty enum) then the ABI
/// can be `()`, or nothing, because there's no successful result and it's
/// always a failure.
unsafe impl HostResultHasUnwindSentinel for core::convert::Infallible {
    type Abi = ();
    const SENTINEL: () = ();
    fn into_abi(self) {
        // `Infallible` has no values, so this is statically unreachable.
        match self {}
    }
}
335
336unsafe impl HostResultHasUnwindSentinel for bool {
337 type Abi = u32;
338 const SENTINEL: Self::Abi = u32::MAX;
339 fn into_abi(self) -> Self::Abi {
340 u32::from(self)
341 }
342}
343
/// A runtime trap: the original `TrapReason` paired with the optional
/// backtrace and core dump captured when the trap was recorded.
#[derive(Debug)]
pub struct Trap {
    /// Original reason from where this trap originated.
    pub reason: TrapReason,
    /// Wasm backtrace of the trap, if any.
    pub backtrace: Option<Backtrace>,
    /// The Wasm Coredump, if any.
    pub coredumpstack: Option<CoreDumpStack>,
}
354
/// Enumeration of different methods of raising a trap (or a sentinel
/// for an exception).
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised trap through `raise_user_trap`.
    User(Error),

    /// A trap raised from Cranelift-generated code.
    Jit {
        /// The program counter where this trap originated.
        ///
        /// This is later used with side tables from compilation to translate
        /// the trapping address to a trap code.
        pc: usize,

        /// If the trap was a memory-related trap such as SIGSEGV then this
        /// field will contain the address of the inaccessible data.
        ///
        /// Note that wasm loads/stores are not guaranteed to fill in this
        /// information. Dynamically-bounds-checked memories, for example, will
        /// not access an invalid address but may instead load from NULL or may
        /// explicitly jump to a `ud2` instruction. This is only available for
        /// fault-based traps which are one of the main ways, but not the only
        /// way, to run wasm.
        faulting_addr: Option<usize>,

        /// The trap code associated with this trap.
        trap: wasmtime_environ::Trap,
    },

    /// A trap raised from a wasm libcall.
    Wasm(wasmtime_environ::Trap),

    /// An exception.
    ///
    /// Note that internally, exceptions are rooted on the Store, while
    /// when crossing the public API, exceptions are held in a
    /// `wasmtime::Exception` which contains a boxed root and implements
    /// `Error`. This choice is intentional, to keep the internal
    /// implementation lightweight and ensure the types represent only
    /// allowable states.
    #[cfg(feature = "gc")]
    Exception,
}
399
impl From<Error> for TrapReason {
    fn from(error: Error) -> Self {
        // A `ThrownException` error is the internal marker that a wasm
        // exception is pending; surface it as `TrapReason::Exception`
        // rather than as a user trap.
        #[cfg(feature = "gc")]
        if error.is::<ThrownException>() {
            return TrapReason::Exception;
        }

        // All other errors are treated as host/user-raised traps.
        TrapReason::User(error)
    }
}
410
411impl From<wasmtime_environ::Trap> for TrapReason {
412 fn from(code: wasmtime_environ::Trap) -> Self {
413 TrapReason::Wasm(code)
414 }
415}
416
/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// The `closure` is invoked with the store's default caller `VMContext` and,
/// when executing via the interpreter, a handle to that interpreter. It
/// returns `true` on success; a `false` return means an unwind was recorded
/// on the `CallThreadState` and is translated into an `Err` here.
pub fn catch_traps<T, F>(
    store: &mut StoreContextMut<'_, T>,
    old_state: &mut EntryStoreContext,
    mut closure: F,
) -> Result<()>
where
    F: FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
{
    let caller = store.0.default_caller();

    // Create a fresh activation for the duration of the call, dispatching on
    // the store's configured executor (interpreter vs. native).
    let result = CallThreadState::new(store.0, old_state).with(|_cx| match store.0.executor() {
        ExecutorRef::Interpreter(r) => closure(caller, Some(r)),
        #[cfg(has_host_compiler_backend)]
        ExecutorRef::Native => closure(caller, None),
    });

    match result {
        Ok(x) => Ok(x),
        // An uncaught wasm exception surfaces as a `ThrownException` error;
        // the exception value itself remains rooted on the store.
        #[cfg(feature = "gc")]
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Trap(TrapReason::Exception),
            backtrace: _,
            coredump_stack: _,
        }) => Err(ThrownException.into()),
        // All other traps are packaged with their captured backtrace and
        // core dump (if any) into a user-facing error.
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Trap(reason),
            backtrace,
            coredump_stack,
        }) => Err(crate::trap::from_runtime_box(
            store.0,
            Box::new(Trap {
                reason,
                backtrace,
                coredumpstack: coredump_stack,
            }),
        )),
        // Host panics that crossed wasm frames are resumed now that we're
        // safely back on the host side of the call.
        #[cfg(all(feature = "std", panic = "unwind"))]
        Err(UnwindState::UnwindToHost {
            reason: UnwindReason::Panic(panic),
            ..
        }) => std::panic::resume_unwind(panic),
        #[cfg(feature = "gc")]
        Err(UnwindState::UnwindToWasm { .. }) => {
            unreachable!("We should not have returned to the host with an UnwindToWasm state");
        }
        Err(UnwindState::None) => {
            unreachable!("We should not have gotten an error with no unwind state");
        }
    }
}
469
470// Module to hide visibility of the `CallThreadState::prev` field and force
471// usage of its accessor methods.
472mod call_thread_state {
473 use super::*;
474 use crate::EntryStoreContext;
475 use crate::runtime::vm::{Unwind, VMStackChain};
476
    /// Queued-up unwinding on the CallThreadState, ready to be
    /// enacted by `unwind()`.
    ///
    /// This represents either a request to unwind to the entry point
    /// from host, with associated data; or a request to
    /// unwind into the middle of the Wasm activation, e.g. when an
    /// exception is caught.
    pub enum UnwindState {
        /// Unwind all the way to the entry from host to Wasm, using
        /// the handler configured in the entry trampoline.
        UnwindToHost {
            /// Why the unwind is happening (trap, exception sentinel, or
            /// host panic).
            reason: UnwindReason,
            /// Wasm backtrace captured when the unwind was recorded, if
            /// enabled and applicable.
            backtrace: Option<Backtrace>,
            /// Core dump captured when the unwind was recorded, if enabled
            /// and applicable.
            coredump_stack: Option<CoreDumpStack>,
        },
        /// Unwind into Wasm. The exception destination has been
        /// resolved. Note that the payload value is still not
        /// specified, because it must remain rooted on the Store
        /// until `unwind()` actually takes the value. The first
        /// payload word in the underlying exception ABI is used to
        /// send the raw `VMExnRef`.
        #[cfg(feature = "gc")]
        UnwindToWasm(Handler),
        /// Do not unwind.
        None,
    }
503
504 impl UnwindState {
505 pub(super) fn is_none(&self) -> bool {
506 match self {
507 Self::None => true,
508 _ => false,
509 }
510 }
511 }
512
    /// Temporary state stored on the stack which is registered in the `tls`
    /// module below for calls into wasm.
    ///
    /// This structure is stored on the stack and allocated during the
    /// `catch_traps` function above. The purpose of this structure is to track
    /// the state of an "activation" or a sequence of 0-or-more contiguous
    /// WebAssembly call frames. A `CallThreadState` always lives on the stack
    /// and additionally maintains pointers to previous states to form a linked
    /// list of activations.
    ///
    /// One of the primary goals of `CallThreadState` is to store the state of
    /// various fields in `VMStoreContext` when it was created. This is done
    /// because calling WebAssembly will clobber these fields otherwise.
    ///
    /// Another major purpose of `CallThreadState` is to assist with unwinding
    /// and track state necessary when an unwind happens for the original
    /// creator of `CallThreadState` to determine why the unwind happened.
    ///
    /// Note that this structure is pointed-to from TLS, hence liberal usage of
    /// interior mutability here since that only gives access to
    /// `&CallThreadState`.
    pub struct CallThreadState {
        /// Unwind state set when initiating an unwind and read when
        /// the control transfer occurs (after the `raise` point is
        /// reached for host-code destinations and right when
        /// performing the jump for Wasm-code destinations).
        pub(super) unwind: Cell<UnwindState>,
        /// Custom signal handler obtained from the store, if any.
        #[cfg(all(has_native_signals))]
        pub(super) signal_handler: Option<*const SignalHandler>,
        /// Whether to capture a wasm backtrace on trap; snapshotted from the
        /// engine's `wasm_backtrace` configuration.
        pub(super) capture_backtrace: bool,
        /// Whether to capture a core dump on trap; snapshotted from the
        /// engine's `coredump_on_trap` configuration.
        #[cfg(feature = "coredump")]
        pub(super) capture_coredump: bool,

        /// Pointer to the store's `VMStoreContext` for this activation.
        pub(crate) vm_store_context: NonNull<VMStoreContext>,
        /// Stack-unwinding helper obtained from the store.
        pub(crate) unwinder: &'static dyn Unwind,

        /// Previous activation in the TLS-managed linked list, or null.
        pub(super) prev: Cell<tls::Ptr>,

        // The state of the runtime for the *previous* `CallThreadState` for
        // this same store. Our *current* state is saved in `self.vm_store_context`,
        // etc. We need access to the old values of these
        // fields because the `VMStoreContext` typically doesn't change across
        // nested calls into Wasm (i.e. they are typically calls back into the
        // same store and `self.vm_store_context == self.prev.vm_store_context`) and we must
        // maintain the list of contiguous-Wasm-frames stack regions for
        // backtracing purposes.
        old_state: *mut EntryStoreContext,
    }
561
    impl Drop for CallThreadState {
        fn drop(&mut self) {
            // Unwind information should not be present as it should have
            // already been processed. Note that `debug_assert!` does not
            // evaluate its argument in release builds, so the `replace` only
            // happens when debug assertions are enabled.
            debug_assert!(self.unwind.replace(UnwindState::None).is_none());
        }
    }
569
    impl CallThreadState {
        /// Creates a new activation for `store`, saving a pointer to the
        /// caller's `EntryStoreContext` in `old_state` and snapshotting the
        /// engine's backtrace/coredump configuration.
        #[inline]
        pub(super) fn new(
            store: &mut StoreOpaque,
            old_state: *mut EntryStoreContext,
        ) -> CallThreadState {
            CallThreadState {
                unwind: Cell::new(UnwindState::None),
                unwinder: store.unwinder(),
                #[cfg(all(has_native_signals))]
                signal_handler: store.signal_handler(),
                capture_backtrace: store.engine().config().wasm_backtrace,
                #[cfg(feature = "coredump")]
                capture_coredump: store.engine().config().coredump_on_trap,
                vm_store_context: store.vm_store_context_ptr(),
                prev: Cell::new(ptr::null()),
                old_state,
            }
        }

        /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
        ///
        /// # Safety
        ///
        /// Requires that the saved last Wasm trampoline FP points to
        /// a valid trampoline frame, or is null.
        pub unsafe fn old_last_wasm_exit_fp(&self) -> usize {
            let trampoline_fp = unsafe { (&*self.old_state).last_wasm_exit_trampoline_fp };
            // SAFETY: `trampoline_fp` is either a valid FP from an
            // active trampoline frame or is null.
            unsafe { VMStoreContext::wasm_exit_fp_from_trampoline_fp(trampoline_fp) }
        }

        /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
        pub unsafe fn old_last_wasm_exit_pc(&self) -> usize {
            unsafe { (&*self.old_state).last_wasm_exit_pc }
        }

        /// Get the saved FP upon entry into Wasm for the previous `CallThreadState`.
        pub unsafe fn old_last_wasm_entry_fp(&self) -> usize {
            unsafe { (&*self.old_state).last_wasm_entry_fp }
        }

        /// Get the saved `VMStackChain` for the previous `CallThreadState`.
        pub unsafe fn old_stack_chain(&self) -> VMStackChain {
            unsafe { (&*self.old_state).stack_chain.clone() }
        }

        /// Get the previous `CallThreadState`.
        pub fn prev(&self) -> tls::Ptr {
            self.prev.get()
        }

        /// Pushes this `CallThreadState` activation on to the linked list
        /// stored in TLS.
        ///
        /// This method will take the current head of the linked list, stored in
        /// our TLS pointer, and move it into `prev`. The TLS pointer is then
        /// updated to `self`.
        ///
        /// # Panics
        ///
        /// Panics if this activation is already in a linked list (e.g.
        /// `self.prev` is set).
        #[inline]
        pub(crate) unsafe fn push(&self) {
            assert!(self.prev.get().is_null());
            self.prev.set(tls::raw::replace(self));
        }

        /// Pops this `CallThreadState` from the linked list stored in TLS.
        ///
        /// This method will restore `self.prev` into the head of the linked
        /// list stored in TLS and will additionally null-out `self.prev`.
        ///
        /// # Panics
        ///
        /// Panics if this activation isn't the head of the list.
        #[inline]
        pub(crate) unsafe fn pop(&self) {
            let prev = self.prev.replace(ptr::null());
            let head = tls::raw::replace(prev);
            assert!(core::ptr::eq(head, self));
        }

        /// Swaps the state in this `CallThreadState`'s `VMStoreContext` with
        /// the state in `EntryStoreContext` that was saved when this
        /// activation was created.
        ///
        /// This method is used during suspension of a fiber to restore the
        /// store back to what it originally was and prepare it to be resumed
        /// later on. This takes various fields of `VMStoreContext` and swaps
        /// them with what was saved in `EntryStoreContext`. That restores
        /// a store to just before this activation was called but saves off the
        /// fields of this activation to get restored/resumed at a later time.
        #[cfg(feature = "async")]
        pub(super) unsafe fn swap(&self) {
            // Helper to swap an `UnsafeCell` field with its saved counterpart.
            unsafe fn swap<T>(a: &core::cell::UnsafeCell<T>, b: &mut T) {
                unsafe { core::mem::swap(&mut *a.get(), b) }
            }

            unsafe {
                let cx = self.vm_store_context.as_ref();
                swap(
                    &cx.last_wasm_exit_trampoline_fp,
                    &mut (*self.old_state).last_wasm_exit_trampoline_fp,
                );
                swap(
                    &cx.last_wasm_exit_pc,
                    &mut (*self.old_state).last_wasm_exit_pc,
                );
                swap(
                    &cx.last_wasm_entry_fp,
                    &mut (*self.old_state).last_wasm_entry_fp,
                );
                swap(
                    &cx.last_wasm_entry_sp,
                    &mut (*self.old_state).last_wasm_entry_sp,
                );
                swap(
                    &cx.last_wasm_entry_trap_handler,
                    &mut (*self.old_state).last_wasm_entry_trap_handler,
                );
                swap(&cx.stack_chain, &mut (*self.old_state).stack_chain);
            }
        }
    }
697}
698pub use call_thread_state::*;
699
700#[cfg(feature = "gc")]
701use super::compute_handler;
702
/// The reason an unwind out of a wasm activation is being performed.
pub enum UnwindReason {
    /// A host (Rust) panic was caught while wasm was on the stack; it is
    /// resumed via `std::panic::resume_unwind` once back in the host.
    #[cfg(all(feature = "std", panic = "unwind"))]
    Panic(Box<dyn std::any::Any + Send>),
    /// A trap (or the exception sentinel) is being raised.
    Trap(TrapReason),
}
708
709impl<E> From<E> for UnwindReason
710where
711 E: Into<TrapReason>,
712{
713 fn from(value: E) -> UnwindReason {
714 UnwindReason::Trap(value.into())
715 }
716}
717
718impl CallThreadState {
719 #[inline]
720 fn with(mut self, closure: impl FnOnce(&CallThreadState) -> bool) -> Result<(), UnwindState> {
721 let succeeded = tls::set(&mut self, |me| closure(me));
722 if succeeded {
723 Ok(())
724 } else {
725 Err(self.read_unwind())
726 }
727 }
728
    /// Takes the recorded unwind state, leaving `UnwindState::None` in its
    /// place. Marked `#[cold]` as it's only reached when a wasm call failed.
    #[cold]
    fn read_unwind(&self) -> UnwindState {
        self.unwind.replace(UnwindState::None)
    }
733
    /// Records the unwind information provided within this `CallThreadState`,
    /// optionally capturing a backtrace at this time.
    ///
    /// This function is used to stash metadata for why an unwind is about to
    /// happen. The actual unwind is expected to happen after this function is
    /// called using, for example, the `unwind` function below.
    ///
    /// Note that this is a relatively low-level function and will panic if
    /// mis-used.
    ///
    /// # Panics
    ///
    /// Panics if unwind information has already been recorded as that should
    /// have been processed first.
    fn record_unwind(&self, store: &mut dyn VMStore, reason: UnwindReason) {
        if cfg!(debug_assertions) {
            // Double-check no unwind was already pending; the `replace` is
            // harmless since the previous value must have been `None`.
            let prev = self.unwind.replace(UnwindState::None);
            assert!(prev.is_none());
        }
        let state = match reason {
            #[cfg(all(feature = "std", panic = "unwind"))]
            UnwindReason::Panic(err) => {
                // Panics don't need backtraces. There is nowhere to attach the
                // hypothetical backtrace to and it doesn't really make sense to try
                // in the first place since this is a Rust problem rather than a
                // Wasm problem.
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Panic(err),
                    backtrace: None,
                    coredump_stack: None,
                }
            }
            // An unwind due to an already-set pending exception
            // triggers the handler-search stack-walk. We store the
            // resolved handler if one exists. In either case, the
            // exception remains rooted in the Store until we actually
            // perform the unwind, and then gets taken and becomes the
            // payload at that point.
            #[cfg(feature = "gc")]
            UnwindReason::Trap(TrapReason::Exception) => {
                // SAFETY: we are invoking `compute_handler()` while
                // Wasm is on the stack and we have re-entered via a
                // trampoline, as required by its stack-walking logic.
                let handler = unsafe { compute_handler(store) };
                match handler {
                    Some(handler) => UnwindState::UnwindToWasm(handler),
                    // No handler found: the exception is uncaught and
                    // propagates all the way out to the host.
                    None => UnwindState::UnwindToHost {
                        reason: UnwindReason::Trap(TrapReason::Exception),
                        backtrace: None,
                        coredump_stack: None,
                    },
                }
            }
            // And if we are just propagating an existing trap that already has
            // a backtrace attached to it, then there is no need to capture a
            // new backtrace either.
            UnwindReason::Trap(TrapReason::User(err))
                if err.downcast_ref::<WasmBacktrace>().is_some() =>
            {
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    backtrace: None,
                    coredump_stack: None,
                }
            }
            UnwindReason::Trap(trap) => {
                log::trace!("Capturing backtrace and coredump for {trap:?}");
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(trap),
                    backtrace: self.capture_backtrace(store.vm_store_context_mut(), None),
                    coredump_stack: self.capture_coredump(store.vm_store_context_mut(), None),
                }
            }
        };

        // Avoid unused-variable warning in non-exceptions/GC build.
        let _ = store;

        self.unwind.set(state);
    }
814
    /// Helper function to perform an actual unwinding operation.
    ///
    /// This must be preceded by a `record_unwind` operation above to be
    /// processed correctly on the other side.
    ///
    /// # Unsafety
    ///
    /// This function is not safe if a corresponding handler wasn't already
    /// setup in the entry trampoline. Additionally this isn't safe as it may
    /// skip all Rust destructors on the stack, if there are any, for native
    /// executors as `Handler::resume` will be used.
    unsafe fn unwind(&self, store: &mut dyn VMStore) {
        #[allow(unused_mut, reason = "only mutated in `debug` configuration")]
        let mut unwind = self.unwind.replace(UnwindState::None);

        #[cfg(feature = "debug")]
        {
            // Notify any attached debugger of the unwind-triggering event
            // before performing the actual control transfer below.
            let result = match &unwind {
                UnwindState::UnwindToWasm(_) => {
                    assert!(store.as_store_opaque().has_pending_exception());
                    let exn = store
                        .as_store_opaque()
                        .pending_exception_owned_rooted()
                        .expect("exception should be set when we are throwing");
                    store.block_on_debug_handler(crate::DebugEvent::CaughtExceptionThrown(exn))
                }

                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Exception),
                    ..
                } => {
                    use crate::store::AsStoreOpaque;
                    let exn = store
                        .as_store_opaque()
                        .pending_exception_owned_rooted()
                        .expect("exception should be set when we are throwing");
                    store.block_on_debug_handler(crate::DebugEvent::UncaughtExceptionThrown(
                        exn.clone(),
                    ))
                }
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Wasm(trap)),
                    ..
                } => store.block_on_debug_handler(crate::DebugEvent::Trap(*trap)),
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    ..
                } => store.block_on_debug_handler(crate::DebugEvent::HostcallError(err)),

                UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::Jit { .. }),
                    ..
                } => {
                    // JIT traps not handled yet.
                    Ok(())
                }
                #[cfg(all(feature = "std", panic = "unwind"))]
                UnwindState::UnwindToHost {
                    reason: UnwindReason::Panic(_),
                    ..
                } => {
                    // We don't invoke any debugger hook when we're
                    // unwinding due to a Rust (host-side) panic.
                    Ok(())
                }

                UnwindState::None => unreachable!(),
            };

            // If the debugger invocation itself resulted in an `Err`
            // (which can only come from the `block_on` hitting a
            // failure mode), we need to override our unwind as if we
            // were handling a host error.
            if let Err(err) = result {
                unwind = UnwindState::UnwindToHost {
                    reason: UnwindReason::Trap(TrapReason::User(err)),
                    backtrace: None,
                    coredump_stack: None,
                };
            }
        }

        match unwind {
            UnwindState::UnwindToHost { .. } => {
                // Re-store the state so the host side of the entry trampoline
                // can read it back out, then transfer control to the entry
                // trampoline's handler with empty payload words.
                self.unwind.set(unwind);
                let handler = self.entry_trap_handler();
                let payload1 = 0;
                let payload2 = 0;
                unsafe {
                    self.resume_to_exception_handler(
                        store.executor(),
                        &handler,
                        payload1,
                        payload2,
                    );
                }
            }
            #[cfg(feature = "gc")]
            UnwindState::UnwindToWasm(handler) => {
                // Take the pending exception at this time and use it as payload.
                let payload1 = usize::try_from(
                    store
                        .take_pending_exception()
                        .unwrap()
                        .as_gc_ref()
                        .as_raw_u32(),
                )
                .expect("GC ref does not fit in usize");
                // We only use one of the payload words.
                let payload2 = 0;
                unsafe {
                    self.resume_to_exception_handler(
                        store.executor(),
                        &handler,
                        payload1,
                        payload2,
                    );
                }
            }
            UnwindState::None => {
                panic!("Attempting to unwind with no unwind state set.");
            }
        }
    }
939
940 pub(crate) fn entry_trap_handler(&self) -> Handler {
941 unsafe {
942 let vm_store_context = self.vm_store_context.as_ref();
943 let fp = *vm_store_context.last_wasm_entry_fp.get();
944 let sp = *vm_store_context.last_wasm_entry_sp.get();
945 let pc = *vm_store_context.last_wasm_entry_trap_handler.get();
946 Handler { pc, sp, fp }
947 }
948 }
949
950 unsafe fn resume_to_exception_handler(
951 &self,
952 executor: ExecutorRef<'_>,
953 handler: &Handler,
954 payload1: usize,
955 payload2: usize,
956 ) {
957 unsafe {
958 match executor {
959 ExecutorRef::Interpreter(mut r) => {
960 r.resume_to_exception_handler(handler, payload1, payload2)
961 }
962 #[cfg(has_host_compiler_backend)]
963 ExecutorRef::Native => handler.resume_tailcc(payload1, payload2),
964 }
965 }
966 }
967
968 fn capture_backtrace(
969 &self,
970 limits: *const VMStoreContext,
971 trap_pc_and_fp: Option<(usize, usize)>,
972 ) -> Option<Backtrace> {
973 if !self.capture_backtrace {
974 return None;
975 }
976
977 Some(unsafe { Backtrace::new_with_trap_state(limits, self.unwinder, self, trap_pc_and_fp) })
978 }
979
980 pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Self> + 'a {
981 let mut state = Some(self);
982 core::iter::from_fn(move || {
983 let this = state?;
984 state = unsafe { this.prev().as_ref() };
985 Some(this)
986 })
987 }
988
    /// Trap handler using our thread-local state.
    ///
    /// * `regs` - some special program registers at the time that the trap
    ///   happened, for example `pc`.
    /// * `faulting_addr` - the system-provided address that the fault, if
    ///   any, happened at. This is used when debug-asserting that all segfaults
    ///   are known to live within a `Store<T>` in a valid range.
    /// * `call_handler` - a closure used to invoke the platform-specific
    ///   signal handler for each instance, if available.
    ///
    /// Attempts to handle the trap if it's a wasm trap. Returns a `TrapTest`
    /// which indicates what this could be, such as:
    ///
    /// * `TrapTest::NotWasm` - not a wasm fault, this should get forwarded to
    ///   the next platform-specific fault handler.
    /// * `TrapTest::HandledByEmbedder` - the embedder `call_handler` handled
    ///   this signal, nothing else to do.
    /// * `TrapTest::Trap` - this is a wasm trap and the stack needs to be
    ///   unwound now.
    pub(crate) fn test_if_trap(
        &self,
        regs: TrapRegisters,
        faulting_addr: Option<usize>,
        call_handler: impl FnOnce(&SignalHandler) -> bool,
    ) -> TrapTest {
        // First up see if any instance registered has a custom trap handler,
        // in which case run them all. If anything handles the trap then we
        // return that the trap was handled.
        //
        // The `let _` silences an unused-variable warning in configurations
        // where the `#[cfg]`'d block below is compiled out.
        let _ = &call_handler;
        #[cfg(all(has_native_signals, not(miri)))]
        if let Some(handler) = self.signal_handler {
            // SAFETY: `signal_handler`, when set, presumably points at a
            // handler that outlives this activation — TODO confirm at the
            // registration site.
            if unsafe { call_handler(&*handler) } {
                return TrapTest::HandledByEmbedder;
            }
        }

        // If this fault wasn't in wasm code, then it's not our problem
        let Some((code, text_offset)) = lookup_code(regs.pc) else {
            return TrapTest::NotWasm;
        };

        // If the fault was at a location that was not marked as potentially
        // trapping, then that's a bug in Cranelift/Winch/etc. Don't try to
        // catch the trap and pretend this isn't wasm so the program likely
        // aborts.
        let Some(trap) = code.lookup_trap_code(text_offset) else {
            return TrapTest::NotWasm;
        };

        // If all that passed then this is indeed a wasm trap, so return the
        // `Handler` setup in the original wasm frame.
        self.set_jit_trap(regs, faulting_addr, trap);
        let entry_handler = self.entry_trap_handler();
        TrapTest::Trap(entry_handler)
    }
1044
1045 pub(crate) fn set_jit_trap(
1046 &self,
1047 TrapRegisters { pc, fp, .. }: TrapRegisters,
1048 faulting_addr: Option<usize>,
1049 trap: wasmtime_environ::Trap,
1050 ) {
1051 let backtrace = self.capture_backtrace(self.vm_store_context.as_ptr(), Some((pc, fp)));
1052 let coredump_stack = self.capture_coredump(self.vm_store_context.as_ptr(), Some((pc, fp)));
1053 self.unwind.set(UnwindState::UnwindToHost {
1054 reason: UnwindReason::Trap(TrapReason::Jit {
1055 pc,
1056 faulting_addr,
1057 trap,
1058 }),
1059 backtrace,
1060 coredump_stack,
1061 });
1062 }
1063}
1064
1065/// A private inner module managing the state of Wasmtime's thread-local storage
1066/// (TLS) state.
1067///
1068/// Wasmtime at this time has a single pointer of TLS. This single pointer of
1069/// TLS is the totality of all TLS required by Wasmtime. By keeping this as
1070/// small as possible it generally makes it easier to integrate with external
1071/// systems and implement features such as fiber context switches. This single
1072/// TLS pointer is declared in platform-specific modules to handle platform
1073/// differences, so this module here uses getters/setters which delegate to
1074/// platform-specific implementations.
1075///
1076/// The single TLS pointer used by Wasmtime is morally
1077/// `Option<&CallThreadState>` meaning that it's a possibly-present pointer to
1078/// some state. This pointer is a pointer to the most recent (youngest)
1079/// `CallThreadState` activation, or the most recent call into WebAssembly.
1080///
1081/// This TLS pointer is additionally the head of a linked list of activations
/// that are all stored on the stack for the current thread. Each time
/// WebAssembly is recursively invoked by an embedder, a new entry is pushed
/// onto this linked list. This singly-linked list is maintained with its head
/// in TLS, and node pointers are stored in `CallThreadState::prev`.
1086///
1087/// An example stack might look like this:
1088///
1089/// ```text
1090/// ┌─────────────────────┐◄───── highest, or oldest, stack address
1091/// │ native stack frames │
1092/// │ ... │
1093/// │ ┌───────────────┐◄─┼──┐
1094/// │ │CallThreadState│ │ │
1095/// │ └───────────────┘ │ p
1096/// ├─────────────────────┤ r
1097/// │ wasm stack frames │ e
1098/// │ ... │ v
1099/// ├─────────────────────┤ │
1100/// │ native stack frames │ │
1101/// │ ... │ │
1102/// │ ┌───────────────┐◄─┼──┼── TLS pointer
1103/// │ │CallThreadState├──┼──┘
1104/// │ └───────────────┘ │
1105/// ├─────────────────────┤
1106/// │ wasm stack frames │
1107/// │ ... │
1108/// ├─────────────────────┤
1109/// │ native stack frames │
1110/// │ ... │
1111/// └─────────────────────┘◄───── smallest, or youngest, stack address
1112/// ```
1113///
1114/// # Fibers and async
1115///
1116/// Wasmtime supports stack-switching with fibers to implement async. This means
1117/// that Wasmtime will temporarily execute code on a separate stack and then
1118/// suspend from this stack back to the embedder for async operations. Doing
1119/// this safely requires manual management of the TLS pointer updated by
1120/// Wasmtime.
1121///
1122/// For example when a fiber is suspended that means that the TLS pointer needs
1123/// to be restored to whatever it was when the fiber was resumed. Additionally
1124/// this may need to pop multiple `CallThreadState` activations, one for each
1125/// one located on the fiber stack itself.
1126///
1127/// The `AsyncWasmCallState` and `PreviousAsyncWasmCallState` structures in this
1128/// module are used to manage this state, namely:
1129///
1130/// * The `AsyncWasmCallState` structure represents the state of a suspended
1131/// fiber. This is a linked list, in reverse order, from oldest activation on
1132/// the fiber to youngest activation on the fiber.
1133///
1134/// * The `PreviousAsyncWasmCallState` structure represents a pointer within our
1135/// thread's TLS linked list of activations when a fiber was resumed. This
1136/// pointer is used during fiber suspension to know when to stop popping
1137/// activations from the thread's linked list.
1138///
1139/// Note that this means that the directionality of linked list links is
1140/// opposite when stored in TLS vs when stored for a suspended fiber. The
1141/// thread's current list pointed to by TLS is youngest-to-oldest links, while a
1142/// suspended fiber stores oldest-to-youngest links.
pub(crate) mod tls {
    use super::CallThreadState;

    pub use raw::Ptr;

    // An even *more* inner module for dealing with TLS. This actually has the
    // thread local variable and has functions to access the variable.
    //
    // Note that this is specially done to fully encapsulate that the accessors
    // for tls may or may not be inlined. Wasmtime's async support employs stack
    // switching which can resume execution on different OS threads. This means
    // that borrows of our TLS pointer must never live across accesses because
    // otherwise the access may be split across two threads and cause unsafety.
    //
    // This also means that extra care is taken by the runtime to save/restore
    // these TLS values when the runtime may have crossed threads.
    //
    // Note, though, that if async support is disabled at compile time then
    // these functions are free to be inlined.
    pub(super) mod raw {
        use super::CallThreadState;

        pub type Ptr = *const CallThreadState;

        // The raw TLS word packs a per-thread "initialized" flag into bit 0
        // of the `CallThreadState` pointer, so the pointee's alignment must
        // exceed 1 to guarantee that bit is zero in any real pointer.
        const _: () = {
            assert!(core::mem::align_of::<CallThreadState>() > 1);
        };

        /// Reads the raw TLS word, decoding it into the stored activation
        /// pointer and the "initialized" flag packed into bit 0.
        fn tls_get() -> (Ptr, bool) {
            let mut initialized = false;
            let p = crate::runtime::vm::sys::tls_get().map_addr(|a| {
                initialized = (a & 1) != 0;
                a & !1
            });
            (p.cast(), initialized)
        }

        /// Writes the raw TLS word, re-packing `initialized` into bit 0 of
        /// `ptr`'s address.
        fn tls_set(ptr: Ptr, initialized: bool) {
            let encoded = ptr.map_addr(|a| a | usize::from(initialized));
            crate::runtime::vm::sys::tls_set(encoded.cast_mut().cast::<u8>());
        }

        /// Replaces the current TLS activation pointer with `val`, returning
        /// the previous pointer, and lazily runs per-thread trap-handler
        /// initialization the first time this is called on a thread.
        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn replace(val: Ptr) -> Ptr {
            // When a new value is configured that means that we may be
            // entering WebAssembly so check to see if this thread has
            // performed per-thread initialization for traps.
            let (prev, initialized) = tls_get();
            if !initialized {
                super::super::lazy_per_thread_init();
            }
            tls_set(val, true);
            prev
        }

        /// Eagerly initialize thread-local runtime functionality. This will be performed
        /// lazily by the runtime if users do not perform it eagerly.
        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn initialize() {
            let (state, initialized) = tls_get();
            if initialized {
                return;
            }
            super::super::lazy_per_thread_init();
            // Preserve the current pointer; only the flag bit changes.
            tls_set(state, true);
        }

        /// Returns the current TLS activation pointer (possibly null).
        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn get() -> Ptr {
            tls_get().0
        }
    }

    pub use raw::initialize as tls_eager_initialize;

    /// Opaque state used to persist the state of the `CallThreadState`
    /// activations associated with a fiber stack that's used as part of an
    /// async wasm call.
    #[cfg(feature = "async")]
    pub struct AsyncWasmCallState {
        // The head of a linked list of activations that are currently present
        // on an async call's fiber stack. This pointer points to the oldest
        // activation frame where the `prev` links internally link to younger
        // activation frames.
        //
        // When pushed onto a thread this linked list is traversed to get pushed
        // onto the current thread at the time.
        //
        // If this pointer is null then that means that the fiber this state is
        // associated with has no activations.
        state: raw::Ptr,
    }

    // SAFETY: This is a relatively unsafe unsafe block and not really all that
    // well audited. The general idea is that the linked list of activations
    // owned by `self.state` are safe to send to other threads, but that relies
    // on everything internally being safe as well as stack variables and such.
    // This is more-or-less tied to the very large comment in `fiber.rs` about
    // `unsafe impl Send` there.
    #[cfg(feature = "async")]
    unsafe impl Send for AsyncWasmCallState {}

    #[cfg(feature = "async")]
    impl AsyncWasmCallState {
        /// Creates new state that initially starts as null.
        pub fn new() -> AsyncWasmCallState {
            AsyncWasmCallState {
                state: core::ptr::null_mut(),
            }
        }

        /// Pushes the saved state of this wasm's call onto the current thread's
        /// state.
        ///
        /// This will iterate over the linked list of states stored within
        /// `self` and push them sequentially onto the current thread's
        /// activation list.
        ///
        /// The returned `PreviousAsyncWasmCallState` captures the state of this
        /// thread just before this operation, and it must have its `restore`
        /// method called to restore the state when the async wasm is suspended
        /// from.
        ///
        /// # Unsafety
        ///
        /// Must be carefully coordinated with
        /// `PreviousAsyncWasmCallState::restore` and fiber switches to ensure
        /// that this doesn't push stale data and the data is popped
        /// appropriately.
        pub unsafe fn push(self) -> PreviousAsyncWasmCallState {
            // First save the state of TLS as-is so when this state is popped
            // off later on we know where to stop.
            let ret = PreviousAsyncWasmCallState { state: raw::get() };

            // The oldest activation, if present, has various `VMStoreContext`
            // fields saved within it. These fields were the state for the
            // *youngest* activation when a suspension previously happened. By
            // swapping them back into the store this is an O(1) way of
            // restoring the state of a store's metadata fields at the time of
            // the suspension.
            //
            // The store's previous values before this function will all get
            // saved in the oldest activation's state on the stack. The store's
            // current state then describes the youngest activation which is
            // restored via the loop below.
            unsafe {
                if let Some(state) = self.state.as_ref() {
                    state.swap();
                }
            }

            // Our `state` pointer is a linked list of oldest-to-youngest so by
            // pushing in order of the list we restore the youngest-to-oldest
            // list as stored in the state of this current thread.
            //
            // Each node's `prev` link is cleared before `push` because `push`
            // re-links the node into the thread's TLS list.
            let mut ptr = self.state;
            unsafe {
                while let Some(state) = ptr.as_ref() {
                    ptr = state.prev.replace(core::ptr::null_mut());
                    state.push();
                }
            }
            ret
        }

        /// Performs a runtime check that this state is indeed null.
        pub fn assert_null(&self) {
            assert!(self.state.is_null());
        }

        /// Asserts that the current CallThreadState pointer, if present, is not
        /// in the `range` specified.
        ///
        /// This is used when exiting a future in Wasmtime to assert that the
        /// current CallThreadState pointer does not point within the stack
        /// we're leaving (e.g. allocated for a fiber).
        pub fn assert_current_state_not_in_range(range: core::ops::Range<usize>) {
            // NOTE(review): `p == range.end` also trips this assert even
            // though `end` is typically exclusive — presumably deliberate
            // conservatism; confirm against callers.
            let p = raw::get() as usize;
            assert!(p < range.start || range.end < p);
        }
    }

    /// Opaque state used to help control TLS state across stack switches for
    /// async support.
    ///
    /// This structure is returned from [`AsyncWasmCallState::push`] and
    /// represents the state of this thread's TLS variable prior to the push
    /// operation.
    #[cfg(feature = "async")]
    pub struct PreviousAsyncWasmCallState {
        // The raw value of this thread's TLS pointer when this structure was
        // created. This is not dereferenced or inspected but is used to halt
        // linked list traversal in [`PreviousAsyncWasmCallState::restore`].
        state: raw::Ptr,
    }

    #[cfg(feature = "async")]
    impl PreviousAsyncWasmCallState {
        /// Pops a fiber's linked list of activations and stores them in
        /// `AsyncWasmCallState`.
        ///
        /// This will pop the top activation of this current thread continuously
        /// until it reaches whatever the current activation was when
        /// [`AsyncWasmCallState::push`] was originally called.
        ///
        /// # Unsafety
        ///
        /// Must be paired with a `push` and only performed at a time when a
        /// fiber is being suspended.
        pub unsafe fn restore(self) -> AsyncWasmCallState {
            // `forget` suppresses the panicking `Drop` below: consuming this
            // value via `restore` is exactly the intended use.
            let thread_head = self.state;
            core::mem::forget(self);
            let mut ret = AsyncWasmCallState::new();
            loop {
                // If the current TLS state is as we originally found it, then
                // this loop is finished.
                //
                // Note, though, that before exiting, if the oldest
                // `CallThreadState` is present, the current state of
                // `VMStoreContext` is saved off within it. This will save the
                // current state, before this function, of `VMStoreContext`
                // into the `EntryStoreContext` stored with the oldest
                // activation. This is a bit counter-intuitive where the state
                // for the youngest activation is stored in the "old" state
                // of the oldest activation.
                //
                // What this does is restores the state of the store to just
                // before this async fiber was started. The fiber's state will
                // be entirely self-contained in the fiber itself and the
                // returned `AsyncWasmCallState`. Resumption above in
                // `AsyncWasmCallState::push` will perform the swap back into
                // the store to hook things up again.
                let ptr = raw::get();
                if ptr == thread_head {
                    unsafe {
                        if let Some(state) = ret.state.as_ref() {
                            state.swap();
                        }
                    }

                    break ret;
                }

                // Pop this activation from the current thread's TLS state, and
                // then afterwards push it onto our own linked list within this
                // `AsyncWasmCallState`. Note that the linked list in
                // `AsyncWasmCallState` is stored in reverse order so a
                // subsequent `push` later on pushes everything in the right
                // order.
                unsafe {
                    (*ptr).pop();
                    if let Some(state) = ret.state.as_ref() {
                        (*ptr).prev.set(state);
                    }
                }
                ret.state = ptr;
            }
        }
    }

    #[cfg(feature = "async")]
    impl Drop for PreviousAsyncWasmCallState {
        // Dropping this without calling `restore` would leave TLS pointing
        // into a fiber stack that may be freed, so fail loudly instead.
        fn drop(&mut self) {
            panic!("must be consumed with `restore`");
        }
    }

    /// Configures thread local state such that for the duration of the
    /// execution of `closure` any call to `with` will yield `state`, unless
    /// this is recursively called again.
    #[inline]
    pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
        // Drop guard ensuring the activation is popped on every exit path
        // from `closure`, including panics.
        struct Reset<'a> {
            state: &'a CallThreadState,
        }

        impl Drop for Reset<'_> {
            #[inline]
            fn drop(&mut self) {
                unsafe {
                    self.state.pop();
                }
            }
        }

        unsafe {
            state.push();
            let reset = Reset { state };
            closure(reset.state)
        }
    }

    /// Returns the last pointer configured with `set` above, if any.
    pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
        let p = raw::get();
        // SAFETY: a non-null TLS pointer refers to a `CallThreadState` kept
        // alive by an enclosing `set` on this thread; the borrow handed to
        // `closure` cannot escape it.
        unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
    }
}