wasmtime/runtime/vm/traphandlers.rs
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.

mod backtrace;

#[cfg(feature = "coredump")]
#[path = "traphandlers/coredump_enabled.rs"]
mod coredump;
#[cfg(not(feature = "coredump"))]
#[path = "traphandlers/coredump_disabled.rs"]
mod coredump;

#[cfg(all(has_native_signals))]
mod signals;
#[cfg(all(has_native_signals))]
pub use self::signals::*;

use crate::prelude::*;
use crate::runtime::module::lookup_code;
use crate::runtime::store::{ExecutorRef, StoreOpaque};
use crate::runtime::vm::sys::traphandlers;
use crate::runtime::vm::{Instance, InterpreterRef, VMContext, VMStoreContext};
use crate::{StoreContextMut, WasmBacktrace};
use core::cell::Cell;
use core::ops::Range;
use core::ptr::{self, NonNull};

pub use self::backtrace::Backtrace;
pub use self::coredump::CoreDumpStack;
pub use self::tls::tls_eager_initialize;
#[cfg(feature = "async")]
pub use self::tls::{AsyncWasmCallState, PreviousAsyncWasmCallState};

pub use traphandlers::SignalHandler;

pub(crate) struct TrapRegisters {
    pub pc: usize,
    pub fp: usize,
}

/// Return value from `test_if_trap`.
pub(crate) enum TrapTest {
    /// Not a wasm trap, need to delegate to whatever process handler is next.
    NotWasm,
    /// This trap was handled by the embedder via custom embedding APIs.
    #[cfg(has_host_compiler_backend)]
    #[cfg_attr(miri, expect(dead_code, reason = "using #[cfg] too unergonomic"))]
    HandledByEmbedder,
    /// This is a wasm trap, it needs to be handled.
    #[cfg_attr(miri, expect(dead_code, reason = "using #[cfg] too unergonomic"))]
    Trap {
        /// How to longjmp back to the original wasm frame.
        #[cfg(has_host_compiler_backend)]
        jmp_buf: *const u8,
    },
}

fn lazy_per_thread_init() {
    traphandlers::lazy_per_thread_init();
}

/// Raises a preexisting trap and unwinds.
///
/// This function will execute the `longjmp` to make its way back to the
/// original `setjmp` performed when wasm was entered. This is currently
/// only called from the `raise` builtin of Wasmtime. This builtin is only used
/// when the host returns back to wasm and indicates that a trap should be
/// raised. In this situation the host has already stored trap information
/// within the `CallThreadState` and this is the low-level operation to actually
/// perform an unwind.
///
/// This function won't be used with Pulley, for example, as the interpreter
/// halts differently than native code. Additionally, one day this will ideally
/// be implemented by Cranelift itself without the need for a libcall, for
/// example once Cranelift implements the exception-handling proposal.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
#[cfg(has_host_compiler_backend)]
pub(super) unsafe fn raise_preexisting_trap() -> ! {
    tls::with(|info| info.unwrap().unwind())
}

/// Invokes the closure `f`, returning its result encoded as an ABI value for
/// Cranelift-compiled code.
///
/// This will invoke the closure `f` which returns a value that implements
/// `HostResult`. This trait abstracts over how host values are translated to
/// ABI values when going back into wasm. Some examples are:
///
/// * `T` - bare return types (not results) are simply returned as-is. No
///   `catch_unwind` happens, as if a trap can't happen then the host shouldn't
///   be panicking or invoking user code.
///
/// * `Result<(), E>` - this represents an ABI return value of `bool` which
///   indicates whether the call succeeded. This return value will catch panics
///   and record trap information as `E`.
///
/// * `Result<u32, E>` - the ABI return value here is `u64` where on success
///   the 32-bit result is zero-extended and `u64::MAX` as a return value
///   indicates that a trap or panic happened.
///
/// This is primarily used in conjunction with the Cranelift-and-host boundary.
/// This function acts as a bridge between the two to appropriately handle
/// encoding host values to Cranelift-understood ABIs via the `HostResult`
/// trait.
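///
/// As an illustrative sketch (not a doctest; `hypothetical_libcall` is a
/// made-up host function, and this assumes an active wasm call so that a
/// `CallThreadState` is present in TLS), a libcall returning
/// `Result<u32, TrapReason>` is bridged to its `u64` ABI value roughly like so:
///
/// ```ignore
/// // Hypothetical libcall body: returns a 32-bit value or a trap reason.
/// fn hypothetical_libcall() -> Result<u32, TrapReason> {
///     Err(TrapReason::Wasm(wasmtime_environ::Trap::TableOutOfBounds))
/// }
///
/// // On success the `u32` is zero-extended into the `u64` ABI value; on
/// // error the `u64::MAX` sentinel is returned and the trap is recorded in
/// // TLS for the unwind that Cranelift-generated code initiates next.
/// let abi: u64 = catch_unwind_and_record_trap(hypothetical_libcall);
/// assert_eq!(abi, u64::MAX);
/// ```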
pub fn catch_unwind_and_record_trap<R>(f: impl FnOnce() -> R) -> R::Abi
where
    R: HostResult,
{
    // Invoke the closure `f`, optionally catching unwinds depending on `R`. The
    // return value is always provided and if unwind information is provided
    // (e.g. `ret` is a "false"-y value) then it's recorded in TLS for the
    // unwind operation that's about to happen from Cranelift-generated code.
    let (ret, unwind) = R::maybe_catch_unwind(f);
    if let Some(unwind) = unwind {
        tls::with(|info| info.unwrap().record_unwind(unwind));
    }
    ret
}

/// A trait used in conjunction with `catch_unwind_and_record_trap` to convert a
/// Rust-based type to a specific ABI while handling traps/unwinds.
///
/// This type is implemented for return values from host function calls and
/// libcalls. The `Abi` value of this trait represents either a successful
/// execution with some payload state or that a failed execution happened. In
/// the event of a failed execution the state of the failure itself is stored
/// within `CallThreadState::unwind`. Cranelift-compiled code is expected to
/// test for this failure sentinel and process it accordingly.
///
/// See `catch_unwind_and_record_trap` for some more information as well.
pub trait HostResult {
    /// The type of the value that's returned to Cranelift-compiled code. Needs
    /// to be ABI-safe to pass through an `extern "C"` return value.
    type Abi: Copy;

    /// Executes `f` and returns the ABI/unwind information as a result.
    ///
    /// This may optionally catch unwinds during execution depending on this
    /// implementation. The ABI return value is unconditionally provided. If an
    /// unwind was detected (e.g. a host panic or a wasm trap) then that's
    /// additionally returned as well.
    ///
    /// If an unwind is returned then it's expected that when the host returns
    /// back to wasm (which should be soon after calling this through
    /// `catch_unwind_and_record_trap`) then wasm will very quickly turn around
    /// and initiate an unwind (currently through `raise_preexisting_trap`).
    fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (Self::Abi, Option<UnwindReason>);
}

// Base case implementations that do not catch unwinds. These are for libcalls
// that neither trap nor execute user code. The raw value is the ABI itself.
//
// Panics in these libcalls will result in a process abort as unwinding is not
// allowed via Rust through `extern "C"` function boundaries.
macro_rules! host_result_no_catch {
    ($($t:ty,)*) => {
        $(
            impl HostResult for $t {
                type Abi = $t;
                fn maybe_catch_unwind(f: impl FnOnce() -> $t) -> ($t, Option<UnwindReason>) {
                    (f(), None)
                }
            }
        )*
    }
}

host_result_no_catch! {
    (),
    bool,
    u32,
    *mut u8,
    u64,
}

impl HostResult for NonNull<u8> {
    type Abi = *mut u8;
    fn maybe_catch_unwind(f: impl FnOnce() -> Self) -> (*mut u8, Option<UnwindReason>) {
        (f().as_ptr(), None)
    }
}

/// Implementation of `HostResult` for `Result<T, E>`.
///
/// This is where things get interesting for `HostResult`. This is generically
/// defined to allow many shapes of the `Result` type to be returned from host
/// calls or libcalls. To do this an extra trait requirement is placed on the
/// successful result `T`: `HostResultHasUnwindSentinel`.
///
/// The general requirement is that `T` says what ABI it has, and the ABI must
/// have a sentinel value which indicates that an unwind in wasm should happen.
/// For example if `T = ()` then `true` means that the call succeeded and
/// `false` means that an unwind happened. Here the sentinel is `false` and the
/// ABI is `bool`.
///
/// This is the only implementation of `HostResult` which actually catches
/// unwinds as there's a sentinel to encode.
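///
/// A rough sketch of the encoding (illustrative only, not a doctest): for
/// `Result<(), E>` the ABI is `bool`, so `Ok(())` encodes to `true` with no
/// unwind recorded, while `Err(e)` encodes to the `false` sentinel plus an
/// `UnwindReason::Trap` describing why.
///
/// ```ignore
/// let (abi, unwind) =
///     <Result<(), TrapReason> as HostResult>::maybe_catch_unwind(|| Ok(()));
/// assert_eq!(abi, true);
/// assert!(unwind.is_none());
///
/// let (abi, unwind) = <Result<(), TrapReason> as HostResult>::maybe_catch_unwind(|| {
///     Err(TrapReason::Wasm(wasmtime_environ::Trap::UnreachableCodeReached))
/// });
/// assert_eq!(abi, false); // the sentinel tested by Cranelift-generated code
/// assert!(matches!(unwind, Some(UnwindReason::Trap(_))));
/// ```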
impl<T, E> HostResult for Result<T, E>
where
    T: HostResultHasUnwindSentinel,
    E: Into<TrapReason>,
{
    type Abi = T::Abi;

    fn maybe_catch_unwind(f: impl FnOnce() -> Result<T, E>) -> (T::Abi, Option<UnwindReason>) {
        // First prepare the closure `f` as something that'll be invoked to
        // generate the return value of this function. This is then,
        // conditionally below, passed to `catch_unwind`.
        let f = move || match f() {
            Ok(ret) => (ret.into_abi(), None),
            Err(reason) => (T::SENTINEL, Some(UnwindReason::Trap(reason.into()))),
        };

        // With `panic=unwind` use `std::panic::catch_unwind` to catch possible
        // panics to rethrow.
        #[cfg(all(feature = "std", panic = "unwind"))]
        {
            match std::panic::catch_unwind(std::panic::AssertUnwindSafe(f)) {
                Ok(result) => result,
                Err(err) => (T::SENTINEL, Some(UnwindReason::Panic(err))),
            }
        }

        // With `panic=abort` there's no use in using `std::panic::catch_unwind`
        // since it won't actually catch anything. Note that
        // `std::panic::catch_unwind` will technically optimize to this but having
        // this branch avoids using the `std::panic` module entirely.
        #[cfg(not(all(feature = "std", panic = "unwind")))]
        {
            f()
        }
    }
}

/// Trait used in conjunction with `HostResult for Result<T, E>` where this is
/// the trait bound on `T`.
///
/// This is for values in the "ok" position of a `Result` return value. Each
/// value can have a separate ABI from itself (e.g. `type Abi`) and must be
/// convertible to the ABI. Additionally all implementations of this trait have
/// a "sentinel value" which indicates that an unwind happened. This means that
/// no valid instance of `Self` should generate the `SENTINEL` via the
/// `into_abi` function.
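///
/// For illustration, a hypothetical `Pages(u32)` newtype (not a real type in
/// this crate) could pick a `u64` ABI with `u64::MAX` as its sentinel, much
/// like the `u32` implementation below:
///
/// ```ignore
/// struct Pages(u32);
///
/// unsafe impl HostResultHasUnwindSentinel for Pages {
///     type Abi = u64;
///     const SENTINEL: u64 = u64::MAX;
///     fn into_abi(self) -> u64 {
///         // Zero-extends, so a valid `Pages` can never collide with the
///         // `u64::MAX` sentinel.
///         self.0.into()
///     }
/// }
/// ```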
pub unsafe trait HostResultHasUnwindSentinel {
    /// The Cranelift-understood ABI of this value (should not be `Self`).
    type Abi: Copy;

    /// A value that indicates that an unwind should happen and is tested for in
    /// Cranelift-generated code.
    const SENTINEL: Self::Abi;

    /// Converts this value into the ABI representation. Should never return
    /// the `SENTINEL` value.
    fn into_abi(self) -> Self::Abi;
}

/// No return value from the host is represented as a `bool` in the ABI. Here
/// `true` means that execution succeeded while `false` is the sentinel used to
/// indicate an unwind.
unsafe impl HostResultHasUnwindSentinel for () {
    type Abi = bool;
    const SENTINEL: bool = false;
    fn into_abi(self) -> bool {
        true
    }
}

/// A 32-bit return value can be inflated to a 64-bit return value in the ABI.
/// In this manner a successful result is a zero-extended 32-bit value and the
/// failure sentinel is `u64::MAX` or -1 as a signed integer.
unsafe impl HostResultHasUnwindSentinel for u32 {
    type Abi = u64;
    const SENTINEL: u64 = u64::MAX;
    fn into_abi(self) -> u64 {
        self.into()
    }
}

/// If there is no actual successful result (e.g. an empty enum) then the ABI
/// can be `()`, or nothing, because there's no successful result and it's
/// always a failure.
unsafe impl HostResultHasUnwindSentinel for core::convert::Infallible {
    type Abi = ();
    const SENTINEL: () = ();
    fn into_abi(self) {
        match self {}
    }
}

/// Stores trace message with backtrace.
#[derive(Debug)]
pub struct Trap {
    /// Original reason from where this trap originated.
    pub reason: TrapReason,
    /// Wasm backtrace of the trap, if any.
    pub backtrace: Option<Backtrace>,
    /// The Wasm Coredump, if any.
    pub coredumpstack: Option<CoreDumpStack>,
}

/// Enumeration of different methods of raising a trap.
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised trap through `raise_user_trap`.
    User(Error),

    /// A trap raised from Cranelift-generated code.
    Jit {
        /// The program counter where this trap originated.
        ///
        /// This is later used with side tables from compilation to translate
        /// the trapping address to a trap code.
        pc: usize,

        /// If the trap was a memory-related trap such as SIGSEGV then this
        /// field will contain the address of the inaccessible data.
        ///
        /// Note that wasm loads/stores are not guaranteed to fill in this
        /// information. Dynamically-bounds-checked memories, for example, will
        /// not access an invalid address but may instead load from NULL or may
        /// explicitly jump to a `ud2` instruction. This is only available for
        /// fault-based traps which are one of the main ways, but not the only
        /// way, to run wasm.
        faulting_addr: Option<usize>,

        /// The trap code associated with this trap.
        trap: wasmtime_environ::Trap,
    },

    /// A trap raised from a wasm libcall
    Wasm(wasmtime_environ::Trap),
}

impl From<Error> for TrapReason {
    fn from(err: Error) -> Self {
        TrapReason::User(err)
    }
}

impl From<wasmtime_environ::Trap> for TrapReason {
    fn from(code: wasmtime_environ::Trap) -> Self {
        TrapReason::Wasm(code)
    }
}

/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// # Unsafety
///
/// This function is unsafe because during the execution of `closure` it may be
/// longjmp'd over and none of its destructors on the stack may be run.
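///
/// Illustrative sketch of a call site (not a doctest; the `store` and the
/// body of the closure are elided and hypothetical):
///
/// ```ignore
/// let result = unsafe {
///     catch_traps(&mut store, |callee_vmctx, interpreter| {
///         // Enter wasm here, e.g. through a compiled trampoline or, when
///         // `interpreter` is `Some`, through Pulley. Return `true` on
///         // success and `false` if a trap was recorded.
///         true
///     })
/// };
/// match result {
///     Ok(()) => { /* wasm returned normally */ }
///     Err(trap) => { /* `trap.reason` and `trap.backtrace` describe the trap */ }
/// }
/// ```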
pub unsafe fn catch_traps<T, F>(
    store: &mut StoreContextMut<'_, T>,
    mut closure: F,
) -> Result<(), Box<Trap>>
where
    F: FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
{
    let caller = store.0.default_caller();
    let result = CallThreadState::new(store.0, caller).with(|cx| match store.0.executor() {
        // In interpreted mode directly invoke the host closure since we won't
        // be using host-based `setjmp`/`longjmp` as that's not going to save
        // the context we want.
        ExecutorRef::Interpreter(r) => {
            cx.jmp_buf
                .set(CallThreadState::JMP_BUF_INTERPRETER_SENTINEL);
            closure(caller, Some(r))
        }

        // In native mode, however, defer to C to do the `setjmp` since Rust
        // doesn't understand `setjmp`.
        //
        // Note that here we pass C a function pointer, `call_closure`, to catch
        // the longjmp within. It passes `None` for the interpreter since this
        // branch is only ever taken if the interpreter isn't present.
        #[cfg(has_host_compiler_backend)]
        ExecutorRef::Native => traphandlers::wasmtime_setjmp(
            cx.jmp_buf.as_ptr(),
            {
                extern "C" fn call_closure<F>(payload: *mut u8, caller: NonNull<VMContext>) -> bool
                where
                    F: FnMut(NonNull<VMContext>, Option<InterpreterRef<'_>>) -> bool,
                {
                    unsafe { (*(payload as *mut F))(caller, None) }
                }

                call_closure::<F>
            },
            &mut closure as *mut F as *mut u8,
            caller,
        ),
    });

    return match result {
        Ok(x) => Ok(x),
        Err((UnwindReason::Trap(reason), backtrace, coredumpstack)) => Err(Box::new(Trap {
            reason,
            backtrace,
            coredumpstack,
        })),
        #[cfg(all(feature = "std", panic = "unwind"))]
        Err((UnwindReason::Panic(panic), _, _)) => std::panic::resume_unwind(panic),
    };
}

// Module to hide visibility of the `CallThreadState::prev` field and force
// usage of its accessor methods.
mod call_thread_state {
    use super::*;
    use crate::runtime::vm::Unwind;

    /// Temporary state stored on the stack which is registered in the `tls` module
    /// below for calls into wasm.
    pub struct CallThreadState {
        pub(super) unwind: Cell<Option<(UnwindReason, Option<Backtrace>, Option<CoreDumpStack>)>>,
        pub(super) jmp_buf: Cell<*const u8>,
        #[cfg(all(has_native_signals))]
        pub(super) signal_handler: Option<*const SignalHandler>,
        pub(super) capture_backtrace: bool,
        #[cfg(feature = "coredump")]
        pub(super) capture_coredump: bool,

        pub(crate) vm_store_context: NonNull<VMStoreContext>,
        pub(crate) unwinder: &'static dyn Unwind,

        pub(super) prev: Cell<tls::Ptr>,
        #[cfg(all(has_native_signals, unix))]
        pub(crate) async_guard_range: Range<*mut u8>,

        // The values of `VMStoreContext::last_wasm_{exit_{pc,fp},entry_sp}` for
        // the *previous* `CallThreadState` for this same store/limits. Our
        // *current* last wasm PC/FP/SP are saved in `self.vm_store_context`. We
        // save a copy of the old registers here because the `VMStoreContext`
        // typically doesn't change across nested calls into Wasm (i.e. they are
        // typically calls back into the same store and `self.vm_store_context
        // == self.prev.vm_store_context`) and we must maintain the list of
        // contiguous-Wasm-frames stack regions for backtracing purposes.
        old_last_wasm_exit_fp: Cell<usize>,
        old_last_wasm_exit_pc: Cell<usize>,
        old_last_wasm_entry_fp: Cell<usize>,
    }

    impl Drop for CallThreadState {
        fn drop(&mut self) {
            // Unwind information should not be present as it should have
            // already been processed.
            debug_assert!(self.unwind.replace(None).is_none());

            unsafe {
                let cx = self.vm_store_context.as_ref();
                *cx.last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp.get();
                *cx.last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc.get();
                *cx.last_wasm_entry_fp.get() = self.old_last_wasm_entry_fp.get();
            }
        }
    }

    impl CallThreadState {
        pub const JMP_BUF_INTERPRETER_SENTINEL: *mut u8 = 1 as *mut u8;

        #[inline]
        pub(super) fn new(store: &mut StoreOpaque, caller: NonNull<VMContext>) -> CallThreadState {
            let vm_store_context = unsafe {
                Instance::from_vmctx(caller, |i| i.vm_store_context())
                    .read()
                    .unwrap()
                    .as_non_null()
            };

            // Don't try to plumb #[cfg] everywhere for this field, just pretend
            // we're using it on miri/windows to silence compiler warnings.
            let _: Range<_> = store.async_guard_range();

            CallThreadState {
                unwind: Cell::new(None),
                unwinder: store.unwinder(),
                jmp_buf: Cell::new(ptr::null()),
                #[cfg(all(has_native_signals))]
                signal_handler: store.signal_handler(),
                capture_backtrace: store.engine().config().wasm_backtrace,
                #[cfg(feature = "coredump")]
                capture_coredump: store.engine().config().coredump_on_trap,
                vm_store_context,
                #[cfg(all(has_native_signals, unix))]
                async_guard_range: store.async_guard_range(),
                prev: Cell::new(ptr::null()),
                old_last_wasm_exit_fp: Cell::new(unsafe {
                    *vm_store_context.as_ref().last_wasm_exit_fp.get()
                }),
                old_last_wasm_exit_pc: Cell::new(unsafe {
                    *vm_store_context.as_ref().last_wasm_exit_pc.get()
                }),
                old_last_wasm_entry_fp: Cell::new(unsafe {
                    *vm_store_context.as_ref().last_wasm_entry_fp.get()
                }),
            }
        }

        /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
        pub fn old_last_wasm_exit_fp(&self) -> usize {
            self.old_last_wasm_exit_fp.get()
        }

        /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
        pub fn old_last_wasm_exit_pc(&self) -> usize {
            self.old_last_wasm_exit_pc.get()
        }

        /// Get the saved FP upon entry into Wasm for the previous `CallThreadState`.
        pub fn old_last_wasm_entry_fp(&self) -> usize {
            self.old_last_wasm_entry_fp.get()
        }

        /// Get the previous `CallThreadState`.
        pub fn prev(&self) -> tls::Ptr {
            self.prev.get()
        }

        #[inline]
        pub(crate) unsafe fn push(&self) {
            assert!(self.prev.get().is_null());
            self.prev.set(tls::raw::replace(self));
        }

        #[inline]
        pub(crate) unsafe fn pop(&self) {
            let prev = self.prev.replace(ptr::null());
            let head = tls::raw::replace(prev);
            assert!(core::ptr::eq(head, self));
        }
    }
}
pub use call_thread_state::*;

pub enum UnwindReason {
    #[cfg(all(feature = "std", panic = "unwind"))]
    Panic(Box<dyn std::any::Any + Send>),
    Trap(TrapReason),
}

impl CallThreadState {
    #[inline]
    fn with(
        mut self,
        closure: impl FnOnce(&CallThreadState) -> bool,
    ) -> Result<(), (UnwindReason, Option<Backtrace>, Option<CoreDumpStack>)> {
        let succeeded = tls::set(&mut self, |me| closure(me));
        if succeeded {
            Ok(())
        } else {
            Err(self.read_unwind())
        }
    }

    #[cold]
    fn read_unwind(&self) -> (UnwindReason, Option<Backtrace>, Option<CoreDumpStack>) {
        self.unwind.replace(None).unwrap()
    }

    /// Records the unwind information provided within this `CallThreadState`,
    /// optionally capturing a backtrace at this time.
    ///
    /// This function is used to stash metadata for why an unwind is about to
    /// happen. The actual unwind is expected to happen after this function is
    /// called using, for example, the `unwind` function below.
    ///
    /// Note that this is a relatively low-level function and will panic if
    /// misused.
    ///
    /// # Panics
    ///
    /// Panics if unwind information has already been recorded as that should
    /// have been processed first.
    fn record_unwind(&self, reason: UnwindReason) {
        if cfg!(debug_assertions) {
            let prev = self.unwind.replace(None);
            assert!(prev.is_none());
        }
        let (backtrace, coredump) = match &reason {
            // Panics don't need backtraces. There is nowhere to attach the
            // hypothetical backtrace to and it doesn't really make sense to try
            // in the first place since this is a Rust problem rather than a
            // Wasm problem.
            #[cfg(all(feature = "std", panic = "unwind"))]
            UnwindReason::Panic(_) => (None, None),
            // And if we are just propagating an existing trap that already has
            // a backtrace attached to it, then there is no need to capture a
            // new backtrace either.
            UnwindReason::Trap(TrapReason::User(err))
                if err.downcast_ref::<WasmBacktrace>().is_some() =>
            {
                (None, None)
            }
            UnwindReason::Trap(_) => (
                self.capture_backtrace(self.vm_store_context.as_ptr(), None),
                self.capture_coredump(self.vm_store_context.as_ptr(), None),
            ),
        };
        self.unwind.set(Some((reason, backtrace, coredump)));
    }

    /// Helper function to perform an actual unwinding operation.
    ///
    /// This must be preceded by a `record_unwind` operation above to be
    /// processed correctly on the other side.
    ///
    /// # Unsafety
    ///
    /// This function is not safe if the corresponding setjmp wasn't already
    /// called. Additionally this isn't safe as it will skip all Rust
    /// destructors on the stack, if there are any.
    #[cfg(has_host_compiler_backend)]
    unsafe fn unwind(&self) -> ! {
        debug_assert!(!self.jmp_buf.get().is_null());
        debug_assert!(self.jmp_buf.get() != CallThreadState::JMP_BUF_INTERPRETER_SENTINEL);
        traphandlers::wasmtime_longjmp(self.jmp_buf.get());
    }

    fn capture_backtrace(
        &self,
        limits: *const VMStoreContext,
        trap_pc_and_fp: Option<(usize, usize)>,
    ) -> Option<Backtrace> {
        if !self.capture_backtrace {
            return None;
        }

        Some(unsafe { Backtrace::new_with_trap_state(limits, self.unwinder, self, trap_pc_and_fp) })
    }

    pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = &'a Self> + 'a {
        let mut state = Some(self);
        core::iter::from_fn(move || {
            let this = state?;
            state = unsafe { this.prev().as_ref() };
            Some(this)
        })
    }

    /// Trap handler using our thread-local state.
    ///
    /// * `regs` - some special program registers at the time that the trap
    ///   happened, for example `pc`.
    /// * `faulting_addr` - the system-provided address at which a fault, if
    ///   any, happened. This is used when debug-asserting that all segfaults
    ///   are known to live within a `Store<T>` in a valid range.
    /// * `call_handler` - a closure used to invoke the platform-specific
    ///   signal handler for each instance, if available.
    ///
    /// Attempts to handle the trap if it's a wasm trap. Returns a `TrapTest`
    /// which indicates what this could be, such as:
    ///
    /// * `TrapTest::NotWasm` - not a wasm fault, this should get forwarded to
    ///   the next platform-specific fault handler.
    /// * `TrapTest::HandledByEmbedder` - the embedder `call_handler` handled
    ///   this signal, nothing else to do.
    /// * `TrapTest::Trap` - this is a wasm trap and the stack needs to be
    ///   unwound now.
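    ///
    /// As a rough sketch (hypothetical handler code; the real platform-specific
    /// logic lives in the `signals` module), a signal handler would consume the
    /// result roughly as follows:
    ///
    /// ```ignore
    /// // `state`, `regs`, `faulting_addr`, `run_custom_handler`, and
    /// // `forward_to_previous_handler` are hypothetical stand-ins for what the
    /// // real signal handler computes.
    /// match state.test_if_trap(regs, faulting_addr, |h| run_custom_handler(h)) {
    ///     // Not ours: forward to the next handler in the process.
    ///     TrapTest::NotWasm => forward_to_previous_handler(),
    ///     // The embedder's custom handler already dealt with the signal.
    ///     TrapTest::HandledByEmbedder => return,
    ///     // A genuine wasm trap: longjmp over the wasm frames to `catch_traps`.
    ///     TrapTest::Trap { jmp_buf } => unsafe { wasmtime_longjmp(jmp_buf) },
    /// }
    /// ```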
    pub(crate) fn test_if_trap(
        &self,
        regs: TrapRegisters,
        faulting_addr: Option<usize>,
        call_handler: impl Fn(&SignalHandler) -> bool,
    ) -> TrapTest {
        // If we haven't even started to handle traps yet, bail out.
        if self.jmp_buf.get().is_null() {
            return TrapTest::NotWasm;
        }

        // First up see if any instance registered has a custom trap handler,
        // in which case run them all. If anything handles the trap then we
        // return that the trap was handled.
        let _ = &call_handler;
        #[cfg(all(has_native_signals, not(miri)))]
        if let Some(handler) = self.signal_handler {
            if unsafe { call_handler(&*handler) } {
                return TrapTest::HandledByEmbedder;
            }
        }

        // If this fault wasn't in wasm code, then it's not our problem
        let Some((code, text_offset)) = lookup_code(regs.pc) else {
            return TrapTest::NotWasm;
        };

        // If the fault was at a location that was not marked as potentially
        // trapping, then that's a bug in Cranelift/Winch/etc. Don't try to
        // catch the trap and pretend this isn't wasm so the program likely
        // aborts.
        let Some(trap) = code.lookup_trap_code(text_offset) else {
            return TrapTest::NotWasm;
        };

        // If all that passed then this is indeed a wasm trap, so return the
        // `jmp_buf` passed to `wasmtime_longjmp` to resume.
        self.set_jit_trap(regs, faulting_addr, trap);
        TrapTest::Trap {
            #[cfg(has_host_compiler_backend)]
            jmp_buf: self.take_jmp_buf(),
        }
    }

    #[cfg(has_host_compiler_backend)]
    pub(crate) fn take_jmp_buf(&self) -> *const u8 {
        self.jmp_buf.replace(ptr::null())
    }

    pub(crate) fn set_jit_trap(
        &self,
        TrapRegisters { pc, fp, .. }: TrapRegisters,
        faulting_addr: Option<usize>,
        trap: wasmtime_environ::Trap,
    ) {
        let backtrace = self.capture_backtrace(self.vm_store_context.as_ptr(), Some((pc, fp)));
        let coredump = self.capture_coredump(self.vm_store_context.as_ptr(), Some((pc, fp)));
        self.unwind.set(Some((
            UnwindReason::Trap(TrapReason::Jit {
                pc,
                faulting_addr,
                trap,
            }),
            backtrace,
            coredump,
        )))
    }
}

// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
pub(crate) mod tls {
    use super::CallThreadState;

    pub use raw::Ptr;

    // An even *more* inner module for dealing with TLS. This actually has the
    // thread local variable and has functions to access the variable.
    //
    // Note that this is specially done to fully encapsulate that the accessors
    // for tls may or may not be inlined. Wasmtime's async support employs stack
    // switching which can resume execution on different OS threads. This means
    // that borrows of our TLS pointer must never live across accesses because
    // otherwise the access may be split across two threads and cause unsafety.
    //
    // This also means that extra care is taken by the runtime to save/restore
    // these TLS values when the runtime may have crossed threads.
    //
    // Note, though, that if async support is disabled at compile time then
    // these functions are free to be inlined.
    pub(super) mod raw {
        use super::CallThreadState;
        use sptr::Strict;

        pub type Ptr = *const CallThreadState;

        const _: () = {
            assert!(core::mem::align_of::<CallThreadState>() > 1);
        };
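
        // The OS-level TLS slot stores a single pointer, so two pieces of
        // information are packed into it in `tls_get`/`tls_set` below: the
        // `CallThreadState` pointer itself and, in the low bit, whether
        // `lazy_per_thread_init` has already run on this thread. The alignment
        // assertion above guarantees the low bit of a real pointer is always
        // zero and therefore free to serve as that flag.
        //
        // Illustrative encoding (addresses are made up):
        //
        //     state = 0x7fff_1000, initialized = true  -> stored 0x7fff_1001
        //     state = null,        initialized = true  -> stored 0x0000_0001
        //     state = null,        initialized = false -> stored 0x0000_0000 (default)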

        fn tls_get() -> (Ptr, bool) {
            let mut initialized = false;
            let p = Strict::map_addr(crate::runtime::vm::sys::tls_get(), |a| {
                initialized = (a & 1) != 0;
                a & !1
            });
            (p.cast(), initialized)
        }

        fn tls_set(ptr: Ptr, initialized: bool) {
            let encoded = Strict::map_addr(ptr, |a| a | usize::from(initialized));
            crate::runtime::vm::sys::tls_set(encoded.cast_mut().cast::<u8>());
        }

        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn replace(val: Ptr) -> Ptr {
            // When a new value is configured that means that we may be
            // entering WebAssembly so check to see if this thread has
            // performed per-thread initialization for traps.
            let (prev, initialized) = tls_get();
            if !initialized {
                super::super::lazy_per_thread_init();
            }
            tls_set(val, true);
            prev
        }

        /// Eagerly initialize thread-local runtime functionality. This will be performed
        /// lazily by the runtime if users do not perform it eagerly.
        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn initialize() {
            let (state, initialized) = tls_get();
            if initialized {
                return;
            }
            super::super::lazy_per_thread_init();
            tls_set(state, true);
        }

        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn get() -> Ptr {
            tls_get().0
        }
    }

    pub use raw::initialize as tls_eager_initialize;

    /// Opaque state used to persist the state of the `CallThreadState`
    /// activations associated with a fiber stack that's used as part of an
    /// async wasm call.
    #[cfg(feature = "async")]
    pub struct AsyncWasmCallState {
        // The head of a linked list of activations that are currently present
        // on an async call's fiber stack. This pointer points to the oldest
        // activation frame where the `prev` links internally link to younger
        // activation frames.
        //
        // When pushed onto a thread this linked list is traversed to get pushed
        // onto the current thread at the time.
        state: raw::Ptr,
    }

    #[cfg(feature = "async")]
    impl AsyncWasmCallState {
        /// Creates new state that initially starts as null.
        pub fn new() -> AsyncWasmCallState {
            AsyncWasmCallState {
                state: core::ptr::null_mut(),
            }
        }

        /// Pushes the saved state of this wasm's call onto the current thread's
        /// state.
        ///
        /// This will iterate over the linked list of states stored within
        /// `self` and push them sequentially onto the current thread's
        /// activation list.
        ///
        /// The returned `PreviousAsyncWasmCallState` captures the state of this
        /// thread just before this operation, and it must have its `restore`
        /// method called to restore the state when the async wasm is suspended
        /// from.
        ///
        /// # Unsafety
        ///
        /// Must be carefully coordinated with
        /// `PreviousAsyncWasmCallState::restore` and fiber switches to ensure
        /// that this doesn't push stale data and the data is popped
        /// appropriately.
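        ///
        /// A schematic of the intended pairing around a fiber switch
        /// (illustrative only; `suspended_state` is a hypothetical name and the
        /// real orchestration lives in the async fiber code):
        ///
        /// ```ignore
        /// // Resuming a fiber on this thread: splice its activations in.
        /// let prev = unsafe { suspended_state.push() };
        ///
        /// // ... run async wasm on the fiber ...
        ///
        /// // Suspending again: pop our activations back off, restoring the
        /// // thread to the state captured by `prev`.
        /// let suspended_state = unsafe { prev.restore() };
        /// ```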
        pub unsafe fn push(self) -> PreviousAsyncWasmCallState {
            // Our `state` pointer is a linked list of oldest-to-youngest so by
            // pushing in order of the list we restore the youngest-to-oldest
            // list as stored in the state of this current thread.
            let ret = PreviousAsyncWasmCallState { state: raw::get() };
            let mut ptr = self.state;
            while let Some(state) = ptr.as_ref() {
                ptr = state.prev.replace(core::ptr::null_mut());
                state.push();
            }
            ret
        }

        /// Performs a runtime check that this state is indeed null.
        pub fn assert_null(&self) {
            assert!(self.state.is_null());
        }

        /// Asserts that the current CallThreadState pointer, if present, is not
        /// in the `range` specified.
        ///
        /// This is used when exiting a future in Wasmtime to assert that the
        /// current CallThreadState pointer does not point within the stack
        /// we're leaving (e.g. allocated for a fiber).
        pub fn assert_current_state_not_in_range(range: core::ops::Range<usize>) {
            let p = raw::get() as usize;
            assert!(p < range.start || range.end < p);
        }
    }

    /// Opaque state used to help control TLS state across stack switches for
    /// async support.
    #[cfg(feature = "async")]
    pub struct PreviousAsyncWasmCallState {
        // The head of a linked list, similar to the TLS state. Note though that
        // this list is stored in reverse order to assist with `push` and `pop`
        // below.
        //
        // After a `push` call this stores the previous head for the current
        // thread so we know when to stop popping during a `pop`.
        state: raw::Ptr,
    }

    #[cfg(feature = "async")]
    impl PreviousAsyncWasmCallState {
        /// Pops a fiber's linked list of activations and stores them in
        /// `AsyncWasmCallState`.
        ///
        /// This will pop the top activation of this current thread continuously
        /// until it reaches whatever the current activation was when `push` was
        /// originally called.
        ///
        /// # Unsafety
        ///
        /// Must be paired with a `push` and only performed at a time when a
        /// fiber is being suspended.
        pub unsafe fn restore(self) -> AsyncWasmCallState {
            let thread_head = self.state;
            core::mem::forget(self);
            let mut ret = AsyncWasmCallState::new();
            loop {
                // If the current TLS state is as we originally found it, then
                // this loop is finished.
                let ptr = raw::get();
                if ptr == thread_head {
                    break ret;
                }

                // Pop this activation from the current thread's TLS state, and
                // then afterwards push it onto our own linked list within this
                // `AsyncWasmCallState`. Note that the linked list in
                // `AsyncWasmCallState` is stored in reverse order so a
                // subsequent `push` later on pushes everything in the right
                // order.
                (*ptr).pop();
                if let Some(state) = ret.state.as_ref() {
                    (*ptr).prev.set(state);
                }
                ret.state = ptr;
            }
        }
    }

    #[cfg(feature = "async")]
    impl Drop for PreviousAsyncWasmCallState {
        fn drop(&mut self) {
            panic!("must be consumed with `restore`");
        }
    }

    /// Configures thread local state such that for the duration of the
    /// execution of `closure` any call to `with` will yield `state`, unless
    /// this is recursively called again.
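    ///
    /// A sketch of the intended usage (illustrative only; in practice this is
    /// driven by `CallThreadState::with` above, and `state` here stands in for
    /// a freshly created `CallThreadState`):
    ///
    /// ```ignore
    /// let succeeded = tls::set(&mut state, |state| {
    ///     // While this closure runs, `tls::with` observes `Some(state)`.
    ///     tls::with(|current| assert!(current.is_some()));
    ///     true
    /// });
    /// // Once `set` returns the previous TLS pointer has been restored.
    /// ```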
    #[inline]
    pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
        struct Reset<'a> {
            state: &'a CallThreadState,
        }

        impl Drop for Reset<'_> {
            #[inline]
            fn drop(&mut self) {
                unsafe {
                    self.state.pop();
                }
            }
        }

        unsafe {
            state.push();
            let reset = Reset { state };
            closure(reset.state)
        }
    }

    /// Returns the last pointer configured with `set` above, if any.
    pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
        let p = raw::get();
        unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
    }
}
979}