// wasmtime/runtime/vm/traphandlers/backtrace.rs

1//! Backtrace and stack walking functionality for Wasm.
2//!
//! Walking the Wasm stack is composed of
4//!
5//! 1. identifying sequences of contiguous Wasm frames on the stack
6//!    (i.e. skipping over native host frames), and
7//!
8//! 2. walking the Wasm frames within such a sequence.
9//!
10//! To perform (1) we maintain the entry stack pointer (SP) and exit frame
11//! pointer (FP) and program counter (PC) each time we call into Wasm and Wasm
12//! calls into the host via trampolines (see
13//! `crates/wasmtime/src/runtime/vm/trampolines`). The most recent entry is
14//! stored in `VMStoreContext` and older entries are saved in
15//! `CallThreadState`. This lets us identify ranges of contiguous Wasm frames on
16//! the stack.
17//!
18//! To solve (2) and walk the Wasm frames within a region of contiguous Wasm
19//! frames on the stack, we configure Cranelift's `preserve_frame_pointers =
20//! true` setting. Then we can do simple frame pointer traversal starting at the
21//! exit FP and stopping once we reach the entry SP (meaning that the next older
22//! frame is a host frame).
23
24use crate::prelude::*;
25use crate::runtime::store::StoreOpaque;
26use crate::runtime::vm::stack_switching::VMStackChain;
27use crate::runtime::vm::{
28    Unwind, VMStoreContext,
29    traphandlers::{CallThreadState, tls},
30};
31#[cfg(all(feature = "gc", feature = "stack-switching"))]
32use crate::vm::stack_switching::{VMContRef, VMStackState};
33use core::ops::ControlFlow;
34use wasmtime_unwinder::Frame;
35
/// A WebAssembly stack trace.
///
/// The wrapped `Vec<Frame>` holds the captured frames in the order the stack
/// walk visited them; `frames()` iterates in that same order.
#[derive(Debug)]
pub struct Backtrace(Vec<Frame>);
39
impl Backtrace {
    /// Returns an empty backtrace.
    pub fn empty() -> Backtrace {
        Backtrace(Vec::new())
    }

    /// Capture the current Wasm stack in a backtrace.
    ///
    /// Returns an empty backtrace when there is no `CallThreadState` in TLS
    /// (i.e. this thread has no Wasm activation to walk).
    pub fn new(store: &StoreOpaque) -> Backtrace {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                Self::new_with_trap_state(vm_store_context, unwind, state, None)
            },
            None => Backtrace(vec![]),
        })
    }

    /// Capture the current Wasm stack trace.
    ///
    /// If Wasm hit a trap, and we're calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    ///
    /// # Safety
    ///
    /// `vm_store_context` must point to a valid `VMStoreContext` belonging to
    /// `state` (it is dereferenced during the walk), and any `trap_pc_and_fp`
    /// values must describe a genuine Wasm frame on the current stack.
    pub(crate) unsafe fn new_with_trap_state(
        vm_store_context: *const VMStoreContext,
        unwind: &dyn Unwind,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
    ) -> Backtrace {
        let mut frames = vec![];
        unsafe {
            Self::trace_with_trap_state(vm_store_context, unwind, state, trap_pc_and_fp, |frame| {
                // Collect every frame; never ask the walk to stop early.
                frames.push(frame);
                ControlFlow::Continue(())
            });
        }
        Backtrace(frames)
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// Does nothing when there is no `CallThreadState` in TLS.
    #[cfg(feature = "gc")]
    pub fn trace(store: &StoreOpaque, f: impl FnMut(Frame) -> ControlFlow<()>) {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                Self::trace_with_trap_state(vm_store_context, unwind, state, None, f)
            },
            None => {}
        });
    }

    // Walk the stack of the given continuation, which must be suspended, and
    // all of its parent continuations (if any), calling `f` for each frame.
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    pub fn trace_suspended_continuation(
        store: &StoreOpaque,
        continuation: &VMContRef,
        f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace (suspended continuation) ======");

        // Only suspended continuations may be traced by this function.
        assert_eq!(
            continuation.common_stack_information.state,
            VMStackState::Suspended
        );

        let unwind = store.unwinder();

        // Start the walk at the PC/FP recorded in the continuation's control
        // context, bounded by the entry FP saved in its stack limits.
        let pc = continuation.stack.control_context_instruction_pointer();
        let fp = continuation.stack.control_context_frame_pointer();
        let trampoline_fp = continuation
            .common_stack_information
            .limits
            .last_wasm_entry_fp;

        unsafe {
            // FIXME(frank-emrich) Casting from *const to *mut pointer is
            // terrible, but we won't actually modify any of the continuations
            // here.
            let stack_chain =
                VMStackChain::Continuation(continuation as *const VMContRef as *mut VMContRef);

            if let ControlFlow::Break(()) =
                Self::trace_through_continuations(unwind, stack_chain, pc, fp, trampoline_fp, f)
            {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }

        log::trace!("====== Done Capturing Backtrace (reached end of stack chain) ======");
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// If Wasm hit a trap, and we're calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    ///
    /// # Safety
    ///
    /// `vm_store_context` must point to a valid `VMStoreContext` whose saved
    /// exit/entry PC and FP values (and stack chain) describe this thread's
    /// stack; `trap_pc_and_fp` follows the contract documented on
    /// `new_with_trap_state`.
    pub(crate) unsafe fn trace_with_trap_state(
        vm_store_context: *const VMStoreContext,
        unwind: &dyn Unwind,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace ======");

        let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
            // If we exited Wasm by catching a trap, then the Wasm-to-host
            // trampoline did not get a chance to save the last Wasm PC and FP,
            // and we need to use the plumbed-through values instead.
            Some((pc, fp)) => {
                // Sanity check: the trap state must belong to this store.
                assert!(core::ptr::eq(
                    vm_store_context,
                    state.vm_store_context.as_ptr()
                ));
                (pc, fp)
            }
            // Either there is no Wasm currently on the stack, or we exited Wasm
            // through the Wasm-to-host trampoline.
            None => unsafe {
                let pc = *(*vm_store_context).last_wasm_exit_pc.get();
                let fp = (*vm_store_context).last_wasm_exit_fp();
                (pc, fp)
            },
        };

        let stack_chain = unsafe { (*(*vm_store_context).stack_chain.get()).clone() };

        // The first value in `activations` is for the most recently running
        // wasm. We thus provide the stack chain of `first_wasm_state` to
        // traverse the potential continuation stacks. For the subsequent
        // activations, we unconditionally use `None` as the corresponding stack
        // chain. This is justified because only the most recent execution of
        // wasm may execute off the initial stack (see comments in
        // `wasmtime::invoke_wasm_and_catch_traps` for details).
        let activations =
            core::iter::once((stack_chain, last_wasm_exit_pc, last_wasm_exit_fp, unsafe {
                *(*vm_store_context).last_wasm_entry_fp.get()
            }))
            .chain(
                state
                    .iter()
                    .flat_map(|state| state.iter())
                    // Only consider activations belonging to this store.
                    .filter(|state| {
                        core::ptr::eq(vm_store_context, state.vm_store_context.as_ptr())
                    })
                    .map(|state| unsafe {
                        (
                            state.old_stack_chain(),
                            state.old_last_wasm_exit_pc(),
                            state.old_last_wasm_exit_fp(),
                            state.old_last_wasm_entry_fp(),
                        )
                    }),
            )
            // Stop at the first activation whose saved PC is 0 (no further
            // Wasm activations). Such an entry must have zeroed FP/SP as well,
            // while a live entry must carry a non-absent stack chain.
            .take_while(|(chain, pc, fp, sp)| {
                if *pc == 0 {
                    debug_assert_eq!(*fp, 0);
                    debug_assert_eq!(*sp, 0);
                } else {
                    debug_assert_ne!(chain.clone(), VMStackChain::Absent)
                }
                *pc != 0
            });

        for (chain, pc, fp, sp) in activations {
            let res =
                unsafe { Self::trace_through_continuations(unwind, chain, pc, fp, sp, &mut f) };
            if let ControlFlow::Break(()) = res {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }

        log::trace!("====== Done Capturing Backtrace (reached end of activations) ======");
    }

    /// Traces through a sequence of stacks, creating a backtrace for each one,
    /// beginning at the given `pc` and `fp`.
    ///
    /// If `chain` is `InitialStack`, we are tracing through the initial stack,
    /// and this function behaves like `trace_through_wasm`.
    /// Otherwise, we can interpret `chain` as a linked list of stacks, which
    /// ends with the initial stack. We then trace through each of these stacks
    /// individually, up to (and including) the initial stack.
    ///
    /// # Safety
    ///
    /// `pc`, `fp`, and `trampoline_fp` must delimit a valid sequence of Wasm
    /// frames on the currently running stack, and every `VMContRef` /
    /// `VMStackLimits` pointer reachable through `chain` must be valid to
    /// dereference for the duration of the walk.
    unsafe fn trace_through_continuations(
        unwind: &dyn Unwind,
        chain: VMStackChain,
        pc: usize,
        fp: usize,
        trampoline_fp: usize,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) -> ControlFlow<()> {
        use crate::runtime::vm::stack_switching::{VMContRef, VMStackLimits};

        // Handle the stack that is currently running (which may be a
        // continuation or the initial stack).
        unsafe {
            wasmtime_unwinder::visit_frames(unwind, pc, fp, trampoline_fp, &mut f)?;
        }

        // Note that the rest of this function has no effect if `chain` is
        // `Some(VMStackChain::InitialStack(_))` (i.e., there is only one stack to
        // trace through: the initial stack)

        assert_ne!(chain, VMStackChain::Absent);
        let stack_limits_vec: Vec<*mut VMStackLimits> =
            unsafe { chain.clone().into_stack_limits_iter().collect() };
        let continuations_vec: Vec<*mut VMContRef> =
            unsafe { chain.clone().into_continuation_iter().collect() };

        // The VMStackLimits of the currently running stack (whether that's a
        // continuation or the initial stack) contains undefined data, the
        // information about that stack is saved in the Store's
        // `VMStoreContext` and handled at the top of this function
        // already. That's why we ignore `stack_limits_vec[0]`.
        //
        // Note that a continuation stack's control context stores
        // information about how to resume execution *in its parent*. Thus,
        // we combine the information from continuations_vec[i] with
        // stack_limits_vec[i + 1] below to get information about a
        // particular stack.
        //
        // There must be exactly one more `VMStackLimits` object than there
        // are continuations, due to the initial stack having one, too.
        assert_eq!(stack_limits_vec.len(), continuations_vec.len() + 1);

        for i in 0..continuations_vec.len() {
            // The continuation whose control context we want to
            // access, to get information about how to continue
            // execution in its parent.
            let continuation = unsafe { &*continuations_vec[i] };

            // The stack limits describing the parent of `continuation`.
            let parent_limits = unsafe { &*stack_limits_vec[i + 1] };

            // The parent of `continuation`, if that parent is itself a
            // continuation (i.e. `continuation` is not the last one in the
            // chain). `None` means the parent is the initial stack.
            let parent_continuation = continuations_vec.get(i + 1).map(|&c| unsafe { &*c });

            let fiber_stack = continuation.fiber_stack();
            let resume_pc = fiber_stack.control_context_instruction_pointer();
            let resume_fp = fiber_stack.control_context_frame_pointer();

            // If the parent is indeed a continuation, we know the
            // boundaries of its stack and can perform some extra debugging
            // checks.
            let parent_stack_range = parent_continuation.and_then(|p| p.fiber_stack().range());
            parent_stack_range.inspect(|parent_stack_range| {
                debug_assert!(parent_stack_range.contains(&resume_fp));
                debug_assert!(parent_stack_range.contains(&parent_limits.last_wasm_entry_fp));
                debug_assert!(parent_stack_range.contains(&parent_limits.stack_limit));
            });

            unsafe {
                wasmtime_unwinder::visit_frames(
                    unwind,
                    resume_pc,
                    resume_fp,
                    parent_limits.last_wasm_entry_fp,
                    &mut f,
                )?
            }
        }
        ControlFlow::Continue(())
    }

    /// Iterate over the frames inside this backtrace.
    ///
    /// Frames are yielded in the order the stack walk captured them.
    pub fn frames<'a>(
        &'a self,
    ) -> impl ExactSizeIterator<Item = &'a Frame> + DoubleEndedIterator + 'a {
        self.0.iter()
    }
}