wasmtime/runtime/vm/traphandlers/backtrace.rs
//! Backtrace and stack walking functionality for Wasm.
//!
//! Walking the Wasm stack consists of
//!
//! 1. identifying sequences of contiguous Wasm frames on the stack
//!    (i.e. skipping over native host frames), and
//!
//! 2. walking the Wasm frames within such a sequence.
//!
//! To perform (1) we maintain the entry stack pointer (SP), as well as the
//! exit frame pointer (FP) and program counter (PC), each time we call into
//! Wasm and each time Wasm calls into the host via trampolines (see
//! `crates/wasmtime/src/runtime/vm/trampolines`). The most recent entry is
//! stored in `VMStoreContext` and older entries are saved in
//! `CallThreadState`. This lets us identify ranges of contiguous Wasm frames
//! on the stack.
//!
//! To solve (2) and walk the Wasm frames within a region of contiguous Wasm
//! frames on the stack, we configure Cranelift's `preserve_frame_pointers =
//! true` setting. Then we can do simple frame pointer traversal starting at the
//! exit FP and stopping once we reach the entry SP (meaning that the next older
//! frame is a host frame).
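//!
//! As a rough illustration of (2), and assuming the conventional frame layout
//! where `*fp` holds the caller's saved FP and the next word holds the return
//! address, such a walk looks something like this hypothetical sketch (the
//! real walk is done by `wasmtime_unwinder::visit_frames`, which handles the
//! per-platform details):
//!
//! ```ignore
//! unsafe fn walk_frame_pointers(
//!     mut fp: usize,
//!     entry_sp: usize,
//!     mut visit: impl FnMut(usize),
//! ) {
//!     while fp != 0 && fp < entry_sp {
//!         // The word above the saved frame pointer is the current frame's
//!         // return address...
//!         let return_addr = *((fp + core::mem::size_of::<usize>()) as *const usize);
//!         visit(return_addr);
//!         // ...and the saved frame pointer itself links to the next older
//!         // frame.
//!         fp = *(fp as *const usize);
//!     }
//! }
//! ```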

use crate::prelude::*;
use crate::runtime::store::StoreOpaque;
use crate::runtime::vm::stack_switching::VMStackChain;
use crate::runtime::vm::{
    Unwind, VMStoreContext,
    traphandlers::{CallThreadState, tls},
};
#[cfg(all(feature = "gc", feature = "stack-switching"))]
use crate::vm::stack_switching::{VMContRef, VMStackState};
use core::ops::ControlFlow;
use wasmtime_unwinder::Frame;

/// A WebAssembly stack trace.
#[derive(Debug)]
pub struct Backtrace(Vec<Frame>);

impl Backtrace {
    /// Returns an empty backtrace.
    pub fn empty() -> Backtrace {
        Backtrace(Vec::new())
    }

    /// Capture the current Wasm stack in a backtrace.
    pub fn new(store: &StoreOpaque) -> Backtrace {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                Self::new_with_trap_state(vm_store_context, unwind, state, None)
            },
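            // With no `CallThreadState` there is no Wasm activation on this
            // thread right now, so the backtrace is empty.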
            None => Backtrace(vec![]),
        })
    }

    /// Capture the current Wasm stack trace.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then the
    /// Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    pub(crate) unsafe fn new_with_trap_state(
        vm_store_context: *const VMStoreContext,
        unwind: &dyn Unwind,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
    ) -> Backtrace {
        let mut frames = vec![];
        Self::trace_with_trap_state(vm_store_context, unwind, state, trap_pc_and_fp, |frame| {
            frames.push(frame);
            ControlFlow::Continue(())
        });
        Backtrace(frames)
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    #[cfg(feature = "gc")]
    pub fn trace(store: &StoreOpaque, f: impl FnMut(Frame) -> ControlFlow<()>) {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                Self::trace_with_trap_state(vm_store_context, unwind, state, None, f)
            },
            None => {}
        });
    }

    // Walk the stack of the given continuation, which must be suspended, and
    // all of its parent continuations (if any).
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    pub fn trace_suspended_continuation(
        store: &StoreOpaque,
        continuation: &VMContRef,
        f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace (suspended continuation) ======");

        assert_eq!(
            continuation.common_stack_information.state,
            VMStackState::Suspended
        );

        let unwind = store.unwinder();

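        // Start walking at the PC and FP recorded in the suspended
        // continuation's control context, and stop once we reach the
        // trampoline frame that entered Wasm on this stack
        // (`last_wasm_entry_fp`).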
        let pc = continuation.stack.control_context_instruction_pointer();
        let fp = continuation.stack.control_context_frame_pointer();
        let trampoline_fp = continuation
            .common_stack_information
            .limits
            .last_wasm_entry_fp;

        unsafe {
            // FIXME(frank-emrich) Casting from *const to *mut pointer is
            // terrible, but we won't actually modify any of the continuations
            // here.
            let stack_chain =
                VMStackChain::Continuation(continuation as *const VMContRef as *mut VMContRef);

            if let ControlFlow::Break(()) =
                Self::trace_through_continuations(unwind, stack_chain, pc, fp, trampoline_fp, f)
            {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }

        log::trace!("====== Done Capturing Backtrace (reached end of stack chain) ======");
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then the
    /// Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    pub(crate) unsafe fn trace_with_trap_state(
        vm_store_context: *const VMStoreContext,
        unwind: &dyn Unwind,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace ======");

        let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
            // If we exited Wasm by catching a trap, then the Wasm-to-host
            // trampoline did not get a chance to save the last Wasm PC and FP,
            // and we need to use the plumbed-through values instead.
            Some((pc, fp)) => {
                assert!(core::ptr::eq(
                    vm_store_context,
                    state.vm_store_context.as_ptr()
                ));
                (pc, fp)
            }
            // Either there is no Wasm currently on the stack, or we exited Wasm
            // through the Wasm-to-host trampoline.
            None => {
                let pc = *(*vm_store_context).last_wasm_exit_pc.get();
                let fp = *(*vm_store_context).last_wasm_exit_fp.get();
                (pc, fp)
            }
        };

        let stack_chain = (*(*vm_store_context).stack_chain.get()).clone();

        // The first value in `activations` is for the most recently running
        // wasm. We thus provide the current stack chain from the
        // `VMStoreContext`, so that any continuation stacks that wasm is
        // executing on are traversed. Subsequent activations use the stack
        // chain saved in their `CallThreadState`; only the most recent
        // execution of wasm may execute off the initial stack (see comments
        // in `wasmtime::invoke_wasm_and_catch_traps` for details).
        let activations = core::iter::once((
            stack_chain,
            last_wasm_exit_pc,
            last_wasm_exit_fp,
            *(*vm_store_context).last_wasm_entry_fp.get(),
        ))
        .chain(
            state
                .iter()
                .flat_map(|state| state.iter())
                .filter(|state| core::ptr::eq(vm_store_context, state.vm_store_context.as_ptr()))
                .map(|state| {
                    (
                        state.old_stack_chain(),
                        state.old_last_wasm_exit_pc(),
                        state.old_last_wasm_exit_fp(),
                        state.old_last_wasm_entry_fp(),
                    )
                }),
        )
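        // An activation whose saved exit PC is zero recorded no Wasm frames;
        // stop walking at the first such entry.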
        .take_while(|(chain, pc, fp, sp)| {
            if *pc == 0 {
                debug_assert_eq!(*fp, 0);
                debug_assert_eq!(*sp, 0);
            } else {
                debug_assert_ne!(chain.clone(), VMStackChain::Absent)
            }
            *pc != 0
        });

        for (chain, pc, fp, sp) in activations {
            if let ControlFlow::Break(()) =
                Self::trace_through_continuations(unwind, chain, pc, fp, sp, &mut f)
            {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }

        log::trace!("====== Done Capturing Backtrace (reached end of activations) ======");
    }

    /// Traces through a sequence of stacks, creating a backtrace for each one,
    /// beginning at the given `pc` and `fp`.
    ///
    /// If `chain` is `InitialStack`, we are tracing through the initial stack,
    /// and this function behaves like `trace_through_wasm`.
    /// Otherwise, we can interpret `chain` as a linked list of stacks, which
    /// ends with the initial stack. We then trace through each of these stacks
    /// individually, up to (and including) the initial stack.
    unsafe fn trace_through_continuations(
        unwind: &dyn Unwind,
        chain: VMStackChain,
        pc: usize,
        fp: usize,
        trampoline_fp: usize,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) -> ControlFlow<()> {
        use crate::runtime::vm::stack_switching::{VMContRef, VMStackLimits};

        // Handle the stack that is currently running (which may be a
        // continuation or the initial stack).
        wasmtime_unwinder::visit_frames(unwind, pc, fp, trampoline_fp, &mut f)?;

        // Note that the rest of this function has no effect if `chain` is
        // `VMStackChain::InitialStack(_)` (i.e., there is only one stack to
        // trace through: the initial stack).

        assert_ne!(chain, VMStackChain::Absent);
        let stack_limits_vec: Vec<*mut VMStackLimits> =
            chain.clone().into_stack_limits_iter().collect();
        let continuations_vec: Vec<*mut VMContRef> =
            chain.clone().into_continuation_iter().collect();

        // The `VMStackLimits` of the currently running stack (whether that's a
        // continuation or the initial stack) contains undefined data; the
        // information about that stack is saved in the Store's
        // `VMStoreContext` and was already handled at the top of this
        // function. That's why we ignore `stack_limits_vec[0]`.
        //
        // Note that a continuation stack's control context stores
        // information about how to resume execution *in its parent*. Thus,
        // we combine the information from `continuations_vec[i]` with
        // `stack_limits_vec[i + 1]` below to get information about a
        // particular stack.
        //
        // There must be exactly one more `VMStackLimits` object than there
        // are continuations, due to the initial stack having one, too.
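        //
        // For example, with a chain of two continuations on top of the
        // initial stack, the two vectors line up as follows:
        //
        //   stack_limits_vec:  [running cont., parent cont., initial stack]
        //   continuations_vec: [running cont., parent cont.]
        //
        // so `continuations_vec[0]` is paired with `stack_limits_vec[1]`
        // (its parent's limits), and so on.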
        assert_eq!(stack_limits_vec.len(), continuations_vec.len() + 1);

        for i in 0..continuations_vec.len() {
            // The continuation whose control context we want to
            // access, to get information about how to continue
            // execution in its parent.
            let continuation = unsafe { &*continuations_vec[i] };

            // The stack limits describing the parent of `continuation`.
            let parent_limits = unsafe { &*stack_limits_vec[i + 1] };

            // The parent of `continuation`, if that parent is itself a
            // continuation (i.e., if `continuation` is not the last entry in
            // the chain).
            let parent_continuation = continuations_vec.get(i + 1).map(|&c| unsafe { &*c });

            let fiber_stack = continuation.fiber_stack();
            let resume_pc = fiber_stack.control_context_instruction_pointer();
            let resume_fp = fiber_stack.control_context_frame_pointer();

            // If the parent is indeed a continuation, we know the
            // boundaries of its stack and can perform some extra debugging
            // checks.
            let parent_stack_range = parent_continuation.and_then(|p| p.fiber_stack().range());
            parent_stack_range.inspect(|parent_stack_range| {
                debug_assert!(parent_stack_range.contains(&resume_fp));
                debug_assert!(parent_stack_range.contains(&parent_limits.last_wasm_entry_fp));
                debug_assert!(parent_stack_range.contains(&parent_limits.stack_limit));
            });

            wasmtime_unwinder::visit_frames(
                unwind,
                resume_pc,
                resume_fp,
                parent_limits.last_wasm_entry_fp,
                &mut f,
            )?
        }
        ControlFlow::Continue(())
    }

    /// Iterate over the frames inside this backtrace.
    pub fn frames<'a>(
        &'a self,
    ) -> impl ExactSizeIterator<Item = &'a Frame> + DoubleEndedIterator + 'a {
        self.0.iter()
    }
}