// wasmtime/runtime/vm/traphandlers/backtrace.rs
//! Backtrace and stack walking functionality for Wasm.
//!
//! Walking the Wasm stack consists of
//!
//! 1. identifying sequences of contiguous Wasm frames on the stack
//!    (i.e. skipping over native host frames), and
//!
//! 2. walking the Wasm frames within such a sequence.
//!
//! To perform (1) we maintain the entry stack pointer (SP) and exit frame
//! pointer (FP) and program counter (PC) each time we call into Wasm and Wasm
//! calls into the host via trampolines (see
//! `crates/wasmtime/src/runtime/vm/trampolines`). The most recent entry is
//! stored in `VMStoreContext` and older entries are saved in
//! `CallThreadState`. This lets us identify ranges of contiguous Wasm frames on
//! the stack.
//!
//! To solve (2) and walk the Wasm frames within a region of contiguous Wasm
//! frames on the stack, we configure Cranelift's `preserve_frame_pointers =
//! true` setting. Then we can do simple frame pointer traversal starting at the
//! exit FP and stopping once we reach the entry SP (meaning that the next older
//! frame is a host frame).

24use crate::prelude::*;
25use crate::runtime::store::StoreOpaque;
26use crate::runtime::vm::stack_switching::VMStackChain;
27use crate::runtime::vm::{
28 Unwind, VMStoreContext,
29 traphandlers::{CallThreadState, tls},
30};
31#[cfg(all(feature = "gc", feature = "stack-switching"))]
32use crate::vm::stack_switching::{VMContRef, VMStackState};
33use core::ops::ControlFlow;
34use wasmtime_unwinder::Frame;
35#[cfg(feature = "debug")]
36use wasmtime_unwinder::FrameCursor;
37
/// A WebAssembly stack trace.
///
/// Wraps the list of frames collected while walking the stack; iterate
/// them via [`Backtrace::frames`]. Frames are stored in the order the
/// stack walk visits them (starting from the most recent exit frame;
/// see the module docs).
#[derive(Debug)]
pub struct Backtrace(Vec<Frame>);
41
/// One activation: information sufficient to trace an activation on a
/// frame as long as that frame remains alive.
pub(crate) struct Activation {
    // PC at which execution last exited this activation (or trapped).
    exit_pc: usize,
    // FP of the youngest Wasm frame in this activation; frame-pointer
    // traversal of the activation starts here (see module docs).
    exit_fp: usize,
    // FP of the trampoline frame through which the host entered this
    // activation; traversal stops once it reaches this frame.
    entry_trampoline_fp: usize,
}
49
50impl Activation {
51 /// Create a frame cursor starting at the exit frame of this activation.
52 ///
53 /// # Safety
54 ///
55 /// This activation must currently be valid (i.e., execution must
56 /// not have returned into the activation to unwind any frames,
57 /// and the stack must not have been freed).
58 #[cfg(feature = "debug")]
59 pub(crate) unsafe fn cursor(&self) -> FrameCursor {
60 // SAFETY: validity of this activation is ensured by our
61 // safety condition.
62 unsafe { FrameCursor::new(self.exit_pc, self.exit_fp, self.entry_trampoline_fp) }
63 }
64}
65
impl Backtrace {
    /// Returns an empty backtrace
    pub fn empty() -> Backtrace {
        Backtrace(Vec::new())
    }

    /// Capture the current Wasm stack in a backtrace.
    ///
    /// Returns an empty backtrace if no `CallThreadState` is live on this
    /// thread (i.e. no Wasm is currently on the stack).
    pub fn new(store: &StoreOpaque) -> Backtrace {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                Self::new_with_trap_state(vm_store_context, unwind, state, None)
            },
            None => Backtrace(vec![]),
        })
    }

    /// Capture the current Wasm stack trace.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    ///
    /// # Safety
    ///
    /// Same requirements as [`Backtrace::trace_with_trap_state`], to which all
    /// arguments are forwarded.
    pub(crate) unsafe fn new_with_trap_state(
        vm_store_context: *const VMStoreContext,
        unwind: &dyn Unwind,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
    ) -> Backtrace {
        let mut frames = vec![];
        // Per-activation callback: walk all frames of the activation and
        // accumulate them into `frames`, never breaking out early.
        let f = |activation: Activation| unsafe {
            wasmtime_unwinder::visit_frames(
                unwind,
                activation.exit_pc,
                activation.exit_fp,
                activation.entry_trampoline_fp,
                |frame| {
                    frames.push(frame);
                    ControlFlow::Continue(())
                },
            )
        };
        unsafe {
            Self::trace_with_trap_state(vm_store_context, state, trap_pc_and_fp, f);
        }
        Backtrace(frames)
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// Does nothing if no `CallThreadState` is live on this thread.
    #[cfg(feature = "gc")]
    pub fn trace(store: &StoreOpaque, mut f: impl FnMut(Frame) -> ControlFlow<()>) {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                // Adapt the per-activation callback expected by
                // `trace_with_trap_state` to the caller's per-frame
                // callback `f`.
                let f = |activation: Activation| {
                    wasmtime_unwinder::visit_frames(
                        unwind,
                        activation.exit_pc,
                        activation.exit_fp,
                        activation.entry_trampoline_fp,
                        &mut f,
                    )
                };
                Self::trace_with_trap_state(vm_store_context, state, None, f)
            },
            None => {}
        });
    }

    /// Walk the stack of the given continuation, which must be suspended, and
    /// all of its parent continuations (if any).
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    pub fn trace_suspended_continuation(
        store: &StoreOpaque,
        continuation: &VMContRef,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace (suspended continuation) ======");

        assert_eq!(
            continuation.common_stack_information.state,
            VMStackState::Suspended
        );

        let unwind = store.unwinder();

        // A suspended continuation's control context records where execution
        // would resume; those values serve as the exit PC/FP for walking its
        // frames.
        let pc = continuation.stack.control_context_instruction_pointer();
        let fp = continuation.stack.control_context_frame_pointer();
        let trampoline_fp = continuation
            .common_stack_information
            .limits
            .last_wasm_entry_fp;

        unsafe {
            // FIXME(frank-emrich) Casting from *const to *mut pointer is
            // terrible, but we won't actually modify any of the continuations
            // here.
            let stack_chain =
                VMStackChain::Continuation(continuation as *const VMContRef as *mut VMContRef);

            if let ControlFlow::Break(()) = Self::trace_through_continuations(
                stack_chain,
                pc,
                fp,
                trampoline_fp,
                |activation| {
                    wasmtime_unwinder::visit_frames(
                        unwind,
                        activation.exit_pc,
                        activation.exit_fp,
                        activation.entry_trampoline_fp,
                        &mut f,
                    )
                },
            ) {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }

        log::trace!("====== Done Capturing Backtrace (reached end of stack chain) ======");
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    ///
    /// We define "current Wasm stack" here as "all activations
    /// associated with the given store". That is: if we have a stack like
    ///
    /// ```plain
    /// host --> (Wasm functions in store A) --> host --> (Wasm functions in store B) --> host
    /// --> (Wasm functions in store A) --> host --> call `trace_with_trap_state` with store A
    /// ```
    ///
    /// then we will see the first and third Wasm activations (those
    /// associated with store A), but not that with store B. In
    /// essence, activations from another store might as well be some
    /// other opaque host code; we don't know anything about it.
    ///
    /// # Safety
    ///
    /// `vm_store_context` must point to the live `VMStoreContext` of the store
    /// being traced (it is dereferenced below), and `trap_pc_and_fp`, when
    /// provided, must be the PC/FP captured at the actual trap site.
    pub(crate) unsafe fn trace_with_trap_state(
        vm_store_context: *const VMStoreContext,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
        mut f: impl FnMut(Activation) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace ======");

        let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
            // If we exited Wasm by catching a trap, then the Wasm-to-host
            // trampoline did not get a chance to save the last Wasm PC and FP,
            // and we need to use the plumbed-through values instead.
            Some((pc, fp)) => {
                // Sanity check: the trap state must belong to the same store
                // whose context we were handed.
                assert!(core::ptr::eq(
                    vm_store_context,
                    state.vm_store_context.get().as_ptr()
                ));
                (pc, fp)
            }
            // Either there is no Wasm currently on the stack, or we exited Wasm
            // through the Wasm-to-host trampoline.
            None => unsafe {
                let pc = *(*vm_store_context).last_wasm_exit_pc.get();
                let fp = (*vm_store_context).last_wasm_exit_fp();
                (pc, fp)
            },
        };

        let stack_chain = unsafe { (*(*vm_store_context).stack_chain.get()).clone() };

        // The first value in `activations` is for the most recently running
        // wasm. We thus provide the stack chain of `first_wasm_state` to
        // traverse the potential continuation stacks. For the subsequent
        // activations, we unconditionally use `None` as the corresponding stack
        // chain. This is justified because only the most recent execution of
        // wasm may execute off the initial stack (see comments in
        // `wasmtime::invoke_wasm_and_catch_traps` for details).
        let activations =
            core::iter::once((stack_chain, last_wasm_exit_pc, last_wasm_exit_fp, unsafe {
                *(*vm_store_context).last_wasm_entry_fp.get()
            }))
            .chain(
                state
                    .iter()
                    .flat_map(|state| state.iter())
                    // Skip activations belonging to other stores; see the
                    // doc comment above.
                    .filter(|state| {
                        core::ptr::eq(vm_store_context, state.vm_store_context.get().as_ptr())
                    })
                    .map(|state| unsafe {
                        (
                            state.old_stack_chain(),
                            state.old_last_wasm_exit_pc(),
                            state.old_last_wasm_exit_fp(),
                            state.old_last_wasm_entry_fp(),
                        )
                    }),
            )
            .take_while(|(chain, pc, fp, sp)| {
                // A zero PC marks a sentinel entry: there are no older Wasm
                // activations for this store, so stop iterating. In that case
                // the FP/SP are expected to be zero as well.
                if *pc == 0 {
                    debug_assert_eq!(*fp, 0);
                    debug_assert_eq!(*sp, 0);
                } else {
                    debug_assert_ne!(chain.clone(), VMStackChain::Absent)
                }
                *pc != 0
            });

        for (chain, exit_pc, exit_fp, entry_trampoline_fp) in activations {
            let res = unsafe {
                Self::trace_through_continuations(
                    chain,
                    exit_pc,
                    exit_fp,
                    entry_trampoline_fp,
                    &mut f,
                )
            };
            if let ControlFlow::Break(()) = res {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }

        log::trace!("====== Done Capturing Backtrace (reached end of activations) ======");
    }

    /// Traces through a sequence of stacks, creating a backtrace for each one,
    /// beginning at the given `pc` and `fp`.
    ///
    /// If `chain` is `InitialStack`, we are tracing through the initial stack,
    /// and this function behaves like `trace_through_wasm`.
    /// Otherwise, we can interpret `chain` as a linked list of stacks, which
    /// ends with the initial stack. We then trace through each of these stacks
    /// individually, up to (and including) the initial stack.
    ///
    /// # Safety
    ///
    /// `chain` must describe live stacks: the raw `VMStackLimits` and
    /// `VMContRef` pointers it yields are dereferenced here. The given
    /// `exit_pc`/`exit_fp`/`entry_trampoline_fp` must be consistent with the
    /// currently running stack of that chain.
    unsafe fn trace_through_continuations(
        chain: VMStackChain,
        exit_pc: usize,
        exit_fp: usize,
        entry_trampoline_fp: usize,
        mut f: impl FnMut(Activation) -> ControlFlow<()>,
    ) -> ControlFlow<()> {
        use crate::runtime::vm::stack_switching::{VMContRef, VMStackLimits};

        // Handle the stack that is currently running (which may be a
        // continuation or the initial stack).
        f(Activation {
            exit_pc,
            exit_fp,
            entry_trampoline_fp,
        })?;

        // Note that the rest of this function has no effect if `chain` is
        // `Some(VMStackChain::InitialStack(_))` (i.e., there is only one stack to
        // trace through: the initial stack)

        assert_ne!(chain, VMStackChain::Absent);
        let stack_limits_vec: Vec<*mut VMStackLimits> =
            unsafe { chain.clone().into_stack_limits_iter().collect() };
        let continuations_vec: Vec<*mut VMContRef> =
            unsafe { chain.clone().into_continuation_iter().collect() };

        // The VMStackLimits of the currently running stack (whether that's a
        // continuation or the initial stack) contains undefined data, the
        // information about that stack is saved in the Store's
        // `VMStoreContext` and handled at the top of this function
        // already. That's why we ignore `stack_limits_vec[0]`.
        //
        // Note that a continuation stack's control context stores
        // information about how to resume execution *in its parent*. Thus,
        // we combine the information from continuations_vec[i] with
        // stack_limits_vec[i + 1] below to get information about a
        // particular stack.
        //
        // There must be exactly one more `VMStackLimits` object than there
        // are continuations, due to the initial stack having one, too.
        assert_eq!(stack_limits_vec.len(), continuations_vec.len() + 1);

        for i in 0..continuations_vec.len() {
            // The continuation whose control context we want to
            // access, to get information about how to continue
            // execution in its parent.
            let continuation = unsafe { &*continuations_vec[i] };

            // The stack limits describing the parent of `continuation`.
            let parent_limits = unsafe { &*stack_limits_vec[i + 1] };

            // The parent of `continuation`, if `continuation` is not the last
            // entry in the chain (i.e. if the parent is itself a continuation
            // rather than the initial stack).
            let parent_continuation = continuations_vec.get(i + 1).map(|&c| unsafe { &*c });

            let fiber_stack = continuation.fiber_stack();
            let resume_pc = fiber_stack.control_context_instruction_pointer();
            let resume_fp = fiber_stack.control_context_frame_pointer();

            // If the parent is indeed a continuation, we know the
            // boundaries of its stack and can perform some extra debugging
            // checks.
            let parent_stack_range = parent_continuation.and_then(|p| p.fiber_stack().range());
            parent_stack_range.inspect(|parent_stack_range| {
                debug_assert!(parent_stack_range.contains(&resume_fp));
                debug_assert!(parent_stack_range.contains(&parent_limits.last_wasm_entry_fp));
                debug_assert!(parent_stack_range.contains(&parent_limits.stack_limit));
            });

            f(Activation {
                exit_pc: resume_pc,
                exit_fp: resume_fp,
                entry_trampoline_fp: parent_limits.last_wasm_entry_fp,
            })?;
        }
        ControlFlow::Continue(())
    }

    /// Capture all Activations reachable from the current point
    /// within a hostcall.
    ///
    /// Returns an empty vector if no `CallThreadState` is live on this
    /// thread. Activations from other stores are filtered out inside
    /// `trace_with_trap_state`.
    #[cfg(feature = "debug")]
    pub(crate) fn activations(store: &StoreOpaque) -> Vec<Activation> {
        let mut activations = vec![];
        let vm_store_context = store.vm_store_context();
        tls::with(|state| match state {
            Some(state) => unsafe {
                // SAFETY: `vm_store_context` is the live context obtained
                // from `store` just above.
                Self::trace_with_trap_state(vm_store_context, state, None, |act| {
                    activations.push(act);
                    ControlFlow::Continue(())
                });
            },
            None => {}
        });
        activations
    }

    /// Iterate over the frames inside this backtrace.
    ///
    /// The iterator yields borrowed frames, knows its exact length, and can
    /// be traversed from either end.
    pub fn frames<'a>(
        &'a self,
    ) -> impl ExactSizeIterator<Item = &'a Frame> + DoubleEndedIterator + 'a {
        self.0.iter()
    }
}