wasmtime/runtime/
debug.rs

//! Debugging API.

use crate::Result;
use crate::{
    AnyRef, AsContext, AsContextMut, CodeMemory, ExnRef, Extern, ExternRef, Func, Instance, Module,
    OwnedRooted, StoreContext, StoreContextMut, Val,
    code::StoreCodePC,
    module::ModuleRegistry,
    store::{AutoAssertNoGc, StoreOpaque},
    vm::{CompiledModuleId, FrameOrHostCode, StoreBacktrace, VMContext},
};
use alloc::collections::BTreeSet;
use alloc::vec;
use alloc::vec::Vec;
use core::{ffi::c_void, ptr::NonNull};
#[cfg(feature = "gc")]
use wasmtime_environ::FrameTable;
use wasmtime_environ::{
    DefinedFuncIndex, EntityIndex, FrameInstPos, FrameStackShape, FrameStateSlot,
    FrameStateSlotOffset, FrameTableBreakpointData, FrameTableDescriptorIndex, FrameValType,
    FuncIndex, FuncKey, GlobalIndex, MemoryIndex, TableIndex, TagIndex, Trap,
};
use wasmtime_unwinder::Frame;

use super::store::AsStoreOpaque;

impl<'a, T> StoreContextMut<'a, T> {
    /// Provide an object that captures Wasm stack state, including
    /// Wasm VM-level values (locals and operand stack).
    ///
    /// This object views all activations for the current store that
    /// are on the stack. An activation is a contiguous sequence of
    /// Wasm frames (called functions) that were called from host code
    /// and called back out to host code. If there are activations
    /// from multiple stores on the stack, for example if Wasm code in
    /// one store calls out to host code which invokes another Wasm
    /// function in another store, then the other stores are "opaque"
    /// to our view here in the same way that host code is.
    ///
    /// Returns `None` if debug instrumentation is not enabled for
    /// the engine containing this store.
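    ///
    /// # Example
    ///
    /// A minimal sketch of walking the stack from host code (illustrative
    /// only; it assumes guest-debug instrumentation was enabled in the
    /// engine's configuration and that `store` is a `StoreContextMut`
    /// captured while Wasm frames are on the stack):
    ///
    /// ```ignore
    /// if let Some(mut cursor) = store.debug_frames() {
    ///     while !cursor.done() {
    ///         if let Some((func, pc)) = cursor.wasm_function_index_and_pc() {
    ///             println!("frame: func {func:?} at wasm pc {pc:#x}");
    ///         }
    ///         for i in 0..cursor.num_locals() {
    ///             println!("  local[{i}] = {:?}", cursor.local(i));
    ///         }
    ///         cursor.move_to_parent();
    ///     }
    /// }
    /// ```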
    pub fn debug_frames(self) -> Option<DebugFrameCursor<'a, T>> {
        if !self.engine().tunables().debug_guest {
            return None;
        }

        let iter = StoreBacktrace::new(self);
        let mut view = DebugFrameCursor {
            iter,
            is_trapping_frame: false,
            frames: vec![],
            current: None,
        };
        view.move_to_parent(); // Load the first frame.
        Some(view)
    }

    /// Start an edit session to update breakpoints.
    pub fn edit_breakpoints(self) -> Option<BreakpointEdit<'a>> {
        if !self.engine().tunables().debug_guest {
            return None;
        }

        let (breakpoints, registry) = self.0.breakpoints_and_registry_mut();
        Some(breakpoints.edit(registry))
    }
}

impl Instance {
    /// Get access to a global within this instance's globals index
    /// space.
    ///
    /// This permits accessing globals whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any global index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
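    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only; global index `0` is a
    /// hypothetical entry in this instance's index space):
    ///
    /// ```ignore
    /// if let Some(global) = instance.debug_global(&mut store, 0) {
    ///     println!("global 0 = {:?}", global.get(&mut store));
    /// }
    /// ```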
    pub fn debug_global(
        &self,
        mut store: impl AsContextMut,
        global_index: u32,
    ) -> Option<crate::Global> {
        self.debug_export(
            store.as_context_mut().0,
            GlobalIndex::from_bits(global_index).into(),
        )
        .and_then(|s| s.into_global())
    }

    /// Get access to a memory (unshared only) within this instance's
    /// memory index space.
    ///
    /// This permits accessing memories whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any memory index that is out-of-bounds.
    ///
    /// `None` is returned for any shared memory (use
    /// `debug_shared_memory` instead).
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_memory(
        &self,
        mut store: impl AsContextMut,
        memory_index: u32,
    ) -> Option<crate::Memory> {
        self.debug_export(
            store.as_context_mut().0,
            MemoryIndex::from_bits(memory_index).into(),
        )
        .and_then(|s| s.into_memory())
    }

    /// Get access to a shared memory within this instance's memory
    /// index space.
    ///
    /// This permits accessing memories whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any memory index that is out-of-bounds.
    ///
    /// `None` is returned for any unshared memory (use `debug_memory`
    /// instead).
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_shared_memory(
        &self,
        mut store: impl AsContextMut,
        memory_index: u32,
    ) -> Option<crate::SharedMemory> {
        self.debug_export(
            store.as_context_mut().0,
            MemoryIndex::from_bits(memory_index).into(),
        )
        .and_then(|s| s.into_shared_memory())
    }

    /// Get access to a table within this instance's table index
    /// space.
    ///
    /// This permits accessing tables whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any table index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_table(
        &self,
        mut store: impl AsContextMut,
        table_index: u32,
    ) -> Option<crate::Table> {
        self.debug_export(
            store.as_context_mut().0,
            TableIndex::from_bits(table_index).into(),
        )
        .and_then(|s| s.into_table())
    }

    /// Get access to a function within this instance's function index
    /// space.
    ///
    /// This permits accessing functions whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any function index that is
    /// out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_function(
        &self,
        mut store: impl AsContextMut,
        function_index: u32,
    ) -> Option<crate::Func> {
        self.debug_export(
            store.as_context_mut().0,
            FuncIndex::from_bits(function_index).into(),
        )
        .and_then(|s| s.into_func())
    }

    /// Get access to a tag within this instance's tag index space.
    ///
    /// This permits accessing tags whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any tag index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_tag(&self, mut store: impl AsContextMut, tag_index: u32) -> Option<crate::Tag> {
        self.debug_export(
            store.as_context_mut().0,
            TagIndex::from_bits(tag_index).into(),
        )
        .and_then(|s| s.into_tag())
    }

    fn debug_export(&self, store: &mut StoreOpaque, index: EntityIndex) -> Option<Extern> {
        if !store.engine().tunables().debug_guest {
            return None;
        }

        let env_module = self._module(store).env_module();
        if !env_module.is_valid(index) {
            return None;
        }
        let store_id = store.id();
        let (instance, registry) = store.instance_and_module_registry_mut(self.id());
        // SAFETY: the `store` and `registry` are associated with
        // this instance as we fetched the instance directly from
        // the store above.
        let export = unsafe { instance.get_export_by_index_mut(registry, store_id, index) };
        Some(Extern::from_wasmtime_export(export, store))
    }
}

impl<'a, T> StoreContext<'a, T> {
    /// Return all breakpoints.
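    ///
    /// A sketch of listing the currently set breakpoints (illustrative
    /// only; assumes guest-debug is enabled so that `Some` is returned):
    ///
    /// ```ignore
    /// if let Some(breakpoints) = store.as_context().breakpoints() {
    ///     for bp in breakpoints {
    ///         println!("breakpoint at pc {:#x} in {:?}", bp.pc, bp.module.name());
    ///     }
    /// }
    /// ```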
    pub fn breakpoints(self) -> Option<impl Iterator<Item = Breakpoint> + 'a> {
        if !self.engine().tunables().debug_guest {
            return None;
        }

        let (breakpoints, registry) = self.0.breakpoints_and_registry();
        Some(breakpoints.breakpoints(registry))
    }

    /// Indicate whether single-step mode is enabled.
    pub fn is_single_step(&self) -> bool {
        let (breakpoints, _) = self.0.breakpoints_and_registry();
        breakpoints.is_single_step()
    }
}

/// A view of an active stack frame, with the ability to move up the
/// stack.
///
/// See the documentation on `Store::debug_frames` for more information
/// about which frames this view will show.
pub struct DebugFrameCursor<'a, T: 'static> {
    /// Iterator over frames.
    ///
    /// This iterator owns the store while the view exists (accessible
    /// as `iter.store`).
    iter: StoreBacktrace<'a, T>,

    /// Is the next frame to be visited by the iterator a trapping
    /// frame?
    ///
    /// This alters how we interpret `pc`: for a trap, we look at the
    /// instruction that *starts* at `pc`, while for all frames
    /// further up the stack (i.e., at a callsite), we look at the
    /// instruction that *ends* at `pc`.
    is_trapping_frame: bool,

    /// Virtual frame queue: decoded from `iter`, not yet
    /// yielded. Innermost frame on top (last).
    ///
    /// This is only non-empty when there is more than one virtual
    /// frame in a physical frame (i.e., for inlining); thus, its size
    /// is bounded by our inlining depth.
    frames: Vec<VirtualFrame>,

    /// Currently focused virtual frame.
    current: Option<FrameData>,
}

/// The result type from `DebugFrameCursor::move_to_parent()`:
/// indicates whether the cursor skipped over host code to move to the
/// next Wasm frame.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum FrameParentResult {
    /// The new frame is in the same Wasm activation.
    SameActivation,
    /// The new frame is in the next higher Wasm activation on the
    /// stack.
    NewActivation,
}

impl<'a, T: 'static> DebugFrameCursor<'a, T> {
    /// Move up to the next frame in the activation.
    ///
    /// Returns a [`FrameParentResult`] indicating whether the
    /// moved-to frame is in the same activation or whether the cursor
    /// skipped over host code to reach a new activation.
    pub fn move_to_parent(&mut self) -> FrameParentResult {
        // If there are no virtual frames to yield, take and decode
        // the next physical frame.
        //
        // The loop below only repeats in order to skip over host-code
        // markers; the assert that decoding produces at least one
        // virtual frame enforces the invariant that each physical
        // frame decodes to at least one virtual frame (i.e., there
        // are no physical frames for interstitial functions or other
        // things that we completely ignore). If this ever changes, we
        // can remove the assert and keep polling until we find
        // virtual frames.
        let mut result = FrameParentResult::SameActivation;
        self.current = None;
        while self.frames.is_empty() {
            let Some(next_frame) = self.iter.next() else {
                return result;
            };
            self.frames = match next_frame {
                FrameOrHostCode::Frame(frame) => VirtualFrame::decode(
                    self.iter.store_mut().0.as_store_opaque(),
                    frame,
                    self.is_trapping_frame,
                ),
                FrameOrHostCode::HostCode => {
                    result = FrameParentResult::NewActivation;
                    continue;
                }
            };
            debug_assert!(!self.frames.is_empty());
            self.is_trapping_frame = false;
        }

        // Take a frame and focus it as the current one.
        self.current = self.frames.pop().map(|vf| FrameData::compute(vf));
        result
    }

    /// Has the cursor moved past the last visible frame, i.e., is
    /// there no current frame?
    pub fn done(&self) -> bool {
        self.current.is_none()
    }

    fn frame_data(&self) -> &FrameData {
        self.current.as_ref().expect("No current frame")
    }

    fn raw_instance(&self) -> &crate::vm::Instance {
        // Read out the vmctx slot.

        // SAFETY: vmctx is always at offset 0 in the slot.
        // (See crates/cranelift/src/func_environ.rs in `update_stack_slot_vmctx()`.)
        let vmctx: *mut VMContext = unsafe { *(self.frame_data().slot_addr as *mut _) };
        let vmctx = NonNull::new(vmctx).expect("null vmctx in debug state slot");
        // SAFETY: the stored vmctx value is a valid instance in this
        // store; we only visit frames from this store in the
        // backtrace.
        let instance = unsafe { crate::vm::Instance::from_vmctx(vmctx) };
        // SAFETY: the instance pointer read above is valid.
        unsafe { instance.as_ref() }
    }

    /// Get the instance associated with the current frame.
    pub fn instance(&mut self) -> Instance {
        let instance = self.raw_instance();
        Instance::from_wasmtime(instance.id(), self.iter.store_mut().0.as_store_opaque())
    }

    /// Get the module associated with the current frame, if any
    /// (i.e., not a container instance for a host-created entity).
    pub fn module(&self) -> Option<&Module> {
        let instance = self.raw_instance();
        instance.runtime_module()
    }

    /// Get the raw function index associated with the current frame,
    /// and the PC as an offset within the module's code section, if
    /// this frame is a Wasm function defined directly in the frame's
    /// `Module` (rather than a trampoline).
    pub fn wasm_function_index_and_pc(&self) -> Option<(DefinedFuncIndex, u32)> {
        let data = self.frame_data();
        let FuncKey::DefinedWasmFunction(module, func) = data.func_key else {
            return None;
        };
        debug_assert_eq!(
            module,
            self.module()
                .expect("module should be defined if this is a defined function")
                .env_module()
                .module_index
        );
        Some((func, data.wasm_pc))
    }

    /// Get the number of locals in this frame.
    pub fn num_locals(&self) -> u32 {
        u32::try_from(self.frame_data().locals.len()).unwrap()
    }

    /// Get the depth of the operand stack in this frame.
    pub fn num_stacks(&self) -> u32 {
        u32::try_from(self.frame_data().stack.len()).unwrap()
    }

    /// Get the type and value of the given local in this frame.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range (greater than or equal to
    /// `num_locals()`).
    pub fn local(&mut self, index: u32) -> Val {
        let data = self.frame_data();
        let (offset, ty) = data.locals[usize::try_from(index).unwrap()];
        let slot_addr = data.slot_addr;
        // SAFETY: compiler produced metadata to describe this local
        // slot and stored a value of the correct type into it.
        unsafe { read_value(&mut self.iter.store_mut().0, slot_addr, offset, ty) }
    }

    /// Get the type and value of the given operand-stack value in
    /// this frame.
    ///
    /// Index 0 corresponds to the bottom-of-stack, and higher indices
    /// from there are more recently pushed values. In other words,
    /// index order reads the Wasm virtual machine's abstract stack
    /// state left-to-right.
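    ///
    /// For example, reading the whole operand stack bottom-to-top might
    /// look like this (illustrative sketch; `cursor` is a
    /// `DebugFrameCursor` focused on some frame):
    ///
    /// ```ignore
    /// for i in 0..cursor.num_stacks() {
    ///     println!("stack[{i}] = {:?}", cursor.stack(i));
    /// }
    /// ```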
    pub fn stack(&mut self, index: u32) -> Val {
        let data = self.frame_data();
        let (offset, ty) = data.stack[usize::try_from(index).unwrap()];
        let slot_addr = data.slot_addr;
        // SAFETY: compiler produced metadata to describe this
        // operand-stack slot and stored a value of the correct type
        // into it.
        unsafe { read_value(&mut self.iter.store_mut().0, slot_addr, offset, ty) }
    }
}

/// Internal data pre-computed for one stack frame.
///
/// This combines physical frame info (pc, fp) with the module this PC
/// maps to (yielding a frame table) and one frame as produced by the
/// program-point lookup (Wasm PC, frame descriptor index, stack shape).
struct VirtualFrame {
    /// The frame pointer.
    fp: *const u8,
    /// The resolved module handle for the physical PC.
    ///
    /// The module for each inlined frame within the physical frame is
    /// resolved from the vmctx reachable for each such frame; this
    /// module is used only for looking up the frame table.
    module: Module,
    /// The Wasm PC for this frame.
    wasm_pc: u32,
    /// The frame descriptor for this frame.
    frame_descriptor: FrameTableDescriptorIndex,
    /// The stack shape for this frame.
    stack_shape: FrameStackShape,
}

impl VirtualFrame {
    /// Return virtual frames corresponding to a physical frame, from
    /// outermost to innermost.
    fn decode(store: &mut StoreOpaque, frame: Frame, is_trapping_frame: bool) -> Vec<VirtualFrame> {
        let (module_with_code, pc) = store
            .modules()
            .module_and_code_by_pc(frame.pc())
            .expect("Wasm frame PC does not correspond to a module");
        let module = module_with_code.module();
        let table = module.frame_table().unwrap();
        let pc = u32::try_from(pc).expect("PC offset too large");
        let pos = if is_trapping_frame {
            FrameInstPos::Pre
        } else {
            FrameInstPos::Post
        };
        let program_points = table.find_program_point(pc, pos).expect("There must be a program point record in every frame when debug instrumentation is enabled");

        program_points
            .map(|(wasm_pc, frame_descriptor, stack_shape)| VirtualFrame {
                fp: core::ptr::with_exposed_provenance(frame.fp()),
                module: module.clone(),
                wasm_pc,
                frame_descriptor,
                stack_shape,
            })
            .collect()
    }
}

/// Data computed when we visit a given frame.
struct FrameData {
    slot_addr: *const u8,
    func_key: FuncKey,
    wasm_pc: u32,
    /// Shape of locals in this frame.
    ///
    /// We need to store this locally because `DebugFrameCursor` cannot
    /// borrow the store: it needs a mut borrow, and an iterator
    /// cannot yield the same mut borrow multiple times because it
    /// cannot control the lifetime of the values it yields (the
    /// signature of `next()` does not bound the return value to the
    /// `&mut self` arg).
    locals: Vec<(FrameStateSlotOffset, FrameValType)>,
    /// Shape of the stack slots at this program point in this frame.
    ///
    /// In addition to the borrowing-related reason above, we also
    /// materialize this because we want to provide O(1) access to the
    /// stack by depth, and the frame slot descriptor stores info in a
    /// linked-list (actually DAG, with dedup'ing) way.
    stack: Vec<(FrameStateSlotOffset, FrameValType)>,
}

impl FrameData {
    fn compute(frame: VirtualFrame) -> Self {
        let frame_table = frame.module.frame_table().unwrap();
        // Parse the frame descriptor.
        let (data, slot_to_fp_offset) = frame_table
            .frame_descriptor(frame.frame_descriptor)
            .unwrap();
        let frame_state_slot = FrameStateSlot::parse(data).unwrap();
        let slot_addr = frame
            .fp
            .wrapping_sub(usize::try_from(slot_to_fp_offset).unwrap());

        // Materialize the stack shape so we have O(1) access to its
        // elements, and so we don't need to keep the borrow to the
        // module alive.
        let mut stack = frame_state_slot
            .stack(frame.stack_shape)
            .collect::<Vec<_>>();
        stack.reverse(); // Put top-of-stack last.

        // Materialize the local offsets/types so we don't need to
        // keep the borrow to the module alive.
        let locals = frame_state_slot.locals().collect::<Vec<_>>();

        FrameData {
            slot_addr,
            func_key: frame_state_slot.func_key(),
            wasm_pc: frame.wasm_pc,
            stack,
            locals,
        }
    }
}

/// Read the value at the given offset.
///
/// # Safety
///
/// The `offset` and `ty` must correspond to a valid value written
/// to the frame by generated code of the correct type. This will
/// be the case if this information comes from the frame tables
/// (as long as the frontend that generates the tables and
/// instrumentation is correct, and as long as the tables are
/// preserved through serialization).
unsafe fn read_value(
    store: &mut StoreOpaque,
    slot_base: *const u8,
    offset: FrameStateSlotOffset,
    ty: FrameValType,
) -> Val {
    let address = unsafe { slot_base.offset(isize::try_from(offset.offset()).unwrap()) };

    // SAFETY: each case reads a value from memory that should be
    // valid according to our safety condition.
    match ty {
        FrameValType::I32 => {
            let value = unsafe { *(address as *const i32) };
            Val::I32(value)
        }
        FrameValType::I64 => {
            let value = unsafe { *(address as *const i64) };
            Val::I64(value)
        }
        FrameValType::F32 => {
            let value = unsafe { *(address as *const u32) };
            Val::F32(value)
        }
        FrameValType::F64 => {
            let value = unsafe { *(address as *const u64) };
            Val::F64(value)
        }
        FrameValType::V128 => {
            let value = unsafe { *(address as *const u128) };
            Val::V128(value.into())
        }
        FrameValType::AnyRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = AnyRef::_from_raw(&mut nogc, value);
            Val::AnyRef(value)
        }
        FrameValType::ExnRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExnRef::_from_raw(&mut nogc, value);
            Val::ExnRef(value)
        }
        FrameValType::ExternRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExternRef::_from_raw(&mut nogc, value);
            Val::ExternRef(value)
        }
        FrameValType::FuncRef => {
            let value = unsafe { *(address as *const *mut c_void) };
            let value = unsafe { Func::_from_raw(store, value) };
            Val::FuncRef(value)
        }
        FrameValType::ContRef => {
            unimplemented!("contref values are not implemented in the host API yet")
        }
    }
}

/// Compute raw pointers to all GC refs in the given frame.
// Note: ideally this would be an impl Iterator, but this is quite
// awkward because of the locally computed data (FrameStateSlot::parse
// structured result) within the closure borrowed by a nested closure.
#[cfg(feature = "gc")]
pub(crate) fn gc_refs_in_frame<'a>(ft: FrameTable<'a>, pc: u32, fp: *mut usize) -> Vec<*mut u32> {
    let fp = fp.cast::<u8>();
    let mut ret = vec![];
    if let Some(frames) = ft.find_program_point(pc, FrameInstPos::Post) {
        for (_wasm_pc, frame_desc, stack_shape) in frames {
            let (frame_desc_data, slot_to_fp_offset) = ft.frame_descriptor(frame_desc).unwrap();
            let frame_base = unsafe { fp.offset(-isize::try_from(slot_to_fp_offset).unwrap()) };
            let frame_desc = FrameStateSlot::parse(frame_desc_data).unwrap();
            for (offset, ty) in frame_desc.stack_and_locals(stack_shape) {
                match ty {
                    FrameValType::AnyRef | FrameValType::ExnRef | FrameValType::ExternRef => {
                        let slot = unsafe {
                            frame_base
                                .offset(isize::try_from(offset.offset()).unwrap())
                                .cast::<u32>()
                        };
                        ret.push(slot);
                    }
                    FrameValType::ContRef | FrameValType::FuncRef => {}
                    FrameValType::I32
                    | FrameValType::I64
                    | FrameValType::F32
                    | FrameValType::F64
                    | FrameValType::V128 => {}
                }
            }
        }
    }
    ret
}

impl<'a, T: 'static> AsContext for DebugFrameCursor<'a, T> {
    type Data = T;
    fn as_context(&self) -> StoreContext<'_, Self::Data> {
        StoreContext(self.iter.store().0)
    }
}
impl<'a, T: 'static> AsContextMut for DebugFrameCursor<'a, T> {
    fn as_context_mut(&mut self) -> StoreContextMut<'_, Self::Data> {
        StoreContextMut(self.iter.store_mut().0)
    }
}

/// One debug event that occurs when running Wasm code on a store with
/// a debug handler attached.
#[derive(Debug)]
pub enum DebugEvent<'a> {
    /// A [`wasmtime::Error`] was raised by a hostcall.
    HostcallError(&'a crate::Error),
    /// An exception is thrown and caught by Wasm. The current state
    /// is at the throw-point.
    CaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// An exception was not caught and is escaping to the host.
    UncaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// A Wasm trap occurred.
    Trap(Trap),
    /// A breakpoint was reached.
    Breakpoint,
    /// An epoch yield occurred.
    EpochYield,
}

/// A handler for debug events.
///
/// This is an async callback that is invoked directly within the
/// context of a debug event that occurs, i.e., with the Wasm code
/// still on the stack. The callback can thus observe that stack, up
/// to the most recent entry to Wasm.[^1]
///
/// Because this callback receives a `StoreContextMut`, it has full
/// access to any state that any other hostcall has, including the
/// `T`. In that way, it is like an epoch-deadline callback or a
/// call-hook callback. It also "freezes" the entire store for the
/// duration of the debugger callback future.
///
/// In the future, we expect to provide an "externally async" API on
/// the `Store` that allows receiving a stream of debug events and
/// accessing the store mutably while frozen; that will need to
/// integrate with [`Store::run_concurrent`] to properly timeslice and
/// scope the mutable access to the store, and has not been built
/// yet. In the meantime, it should be possible to build a fully
/// functional debugger with this async-callback API by channeling
/// debug events out, and requests to read the store back in, over
/// message-passing channels between the callback and an external
/// debugger main loop.
///
/// Note that the `handle` hook may use its mutable store access to
/// invoke more Wasm code. Debug events will also be caught and will
/// cause further `handle` invocations during this recursive
/// invocation. It is up to the debugger to handle any implications of
/// this reentrancy (e.g., implications on a duplex channel protocol
/// with an event/continue handshake) if it does so.
///
/// Note also that this trait has `Clone` as a supertrait, and the
/// handler is cloned at every invocation as an artifact of the
/// internal ownership structure of Wasmtime: the handler itself is
/// owned by the store, but also receives a mutable borrow to the
/// whole store, so we need to clone it out to invoke it. It is
/// recommended that this trait be implemented by a type that is cheap
/// to clone: for example, a single `Arc` handle to debugger state.
///
/// [^1]: Providing visibility further than the most recent entry to
///       Wasm is not directly possible because it could see into
///       another async stack, and the stack that polls the future
///       running a particular Wasm invocation could change after each
///       suspend point in the handler.
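///
/// # Example
///
/// A minimal sketch of a handler (illustrative only; `MyDebugger` is a
/// hypothetical type, and attaching the handler to a store uses an API
/// not shown in this module):
///
/// ```ignore
/// #[derive(Clone)]
/// struct MyDebugger(std::sync::Arc<std::sync::Mutex<Vec<String>>>);
///
/// impl DebugHandler for MyDebugger {
///     type Data = ();
///
///     async fn handle(&self, _store: StoreContextMut<'_, ()>, event: DebugEvent<'_>) {
///         self.0.lock().unwrap().push(format!("{event:?}"));
///     }
/// }
/// ```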
pub trait DebugHandler: Clone + Send + Sync + 'static {
    /// The data expected on the store that this handler is attached
    /// to.
    type Data;

    /// Handle a debug event.
    fn handle(
        &self,
        store: StoreContextMut<'_, Self::Data>,
        event: DebugEvent<'_>,
    ) -> impl Future<Output = ()> + Send;
}

/// Breakpoint state for modules within a store.
#[derive(Default)]
pub(crate) struct BreakpointState {
    /// Single-step mode.
    single_step: bool,
    /// Breakpoints added individually.
    breakpoints: BTreeSet<BreakpointKey>,
}

/// A breakpoint.
pub struct Breakpoint {
    /// Reference to the module in which we are setting the breakpoint.
    pub module: Module,
    /// Wasm PC offset within the module.
    pub pc: u32,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct BreakpointKey(CompiledModuleId, u32);

impl BreakpointKey {
    fn from_raw(module: &Module, pc: u32) -> BreakpointKey {
        BreakpointKey(module.id(), pc)
    }

    fn get(&self, registry: &ModuleRegistry) -> Breakpoint {
        let module = registry
            .module_by_compiled_id(self.0)
            .expect("Module should not have been removed from Store")
            .clone();
        Breakpoint { module, pc: self.1 }
    }
}

/// A breakpoint-editing session.
///
/// This enables updating breakpoint state (setting or unsetting
/// individual breakpoints or the store-global single-step flag) in a
/// batch. It is more efficient to batch these updates because
/// "re-publishing" the newly patched code, with updated breakpoint
/// settings, typically requires a syscall to re-enable execute
/// permissions.
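///
/// # Example
///
/// A sketch of a batched edit (illustrative only; `module` and the PC
/// offsets are hypothetical). Dropping the session re-publishes the
/// patched code:
///
/// ```ignore
/// let mut edit = store.edit_breakpoints().unwrap();
/// edit.add_breakpoint(&module, 0x120)?;
/// edit.remove_breakpoint(&module, 0x80)?;
/// drop(edit); // re-enables execute permissions on the patched code
/// ```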
pub struct BreakpointEdit<'a> {
    state: &'a mut BreakpointState,
    registry: &'a mut ModuleRegistry,
    /// Modules that have been edited.
    ///
    /// Invariant: each of these modules' CodeMemory objects is
    /// *unpublished* when in the dirty set.
    dirty_modules: BTreeSet<StoreCodePC>,
}

impl BreakpointState {
    pub(crate) fn edit<'a>(&'a mut self, registry: &'a mut ModuleRegistry) -> BreakpointEdit<'a> {
        BreakpointEdit {
            state: self,
            registry,
            dirty_modules: BTreeSet::new(),
        }
    }

    pub(crate) fn breakpoints<'a>(
        &'a self,
        registry: &'a ModuleRegistry,
    ) -> impl Iterator<Item = Breakpoint> + 'a {
        self.breakpoints.iter().map(|key| key.get(registry))
    }

    pub(crate) fn is_single_step(&self) -> bool {
        self.single_step
    }
}

impl<'a> BreakpointEdit<'a> {
    fn get_code_memory<'b>(
        registry: &'b mut ModuleRegistry,
        dirty_modules: &mut BTreeSet<StoreCodePC>,
        module: &Module,
    ) -> Result<&'b mut CodeMemory> {
        let store_code_pc = registry.store_code_base_or_register(module)?;
        let code_memory = registry
            .store_code_mut(store_code_pc)
            .expect("Just checked presence above")
            .code_memory_mut()
            .expect("Must have unique ownership of StoreCode in guest-debug mode");
        if dirty_modules.insert(store_code_pc) {
            code_memory.unpublish()?;
        }
        Ok(code_memory)
    }

    fn patch<'b>(
        patches: impl Iterator<Item = FrameTableBreakpointData<'b>> + 'b,
        mem: &mut CodeMemory,
        enable: bool,
    ) {
        let mem = mem.text_mut();
        for patch in patches {
            let data = if enable { patch.enable } else { patch.disable };
            let mem = &mut mem[patch.offset..patch.offset + data.len()];
            log::trace!(
                "patch: offset 0x{:x} with enable={enable}: data {data:?} replacing {mem:?}",
                patch.offset
            );
            mem.copy_from_slice(data);
        }
    }

    /// Add a breakpoint in the given module at the given PC in that
    /// module.
    ///
    /// No effect if the breakpoint is already set.
    pub fn add_breakpoint(&mut self, module: &Module, pc: u32) -> Result<()> {
        let key = BreakpointKey::from_raw(module, pc);
        self.state.breakpoints.insert(key);
        log::trace!("patching in breakpoint {key:?}");
        let mem = Self::get_code_memory(self.registry, &mut self.dirty_modules, module)?;
        let frame_table = module
            .frame_table()
            .expect("Frame table must be present when guest-debug is enabled");
        let patches = frame_table.lookup_breakpoint_patches_by_pc(pc);
        Self::patch(patches, mem, true);
        Ok(())
    }

    /// Remove a breakpoint in the given module at the given PC in
    /// that module.
    ///
    /// No effect if the breakpoint was not set.
    pub fn remove_breakpoint(&mut self, module: &Module, pc: u32) -> Result<()> {
        let key = BreakpointKey::from_raw(module, pc);
        self.state.breakpoints.remove(&key);
        if !self.state.single_step {
            let mem = Self::get_code_memory(self.registry, &mut self.dirty_modules, module)?;
            let frame_table = module
                .frame_table()
                .expect("Frame table must be present when guest-debug is enabled");
            let patches = frame_table.lookup_breakpoint_patches_by_pc(pc);
            Self::patch(patches, mem, false);
        }
        Ok(())
    }

    /// Turn on or off single-step mode.
    ///
    /// In single-step mode, a breakpoint event is emitted at every
    /// Wasm PC.
    pub fn single_step(&mut self, enabled: bool) -> Result<()> {
        log::trace!(
            "single_step({enabled}) with breakpoint set {:?}",
            self.state.breakpoints
        );
        let modules = self.registry.all_modules().cloned().collect::<Vec<_>>();
        for module in modules {
            let mem = Self::get_code_memory(self.registry, &mut self.dirty_modules, &module)?;
            let table = module
                .frame_table()
                .expect("Frame table must be present when guest-debug is enabled");
            for (wasm_pc, patch) in table.breakpoint_patches() {
                let key = BreakpointKey::from_raw(&module, wasm_pc);
                let this_enabled = enabled || self.state.breakpoints.contains(&key);
                log::trace!(
                    "single_step: enabled {enabled} key {key:?} -> this_enabled {this_enabled}"
                );
                Self::patch(core::iter::once(patch), mem, this_enabled);
            }
        }

        self.state.single_step = enabled;

        Ok(())
    }
}

impl<'a> Drop for BreakpointEdit<'a> {
    fn drop(&mut self) {
        for &store_code_base in &self.dirty_modules {
            let store_code = self.registry.store_code_mut(store_code_base).unwrap();
            if let Err(e) = store_code
                .code_memory_mut()
                .expect("Must have unique ownership of StoreCode in guest-debug mode")
                .publish()
            {
                abort_on_republish_error(e);
            }
        }
    }
}

/// Abort when we cannot re-publish executable code.
///
/// Note that this puts us in quite a conundrum. Typically we will
/// have been editing breakpoints from within a hostcall context
/// (e.g. inside a debugger hook while execution is paused) with JIT
/// code on the stack. Wasmtime's usual path to return errors is back
/// through that JIT code: we do not panic-unwind across the JIT code,
/// we return into the exit trampoline and that then re-enters the
/// raise libcall to use a Cranelift exception-throw to cross most of
/// the JIT frames to the entry trampoline. When even trampolines are
/// no longer executable, we have no way out. Even an ordinary
/// `panic!` cannot work, because we catch panics and carry them
/// across JIT code using that trampoline-based error path. Our only
/// way out is to directly abort the whole process.
///
/// This is not without precedent: other engines have similar failure
/// paths. For example, SpiderMonkey directly aborts the process when
/// failing to re-apply executable permissions (see [1]).
///
/// Note that we don't really expect to ever hit this case in
/// practice: it's unlikely that `mprotect` applying `PROT_EXEC` would
/// fail due to, e.g., resource exhaustion in the kernel, because we
/// will have the same net number of virtual memory areas before and
/// after the permissions change. Nevertheless, we have to account for
/// the possibility of error.
///
/// [1]: https://searchfox.org/firefox-main/rev/7496c8515212669451d7e775a00c2be07da38ca5/js/src/jit/AutoWritableJitCode.h#26-56
#[cfg(feature = "std")]
fn abort_on_republish_error(e: crate::Error) -> ! {
    log::error!(
        "Failed to re-publish executable code: {e:?}. Wasmtime cannot return through JIT code on the stack and cannot even panic; aborting the process."
    );
    std::process::abort();
}

/// In the `no_std` case, we don't have a concept of a "process
/// abort", so rely on `panic!`. Typically an embedded scenario that
/// uses `no_std` will build with `panic=abort` so the effect is the
/// same. If it doesn't, there is truly nothing we can do here so
/// let's panic anyway; the panic propagation through the trampolines
/// will at least deterministically crash.
#[cfg(not(feature = "std"))]
fn abort_on_republish_error(e: crate::Error) -> ! {
    panic!("Failed to re-publish executable code: {e:?}");