
wasmtime/runtime/debug.rs

//! Debugging API.

use super::store::AsStoreOpaque;
use crate::store::StoreId;
use crate::vm::{Activation, Backtrace};
use crate::{
    AnyRef, AsContextMut, CodeMemory, ExnRef, Extern, ExternRef, Func, Instance, Module,
    OwnedRooted, StoreContext, StoreContextMut, Val,
    code::StoreCodePC,
    module::ModuleRegistry,
    store::{AutoAssertNoGc, StoreOpaque},
    vm::{CompiledModuleId, VMContext},
};
use crate::{Caller, Result, Store};
use alloc::collections::{BTreeMap, BTreeSet, btree_map::Entry};
use alloc::vec;
use alloc::vec::Vec;
use core::{ffi::c_void, ptr::NonNull};
#[cfg(feature = "gc")]
use wasmtime_environ::FrameTable;
use wasmtime_environ::{
    DefinedFuncIndex, EntityIndex, FrameInstPos, FrameStackShape, FrameStateSlot,
    FrameStateSlotOffset, FrameTableBreakpointData, FrameTableDescriptorIndex, FrameValType,
    FuncIndex, FuncKey, GlobalIndex, MemoryIndex, TableIndex, TagIndex, Trap,
};
use wasmtime_unwinder::{Frame, FrameCursor};

impl<T> Store<T> {
    /// Provide a frame handle for each activation, in order from
    /// innermost (most recently called) to outermost on the stack.
    ///
    /// An activation is a contiguous sequence of Wasm frames (called
    /// functions) that were called from host code and called back out
    /// to host code. If there are activations from multiple stores on
    /// the stack, for example if Wasm code in one store calls out to
    /// host code which invokes another Wasm function in another
    /// store, then the other stores are "opaque" to our view here in
    /// the same way that host code is.
    ///
    /// Returns an empty iterator if debug instrumentation is not
    /// enabled for the engine containing this store.
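    ///
    /// A minimal usage sketch (assuming guest debugging is enabled in
    /// the engine's configuration and the store is currently paused
    /// inside a host call; `store` is a hypothetical `Store<()>`):
    ///
    /// ```ignore
    /// // Collect first: the iterator borrows the store mutably.
    /// let frames: Vec<_> = store.debug_exit_frames().collect();
    /// for frame in frames {
    ///     if let Ok(Some(module)) = frame.module(&mut store) {
    ///         println!("Wasm paused in module {:?}", module.name());
    ///     }
    /// }
    /// ```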
    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
        self.as_store_opaque().debug_exit_frames()
    }

    /// Start an edit session to update breakpoints.
    pub fn edit_breakpoints<'a>(&'a mut self) -> Option<BreakpointEdit<'a>> {
        self.as_store_opaque().edit_breakpoints()
    }
}

impl StoreOpaque {
    fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
        let activations = if self.engine().tunables().debug_guest {
            Backtrace::activations(self)
        } else {
            vec![]
        };

        activations
            .into_iter()
            // SAFETY: each activation is currently active and will
            // remain so (we have a mutable borrow of the store).
            .filter_map(|act| unsafe { FrameHandle::exit_frame(self, act) })
    }

    fn edit_breakpoints<'a>(&'a mut self) -> Option<BreakpointEdit<'a>> {
        if !self.engine().tunables().debug_guest {
            return None;
        }

        let (breakpoints, registry) = self.breakpoints_and_registry_mut();
        Some(breakpoints.edit(registry))
    }
}

impl<'a, T> StoreContextMut<'a, T> {
    /// Provide a frame handle for each activation, in order from
    /// innermost (most recently called) to outermost on the stack.
    ///
    /// See [`Store::debug_exit_frames`] for more details.
    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
        self.0.as_store_opaque().debug_exit_frames()
    }

    /// Start an edit session to update breakpoints.
    pub fn edit_breakpoints(self) -> Option<BreakpointEdit<'a>> {
        self.0.as_store_opaque().edit_breakpoints()
    }
}

impl<'a, T> Caller<'a, T> {
    /// Provide a frame handle for each activation, in order from
    /// innermost (most recently called) to outermost on the stack.
    ///
    /// See [`Store::debug_exit_frames`] for more details.
    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
        self.store.0.as_store_opaque().debug_exit_frames()
    }
}

impl Instance {
    /// Get access to a global within this instance's globals index
    /// space.
    ///
    /// This permits accessing globals whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any global index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
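    ///
    /// A hedged sketch (assuming `instance` is an `Instance` whose
    /// engine enables guest debugging; global index 0 is illustrative):
    ///
    /// ```ignore
    /// if let Some(global) = instance.debug_global(&mut store, 0) {
    ///     println!("global 0 = {:?}", global.get(&mut store));
    /// }
    /// ```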
    pub fn debug_global(
        &self,
        mut store: impl AsContextMut,
        global_index: u32,
    ) -> Option<crate::Global> {
        self.debug_export(
            store.as_context_mut().0,
            GlobalIndex::from_bits(global_index).into(),
        )
        .and_then(|s| s.into_global())
    }

    /// Get access to a memory (unshared only) within this instance's
    /// memory index space.
    ///
    /// This permits accessing memories whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any memory index that is out-of-bounds.
    ///
    /// `None` is returned for any shared memory (use
    /// `debug_shared_memory` instead).
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_memory(
        &self,
        mut store: impl AsContextMut,
        memory_index: u32,
    ) -> Option<crate::Memory> {
        self.debug_export(
            store.as_context_mut().0,
            MemoryIndex::from_bits(memory_index).into(),
        )
        .and_then(|s| s.into_memory())
    }

    /// Get access to a shared memory within this instance's memory
    /// index space.
    ///
    /// This permits accessing memories whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any memory index that is out-of-bounds.
    ///
    /// `None` is returned for any unshared memory (use `debug_memory`
    /// instead).
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_shared_memory(
        &self,
        mut store: impl AsContextMut,
        memory_index: u32,
    ) -> Option<crate::SharedMemory> {
        self.debug_export(
            store.as_context_mut().0,
            MemoryIndex::from_bits(memory_index).into(),
        )
        .and_then(|s| s.into_shared_memory())
    }

    /// Get access to a table within this instance's table index
    /// space.
    ///
    /// This permits accessing tables whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any table index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_table(
        &self,
        mut store: impl AsContextMut,
        table_index: u32,
    ) -> Option<crate::Table> {
        self.debug_export(
            store.as_context_mut().0,
            TableIndex::from_bits(table_index).into(),
        )
        .and_then(|s| s.into_table())
    }

    /// Get access to a function within this instance's function index
    /// space.
    ///
    /// This permits accessing functions whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any function index that is
    /// out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_function(
        &self,
        mut store: impl AsContextMut,
        function_index: u32,
    ) -> Option<crate::Func> {
        self.debug_export(
            store.as_context_mut().0,
            FuncIndex::from_bits(function_index).into(),
        )
        .and_then(|s| s.into_func())
    }

    /// Get access to a tag within this instance's tag index space.
    ///
    /// This permits accessing tags whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any tag index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_tag(&self, mut store: impl AsContextMut, tag_index: u32) -> Option<crate::Tag> {
        self.debug_export(
            store.as_context_mut().0,
            TagIndex::from_bits(tag_index).into(),
        )
        .and_then(|s| s.into_tag())
    }

    fn debug_export(&self, store: &mut StoreOpaque, index: EntityIndex) -> Option<Extern> {
        if !store.engine().tunables().debug_guest {
            return None;
        }

        let env_module = self._module(store).env_module();
        if !env_module.is_valid(index) {
            return None;
        }
        let store_id = store.id();
        let (instance, registry) = store.instance_and_module_registry_mut(self.id());
        // SAFETY: the `store` and `registry` are associated with
        // this instance as we fetched the instance directly from
        // the store above.
        let export = unsafe { instance.get_export_by_index_mut(registry, store_id, index) };
        Some(Extern::from_wasmtime_export(export, store))
    }
}

impl<'a, T> StoreContext<'a, T> {
    /// Return all breakpoints.
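    ///
    /// A hedged sketch of listing breakpoints (assuming guest
    /// debugging is enabled and `store` is a `Store<()>`):
    ///
    /// ```ignore
    /// if let Some(breakpoints) = store.as_context().breakpoints() {
    ///     for bp in breakpoints {
    ///         println!("breakpoint at pc {:#x} in {:?}", bp.pc, bp.module.name());
    ///     }
    /// }
    /// ```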
    pub fn breakpoints(self) -> Option<impl Iterator<Item = Breakpoint> + 'a> {
        if !self.engine().tunables().debug_guest {
            return None;
        }

        let (breakpoints, registry) = self.0.breakpoints_and_registry();
        Some(breakpoints.breakpoints(registry))
    }

    /// Indicate whether single-step mode is enabled.
    pub fn is_single_step(&self) -> bool {
        let (breakpoints, _) = self.0.breakpoints_and_registry();
        breakpoints.is_single_step()
    }
}

/// A handle to a stack frame, valid as long as execution is not
/// resumed in the associated `Store`.
///
/// This handle can be held and cloned and used to refer to a frame
/// within a paused store. It is cheap: it internally consists of a
/// pointer to the actual frame, together with some metadata to
/// determine when that pointer has gone stale.
///
/// At the API level, any usage of this frame handle requires a
/// mutable borrow of the `Store`, because the `Store` logically owns
/// the stack(s) for any execution within it. However, the existence
/// of the handle itself does not hold a borrow on the `Store`; hence,
/// the `Store` can continue to be used and queried, and some state
/// (e.g. memories, tables, GC objects) can even be mutated, as long
/// as execution is not resumed. The intent of this API is to allow a
/// wide variety of debugger implementation strategies that expose
/// stack frames and also allow other commands/actions at the same
/// time.
///
/// The user can use [`FrameHandle::is_valid`] to determine if the
/// handle is still valid and usable.
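///
/// A sketch of inspecting a frame's locals (assuming `frame` is a
/// `FrameHandle` obtained from [`Store::debug_exit_frames`] and
/// `store` is the matching store):
///
/// ```ignore
/// if frame.is_valid(&mut store) {
///     let n = frame.num_locals(&mut store).unwrap();
///     for i in 0..n {
///         println!("local {i} = {:?}", frame.local(&mut store, i).unwrap());
///     }
/// }
/// ```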
#[derive(Clone)]
pub struct FrameHandle {
    /// The unwinder cursor at this frame.
    cursor: FrameCursor,

    /// The index of the virtual frame within the physical frame.
    virtual_frame_idx: usize,

    /// The unique Store this frame came from, to ensure the handle is
    /// used with the correct Store.
    store_id: StoreId,

    /// Store `execution_version`.
    store_version: u64,
}

impl FrameHandle {
    /// Create a new FrameHandle at the exit frame of an activation.
    ///
    /// # Safety
    ///
    /// The provided activation must be valid currently.
    unsafe fn exit_frame(store: &mut StoreOpaque, activation: Activation) -> Option<FrameHandle> {
        // SAFETY: activation is valid as per our safety condition.
        let mut cursor = unsafe { activation.cursor() };

        // Find the first virtual frame. Each physical frame may have
        // zero or more virtual frames.
        while !cursor.done() {
            let (cache, registry) = store.frame_data_cache_mut_and_registry();
            let frames = cache.lookup_or_compute(registry, cursor.frame());
            if frames.len() > 0 {
                return Some(FrameHandle {
                    cursor,
                    virtual_frame_idx: 0,
                    store_id: store.id(),
                    store_version: store.vm_store_context().execution_version,
                });
            }
            // SAFETY: activation is still valid (valid on entry per
            // our safety condition, and we have not returned control
            // since above).
            unsafe {
                cursor.advance(store.unwinder());
            }
        }

        None
    }

    /// Determine whether this handle can still be used to refer to a
    /// frame.
    pub fn is_valid(&self, mut store: impl AsContextMut) -> bool {
        let store = store.as_context_mut();
        self.is_valid_impl(store.0.as_store_opaque())
    }

    fn is_valid_impl(&self, store: &StoreOpaque) -> bool {
        let id = store.id();
        let version = store.vm_store_context().execution_version;
        self.store_id == id && self.store_version == version
    }

    /// Get a handle to the next frame up the activation (the one that
    /// called this frame), if any.
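    ///
    /// A sketch of walking from an exit frame to the outermost frame
    /// of its activation (assuming `frame` came from
    /// [`Store::debug_exit_frames`]):
    ///
    /// ```ignore
    /// let mut cur = Some(frame);
    /// while let Some(f) = cur {
    ///     // ... inspect `f` here ...
    ///     cur = f.parent(&mut store).unwrap();
    /// }
    /// ```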
    pub fn parent(&self, mut store: impl AsContextMut) -> Result<Option<FrameHandle>> {
        let mut store = store.as_context_mut();
        if !self.is_valid(&mut store) {
            crate::error::bail!("Frame handle is no longer valid.");
        }

        let mut parent = self.clone();
        parent.virtual_frame_idx += 1;

        while !parent.cursor.done() {
            let (cache, registry) = store
                .0
                .as_store_opaque()
                .frame_data_cache_mut_and_registry();
            let frames = cache.lookup_or_compute(registry, parent.cursor.frame());
            if parent.virtual_frame_idx < frames.len() {
                return Ok(Some(parent));
            }
            parent.virtual_frame_idx = 0;
            // SAFETY: activation is valid because we checked validity
            // wrt execution version at the top of this function, and
            // we have not returned since.
            unsafe {
                parent.cursor.advance(store.0.as_store_opaque().unwinder());
            }
        }

        Ok(None)
    }

    fn frame_data<'a>(&self, store: &'a mut StoreOpaque) -> Result<&'a FrameData> {
        if !self.is_valid_impl(store) {
            crate::error::bail!("Frame handle is no longer valid.");
        }
        let (cache, registry) = store.frame_data_cache_mut_and_registry();
        let frames = cache.lookup_or_compute(registry, self.cursor.frame());
        // `virtual_frame_idx` counts upward for ease of iteration,
        // while the frames are stored in outer-to-inner (i.e., caller
        // to callee) order, so we need to reverse the index here.
        Ok(&frames[frames.len() - 1 - self.virtual_frame_idx])
    }

    fn raw_instance<'a>(&self, store: &mut StoreOpaque) -> Result<&'a crate::vm::Instance> {
        let frame_data = self.frame_data(store)?;

        // Read out the vmctx slot.

        // SAFETY: vmctx is always at offset 0 in the slot.  (See
        // crates/cranelift/src/func_environ.rs in
        // `update_stack_slot_vmctx()`.)  The frame/activation is
        // still valid because we verified this in `frame_data` above.
        let vmctx: usize =
            unsafe { *(frame_data.slot_addr(self.cursor.frame().fp()) as *mut usize) };
        let vmctx: *mut VMContext = core::ptr::with_exposed_provenance_mut(vmctx);
        let vmctx = NonNull::new(vmctx).expect("null vmctx in debug state slot");
        // SAFETY: the stored vmctx value is a valid instance in this
        // store; we only visit frames from this store in the
        // backtrace.
        let instance = unsafe { crate::vm::Instance::from_vmctx(vmctx) };
        // SAFETY: the instance pointer read above is valid.
        Ok(unsafe { instance.as_ref() })
    }

    /// Get the instance associated with the current frame.
    pub fn instance(&self, mut store: impl AsContextMut) -> Result<Instance> {
        let store = store.as_context_mut();
        let instance = self.raw_instance(store.0.as_store_opaque())?;
        let id = instance.id();
        Ok(Instance::from_wasmtime(id, store.0.as_store_opaque()))
    }

    /// Get the module associated with the current frame, if any
    /// (`None` if the frame's instance is a container instance for a
    /// host-created entity).
    pub fn module<'a, T: 'static>(
        &self,
        store: impl Into<StoreContextMut<'a, T>>,
    ) -> Result<Option<&'a Module>> {
        let store = store.into();
        let instance = self.raw_instance(store.0.as_store_opaque())?;
        Ok(instance.runtime_module())
    }

    /// Get the raw function index associated with the current frame,
    /// and the PC as an offset within the module's code section, if
    /// the frame is a Wasm function defined directly in the frame's
    /// `Module` (rather than a trampoline).
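    ///
    /// A sketch (assuming `frame` is a valid `FrameHandle`):
    ///
    /// ```ignore
    /// if let Some((func, pc)) = frame.wasm_function_index_and_pc(&mut store).unwrap() {
    ///     println!("paused in defined function {func:?} at Wasm offset {pc:#x}");
    /// }
    /// ```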
    pub fn wasm_function_index_and_pc(
        &self,
        mut store: impl AsContextMut,
    ) -> Result<Option<(DefinedFuncIndex, u32)>> {
        let mut store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let FuncKey::DefinedWasmFunction(module, func) = frame_data.func_key else {
            return Ok(None);
        };
        let wasm_pc = frame_data.wasm_pc;
        debug_assert_eq!(
            module,
            self.module(&mut store)?
                .expect("module should be defined if this is a defined function")
                .env_module()
                .module_index
        );
        Ok(Some((func, wasm_pc)))
    }

    /// Get the number of locals in this frame.
    pub fn num_locals(&self, mut store: impl AsContextMut) -> Result<u32> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        Ok(u32::try_from(frame_data.locals.len()).unwrap())
    }

    /// Get the depth of the operand stack in this frame.
    pub fn num_stacks(&self, mut store: impl AsContextMut) -> Result<u32> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        Ok(u32::try_from(frame_data.stack.len()).unwrap())
    }

    /// Get the type and value of the given local in this frame.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range (greater than or equal to
    /// `num_locals()`).
    pub fn local(&self, mut store: impl AsContextMut, index: u32) -> Result<Val> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let (offset, ty) = frame_data.locals[usize::try_from(index).unwrap()];
        let slot_addr = frame_data.slot_addr(self.cursor.frame().fp());
        // SAFETY: compiler produced metadata to describe this local
        // slot and stored a value of the correct type into it. Slot
        // address is valid because we checked liveness of the
        // activation/frame via `frame_data` above.
        Ok(unsafe { read_value(store.0.as_store_opaque(), slot_addr, offset, ty) })
    }

    /// Get the type and value of the given operand-stack value in
    /// this frame.
    ///
    /// Index 0 corresponds to the bottom-of-stack, and higher indices
    /// from there are more recently pushed values.  In other words,
    /// index order reads the Wasm virtual machine's abstract stack
    /// state left-to-right.
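    ///
    /// For example, if the abstract operand stack currently holds
    /// `[a, b, c]` with `c` pushed most recently, then index 0 returns
    /// `a` and index 2 returns `c`.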
    pub fn stack(&self, mut store: impl AsContextMut, index: u32) -> Result<Val> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let (offset, ty) = frame_data.stack[usize::try_from(index).unwrap()];
        let slot_addr = frame_data.slot_addr(self.cursor.frame().fp());
        // SAFETY: compiler produced metadata to describe this
        // operand-stack slot and stored a value of the correct type
        // into it. Slot address is valid because we checked liveness
        // of the activation/frame via `frame_data` above.
        Ok(unsafe { read_value(store.0.as_store_opaque(), slot_addr, offset, ty) })
    }
}

/// A cache from `StoreCodePC`s (PCs within modules' private code in a
/// store) to pre-computed layout data for the virtual stack frame(s)
/// present at that physical PC.
pub(crate) struct FrameDataCache {
    /// For a given physical PC, the list of virtual frames, from
    /// outer to inner (most recently called/inlined).
    by_pc: BTreeMap<StoreCodePC, Vec<FrameData>>,
}

impl FrameDataCache {
    pub(crate) fn new() -> FrameDataCache {
        FrameDataCache {
            by_pc: BTreeMap::new(),
        }
    }

    /// Look up (or compute) the list of `FrameData`s from a physical
    /// `Frame`.
    fn lookup_or_compute<'a>(
        &'a mut self,
        registry: &ModuleRegistry,
        frame: Frame,
    ) -> &'a [FrameData] {
        let pc = StoreCodePC::from_raw(frame.pc());
        match self.by_pc.entry(pc) {
            Entry::Occupied(frames) => frames.into_mut(),
            Entry::Vacant(v) => {
                // Although inlining can mix modules, `module` is the
                // module that actually contains the physical PC
                // (i.e., the outermost function that inlined the
                // others).
                let (module, frames) = VirtualFrame::decode(registry, frame.pc());
                let frames = frames
                    .into_iter()
                    .map(|frame| FrameData::compute(frame, &module))
                    .collect::<Vec<_>>();
                v.insert(frames)
            }
        }
    }
}

/// Internal data pre-computed for one stack frame.
///
/// This represents one frame as produced by the program-point lookup
/// (Wasm PC, frame descriptor index, stack shape).
struct VirtualFrame {
    /// The Wasm PC for this frame.
    wasm_pc: u32,
    /// The frame descriptor for this frame.
    frame_descriptor: FrameTableDescriptorIndex,
    /// The stack shape for this frame.
    stack_shape: FrameStackShape,
}

impl VirtualFrame {
    /// Return virtual frames corresponding to a physical frame, from
    /// outermost to innermost.
    fn decode(registry: &ModuleRegistry, pc: usize) -> (Module, Vec<VirtualFrame>) {
        let (module_with_code, pc) = registry
            .module_and_code_by_pc(pc)
            .expect("Wasm frame PC does not correspond to a module");
        let module = module_with_code.module();
        let table = module.frame_table().unwrap();
        let pc = u32::try_from(pc).expect("PC offset too large");
        let program_points = table.find_program_point(pc, FrameInstPos::Post)
            .expect("There must be a program point record in every frame when debug instrumentation is enabled");

        (
            module.clone(),
            program_points
                .map(|(wasm_pc, frame_descriptor, stack_shape)| VirtualFrame {
                    wasm_pc,
                    frame_descriptor,
                    stack_shape,
                })
                .collect(),
        )
    }
}

/// Data computed when we visit a given frame.
struct FrameData {
    slot_to_fp_offset: usize,
    func_key: FuncKey,
    wasm_pc: u32,
    /// Shape of locals in this frame.
    ///
    /// We need to store this locally because `FrameView` cannot
    /// borrow the store: it needs a mut borrow, and an iterator
    /// cannot yield the same mut borrow multiple times because it
    /// cannot control the lifetime of the values it yields (the
    /// signature of `next()` does not bound the return value to the
    /// `&mut self` arg).
    locals: Vec<(FrameStateSlotOffset, FrameValType)>,
    /// Shape of the stack slots at this program point in this frame.
    ///
    /// In addition to the borrowing-related reason above, we also
    /// materialize this because we want to provide O(1) access to the
    /// stack by depth, and the frame slot descriptor stores info in a
    /// linked-list (actually DAG, with dedup'ing) way.
    stack: Vec<(FrameStateSlotOffset, FrameValType)>,
}

impl FrameData {
    fn compute(frame: VirtualFrame, module: &Module) -> Self {
        let frame_table = module.frame_table().unwrap();
        // Parse the frame descriptor.
        let (data, slot_to_fp_offset) = frame_table
            .frame_descriptor(frame.frame_descriptor)
            .unwrap();
        let frame_state_slot = FrameStateSlot::parse(data).unwrap();
        let slot_to_fp_offset = usize::try_from(slot_to_fp_offset).unwrap();

        // Materialize the stack shape so we have O(1) access to its
        // elements, and so we don't need to keep the borrow to the
        // module alive.
        let mut stack = frame_state_slot
            .stack(frame.stack_shape)
            .collect::<Vec<_>>();
        stack.reverse(); // Put top-of-stack last.

        // Materialize the local offsets/types so we don't need to
        // keep the borrow to the module alive.
        let locals = frame_state_slot.locals().collect::<Vec<_>>();

        FrameData {
            slot_to_fp_offset,
            func_key: frame_state_slot.func_key(),
            wasm_pc: frame.wasm_pc,
            stack,
            locals,
        }
    }

    fn slot_addr(&self, fp: usize) -> *mut u8 {
        let fp: *mut u8 = core::ptr::with_exposed_provenance_mut(fp);
        fp.wrapping_sub(self.slot_to_fp_offset)
    }
}

/// Read the value at the given offset.
///
/// # Safety
///
/// The `offset` and `ty` must correspond to a valid value of the
/// correct type written to the frame by generated code. This will
/// be the case if this information comes from the frame tables
/// (as long as the frontend that generates the tables and
/// instrumentation is correct, and as long as the tables are
/// preserved through serialization).
unsafe fn read_value(
    store: &mut StoreOpaque,
    slot_base: *const u8,
    offset: FrameStateSlotOffset,
    ty: FrameValType,
) -> Val {
    let address = unsafe { slot_base.offset(isize::try_from(offset.offset()).unwrap()) };

    // SAFETY: each case reads a value from memory that should be
    // valid according to our safety condition.
    match ty {
        FrameValType::I32 => {
            let value = unsafe { *(address as *const i32) };
            Val::I32(value)
        }
        FrameValType::I64 => {
            let value = unsafe { *(address as *const i64) };
            Val::I64(value)
        }
        FrameValType::F32 => {
            let value = unsafe { *(address as *const u32) };
            Val::F32(value)
        }
        FrameValType::F64 => {
            let value = unsafe { *(address as *const u64) };
            Val::F64(value)
        }
        FrameValType::V128 => {
            // Vectors are always stored as little-endian.
            let value = unsafe { u128::from_le_bytes(*(address as *const [u8; 16])) };
            Val::V128(value.into())
        }
        FrameValType::AnyRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = AnyRef::_from_raw(&mut nogc, value);
            Val::AnyRef(value)
        }
        FrameValType::ExnRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExnRef::_from_raw(&mut nogc, value);
            Val::ExnRef(value)
        }
        FrameValType::ExternRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExternRef::_from_raw(&mut nogc, value);
            Val::ExternRef(value)
        }
        FrameValType::FuncRef => {
            let value = unsafe { *(address as *const *mut c_void) };
            let value = unsafe { Func::_from_raw(store, value) };
            Val::FuncRef(value)
        }
        FrameValType::ContRef => {
            unimplemented!("contref values are not implemented in the host API yet")
        }
    }
}

/// Compute raw pointers to all GC refs in the given frame.
// Note: ideally this would return an `impl Iterator`, but that is
// quite awkward because locally computed data (the structured result
// of `FrameStateSlot::parse`) would have to be borrowed by a nested
// closure.
#[cfg(feature = "gc")]
pub(crate) fn gc_refs_in_frame<'a>(ft: FrameTable<'a>, pc: u32, fp: *mut usize) -> Vec<*mut u32> {
    let fp = fp.cast::<u8>();
    let mut ret = vec![];
    if let Some(frames) = ft.find_program_point(pc, FrameInstPos::Post) {
        for (_wasm_pc, frame_desc, stack_shape) in frames {
            let (frame_desc_data, slot_to_fp_offset) = ft.frame_descriptor(frame_desc).unwrap();
            let frame_base = unsafe { fp.offset(-isize::try_from(slot_to_fp_offset).unwrap()) };
            let frame_desc = FrameStateSlot::parse(frame_desc_data).unwrap();
            for (offset, ty) in frame_desc.stack_and_locals(stack_shape) {
                match ty {
                    FrameValType::AnyRef | FrameValType::ExnRef | FrameValType::ExternRef => {
                        let slot = unsafe {
                            frame_base
                                .offset(isize::try_from(offset.offset()).unwrap())
                                .cast::<u32>()
                        };
                        ret.push(slot);
                    }
                    FrameValType::ContRef | FrameValType::FuncRef => {}
                    FrameValType::I32
                    | FrameValType::I64
                    | FrameValType::F32
                    | FrameValType::F64
                    | FrameValType::V128 => {}
                }
            }
        }
    }
    ret
}

/// One debug event that occurs when running Wasm code on a store with
/// a debug handler attached.
#[derive(Debug)]
pub enum DebugEvent<'a> {
    /// A [`wasmtime::Error`](crate::Error) was raised by a hostcall.
    HostcallError(&'a crate::Error),
    /// An exception was thrown and will be caught by Wasm. The current
    /// state is at the throw-point.
    CaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// An exception was not caught and is escaping to the host.
    UncaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// A Wasm trap occurred.
    Trap(Trap),
    /// A breakpoint was reached.
    Breakpoint,
    /// An epoch yield occurred.
    EpochYield,
}

/// A handler for debug events.
///
/// This is an async callback that is invoked directly within the
/// context of a debug event that occurs, i.e., with the Wasm code
/// still on the stack. The callback can thus observe that stack, up
/// to the most recent entry to Wasm.[^1]
///
/// Because this callback receives a `StoreContextMut`, it has full
/// access to any state that any other hostcall has, including the
/// `T`. In that way, it is like an epoch-deadline callback or a
/// call-hook callback. It also "freezes" the entire store for the
/// duration of the debugger callback future.
///
/// In the future, we expect to provide an "externally async" API on
/// the `Store` that allows receiving a stream of debug events and
/// accessing the store mutably while frozen; that will need to
/// integrate with [`Store::run_concurrent`] to properly timeslice and
/// scope the mutable access to the store, and has not been built
/// yet. In the meantime, it should be possible to build a fully
/// functional debugger with this async-callback API by channeling
/// debug events out, and requests to read the store back in, over
/// message-passing channels between the callback and an external
/// debugger main loop.
///
/// Note that the `handle` hook may use its mutable store access to
/// invoke more Wasm code. Debug events during that recursive
/// invocation will also be caught and will cause further `handle`
/// invocations. It is up to the debugger to handle any implications
/// of this reentrancy (e.g., implications for a duplex channel
/// protocol with an event/continue handshake) if it does so.
///
/// Note also that this trait has `Clone` as a supertrait, and the
/// handler is cloned at every invocation as an artifact of the
/// internal ownership structure of Wasmtime: the handler itself is
/// owned by the store, but also receives a mutable borrow to the
/// whole store, so we need to clone it out to invoke it. It is
/// recommended that this trait be implemented by a type that is cheap
/// to clone: for example, a single `Arc` handle to debugger state.
///
/// [^1]: Providing visibility further than the most recent entry to
///       Wasm is not directly possible because it could see into
///       another async stack, and the stack that polls the future
///       running a particular Wasm invocation could change after each
///       suspend point in the handler.
///
/// [`Store::run_concurrent`]: crate::Store::run_concurrent
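///
/// A minimal sketch of an implementation (the `EventLog` type is
/// hypothetical; it simply records a textual description of each
/// event behind a cheaply-cloned `Arc`):
///
/// ```ignore
/// // Hypothetical example handler: an Arc handle to shared debugger state.
/// #[derive(Clone)]
/// struct EventLog(std::sync::Arc<std::sync::Mutex<Vec<String>>>);
///
/// impl DebugHandler for EventLog {
///     type Data = ();
///
///     fn handle(
///         &self,
///         _store: StoreContextMut<'_, ()>,
///         event: DebugEvent<'_>,
///     ) -> impl Future<Output = ()> + Send {
///         // Format the event before entering the async block so the
///         // future does not capture the borrowed `event`.
///         let line = format!("{event:?}");
///         let log = self.0.clone();
///         async move {
///             log.lock().unwrap().push(line);
///         }
///     }
/// }
/// ```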
pub trait DebugHandler: Clone + Send + Sync + 'static {
    /// The data expected on the store that this handler is attached
    /// to.
    type Data;

    /// Handle a debug event.
    fn handle(
        &self,
        store: StoreContextMut<'_, Self::Data>,
        event: DebugEvent<'_>,
    ) -> impl Future<Output = ()> + Send;
}

/// Breakpoint state for modules within a store.
#[derive(Default)]
pub(crate) struct BreakpointState {
    /// Single-step mode.
    single_step: bool,
    /// Breakpoints added individually.
    breakpoints: BTreeSet<BreakpointKey>,
}

/// A breakpoint.
pub struct Breakpoint {
    /// Reference to the module in which we are setting the breakpoint.
    pub module: Module,
    /// Wasm PC offset within the module.
    pub pc: u32,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct BreakpointKey(CompiledModuleId, u32);

impl BreakpointKey {
    fn from_raw(module: &Module, pc: u32) -> BreakpointKey {
        BreakpointKey(module.id(), pc)
    }

    fn get(&self, registry: &ModuleRegistry) -> Breakpoint {
        let module = registry
            .module_by_compiled_id(self.0)
            .expect("Module should not have been removed from Store")
            .clone();
        Breakpoint { module, pc: self.1 }
    }
}

/// A breakpoint-editing session.
///
/// This enables updating breakpoint state (setting or unsetting
/// individual breakpoints or the store-global single-step flag) in a
/// batch. It is more efficient to batch these updates because
/// "re-publishing" the newly patched code, with updated breakpoint
/// settings, typically requires a syscall to re-enable execute
/// permissions.
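///
/// A hedged usage sketch (assuming `module` is a `Module` registered
/// in this store and `pc` is a Wasm PC offset that has a breakpoint
/// patch site in the module's frame table):
///
/// ```ignore
/// if let Some(mut edit) = store.edit_breakpoints() {
///     edit.add_breakpoint(&module, pc).unwrap();
///     edit.single_step(false).unwrap();
/// } // Dropping the session re-publishes the patched code.
/// ```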
pub struct BreakpointEdit<'a> {
    state: &'a mut BreakpointState,
    registry: &'a mut ModuleRegistry,
    /// Modules that have been edited.
    ///
    /// Invariant: each of these modules' CodeMemory objects is
    /// *unpublished* when in the dirty set.
    dirty_modules: BTreeSet<StoreCodePC>,
}

impl BreakpointState {
    pub(crate) fn edit<'a>(&'a mut self, registry: &'a mut ModuleRegistry) -> BreakpointEdit<'a> {
        BreakpointEdit {
            state: self,
            registry,
            dirty_modules: BTreeSet::new(),
        }
    }

    pub(crate) fn breakpoints<'a>(
        &'a self,
        registry: &'a ModuleRegistry,
    ) -> impl Iterator<Item = Breakpoint> + 'a {
        self.breakpoints.iter().map(|key| key.get(registry))
    }

    pub(crate) fn is_single_step(&self) -> bool {
        self.single_step
    }
}

impl<'a> BreakpointEdit<'a> {
    fn get_code_memory<'b>(
        registry: &'b mut ModuleRegistry,
        dirty_modules: &mut BTreeSet<StoreCodePC>,
        module: &Module,
    ) -> Result<&'b mut CodeMemory> {
        let store_code_pc = registry.store_code_base_or_register(module)?;
        let code_memory = registry
            .store_code_mut(store_code_pc)
            .expect("Just checked presence above")
            .code_memory_mut()
            .expect("Must have unique ownership of StoreCode in guest-debug mode");
        if dirty_modules.insert(store_code_pc) {
            code_memory.unpublish()?;
        }
        Ok(code_memory)
    }

    fn patch<'b>(
        patches: impl Iterator<Item = FrameTableBreakpointData<'b>> + 'b,
        mem: &mut CodeMemory,
        enable: bool,
    ) {
        let mem = mem.text_mut();
        for patch in patches {
            let data = if enable { patch.enable } else { patch.disable };
            let mem = &mut mem[patch.offset..patch.offset + data.len()];
            log::trace!(
                "patch: offset 0x{:x} with enable={enable}: data {data:?} replacing {mem:?}",
                patch.offset
            );
            mem.copy_from_slice(data);
        }
    }

    /// Add a breakpoint in the given module at the given PC in that
    /// module.
    ///
    /// No effect if the breakpoint is already set.
    pub fn add_breakpoint(&mut self, module: &Module, pc: u32) -> Result<()> {
        let key = BreakpointKey::from_raw(module, pc);
        self.state.breakpoints.insert(key);
        log::trace!("patching in breakpoint {key:?}");
        let mem = Self::get_code_memory(self.registry, &mut self.dirty_modules, module)?;
        let frame_table = module
            .frame_table()
            .expect("Frame table must be present when guest-debug is enabled");
        let patches = frame_table.lookup_breakpoint_patches_by_pc(pc);
        Self::patch(patches, mem, true);
        Ok(())
    }

    /// Remove a breakpoint in the given module at the given PC in
    /// that module.
    ///
    /// No effect if the breakpoint was not set.
    pub fn remove_breakpoint(&mut self, module: &Module, pc: u32) -> Result<()> {
        let key = BreakpointKey::from_raw(module, pc);
        self.state.breakpoints.remove(&key);
        if !self.state.single_step {
            let mem = Self::get_code_memory(self.registry, &mut self.dirty_modules, module)?;
            let frame_table = module
                .frame_table()
                .expect("Frame table must be present when guest-debug is enabled");
            let patches = frame_table.lookup_breakpoint_patches_by_pc(pc);
            Self::patch(patches, mem, false);
        }
        Ok(())
    }

    /// Turn on or off single-step mode.
    ///
    /// In single-step mode, a breakpoint event is emitted at every
    /// Wasm PC.
    pub fn single_step(&mut self, enabled: bool) -> Result<()> {
        log::trace!(
            "single_step({enabled}) with breakpoint set {:?}",
            self.state.breakpoints
        );
        let modules = self.registry.all_modules().cloned().collect::<Vec<_>>();
        for module in modules {
            let mem = Self::get_code_memory(self.registry, &mut self.dirty_modules, &module)?;
            let table = module
                .frame_table()
                .expect("Frame table must be present when guest-debug is enabled");
            for (wasm_pc, patch) in table.breakpoint_patches() {
                let key = BreakpointKey::from_raw(&module, wasm_pc);
                let this_enabled = enabled || self.state.breakpoints.contains(&key);
                log::trace!(
                    "single_step: enabled {enabled} key {key:?} -> this_enabled {this_enabled}"
                );
                Self::patch(core::iter::once(patch), mem, this_enabled);
            }
        }

        self.state.single_step = enabled;

        Ok(())
    }
}

impl<'a> Drop for BreakpointEdit<'a> {
    fn drop(&mut self) {
        for &store_code_base in &self.dirty_modules {
            let store_code = self.registry.store_code_mut(store_code_base).unwrap();
            if let Err(e) = store_code
                .code_memory_mut()
                .expect("Must have unique ownership of StoreCode in guest-debug mode")
                .publish()
            {
                abort_on_republish_error(e);
            }
        }
    }
}

/// Abort when we cannot re-publish executable code.
///
/// Note that this puts us in quite a conundrum. Typically we will
/// have been editing breakpoints from within a hostcall context
/// (e.g. inside a debugger hook while execution is paused) with JIT
/// code on the stack. Wasmtime's usual path to return errors is back
/// through that JIT code: we do not panic-unwind across the JIT code,
/// we return into the exit trampoline and that then re-enters the
/// raise libcall to use a Cranelift exception-throw to cross most of
/// the JIT frames to the entry trampoline. When even trampolines are
/// no longer executable, we have no way out. Even an ordinary
/// `panic!` cannot work, because we catch panics and carry them
/// across JIT code using that trampoline-based error path. Our only
/// way out is to directly abort the whole process.
///
/// This is not without precedent: other engines have similar failure
/// paths. For example, SpiderMonkey directly aborts the process when
/// failing to re-apply executable permissions (see [1]).
///
/// Note that we don't really expect to ever hit this case in
/// practice: it's unlikely that `mprotect` applying `PROT_EXEC` would
/// fail due to, e.g., resource exhaustion in the kernel, because we
/// will have the same net number of virtual memory areas before and
/// after the permissions change. Nevertheless, we have to account for
/// the possibility of error.
///
/// [1]: https://searchfox.org/firefox-main/rev/7496c8515212669451d7e775a00c2be07da38ca5/js/src/jit/AutoWritableJitCode.h#26-56
#[cfg(feature = "std")]
fn abort_on_republish_error(e: crate::Error) -> ! {
    log::error!(
        "Failed to re-publish executable code: {e:?}. Wasmtime cannot return through JIT code on the stack and cannot even panic; aborting the process."
    );
    std::process::abort();
}

/// In the `no_std` case, we don't have a concept of a "process
/// abort", so rely on `panic!`. Typically an embedded scenario that
/// uses `no_std` will build with `panic=abort` so the effect is the
/// same. If it doesn't, there is truly nothing we can do here so
/// let's panic anyway; the panic propagation through the trampolines
/// will at least deterministically crash.
#[cfg(not(feature = "std"))]
fn abort_on_republish_error(e: crate::Error) -> ! {
    panic!("Failed to re-publish executable code: {e:?}");
}