// wasmtime/runtime/debug.rs
1//! Debugging API.
2
3use super::store::AsStoreOpaque;
4use crate::code::StoreCode;
5use crate::module::RegisterBreakpointState;
6use crate::store::StoreId;
7use crate::vm::{Activation, Backtrace};
8use crate::{
9    AnyRef, AsContextMut, CodeMemory, ExnRef, Extern, ExternRef, Func, Instance, Module,
10    OwnedRooted, StoreContext, StoreContextMut, Val,
11    code::StoreCodePC,
12    module::ModuleRegistry,
13    store::{AutoAssertNoGc, StoreOpaque},
14    vm::{CompiledModuleId, VMContext},
15};
16use crate::{Caller, Result, Store};
17use alloc::collections::{BTreeMap, BTreeSet, btree_map::Entry};
18use alloc::vec;
19use alloc::vec::Vec;
20use core::{ffi::c_void, ptr::NonNull};
21#[cfg(feature = "gc")]
22use wasmtime_environ::FrameTable;
23// Re-export ModulePC so downstream crates can use it.
24pub use wasmtime_environ::ModulePC;
25use wasmtime_environ::{
26    DefinedFuncIndex, EntityIndex, FrameInstPos, FrameStackShape, FrameStateSlot,
27    FrameStateSlotOffset, FrameTableBreakpointData, FrameTableDescriptorIndex, FrameValType,
28    FuncIndex, FuncKey, GlobalIndex, MemoryIndex, TableIndex, TagIndex, Trap,
29};
30use wasmtime_unwinder::{Frame, FrameCursor};
31
32impl<T> Store<T> {
33    /// Provide a frame handle for all activations, in order from
34    /// innermost (most recently called) to outermost on the stack.
35    ///
36    /// An activation is a contiguous sequence of Wasm frames (called
37    /// functions) that were called from host code and called back out
38    /// to host code. If there are activations from multiple stores on
39    /// the stack, for example if Wasm code in one store calls out to
40    /// host code which invokes another Wasm function in another
41    /// store, then the other stores are "opaque" to our view here in
42    /// the same way that host code is.
43    ///
44    /// Returns an empty list if debug instrumentation is not enabled
45    /// for the engine containing this store.
46    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
47        self.as_store_opaque().debug_exit_frames()
48    }
49
50    /// Start an edit session to update breakpoints.
51    pub fn edit_breakpoints<'a>(&'a mut self) -> Option<BreakpointEdit<'a>> {
52        self.as_store_opaque().edit_breakpoints()
53    }
54
55    /// Get a vector of all Instances held in the Store, for debug
56    /// purposes.
57    ///
58    /// Guest debugging must be enabled for this accessor to return
59    /// any instances. If it is not, an empty vector is returned.
60    pub fn debug_all_instances(&mut self) -> Vec<Instance> {
61        self.as_store_opaque().debug_all_instances()
62    }
63
64    /// Get a vector of all Modules held in the Store, for debug
65    /// purposes.
66    ///
67    /// Guest debugging must be enabled for this accessor to return
68    /// any modules. If it is not, an empty vector is returned.
69    pub fn debug_all_modules(&mut self) -> Vec<Module> {
70        self.as_store_opaque().debug_all_modules()
71    }
72}
73
74impl<'a, T> StoreContextMut<'a, T> {
75    /// Provide a frame handle for all activations, in order from
76    /// innermost (most recently called) to outermost on the stack.
77    ///
78    /// See [`Store::debug_exit_frames`] for more details.
79    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
80        self.0.as_store_opaque().debug_exit_frames()
81    }
82
83    /// Start an edit session to update breakpoints.
84    pub fn edit_breakpoints(self) -> Option<BreakpointEdit<'a>> {
85        self.0.as_store_opaque().edit_breakpoints()
86    }
87
88    /// Get a vector of all Instances held in the Store, for debug
89    /// purposes.
90    ///
91    /// See [`Store::debug_all_instances`] for more details.
92    pub fn debug_all_instances(self) -> Vec<Instance> {
93        self.0.as_store_opaque().debug_all_instances()
94    }
95
96    /// Get a vector of all Modules held in the Store, for debug
97    /// purposes.
98    ///
99    /// See [`Store::debug_all_modules`] for more details.
100    pub fn debug_all_modules(self) -> Vec<Module> {
101        self.0.as_store_opaque().debug_all_modules()
102    }
103}
104
105impl<'a, T> Caller<'a, T> {
106    /// Provide a frame handle for all activations, in order from
107    /// innermost (most recently called) to outermost on the stack.
108    ///
109    /// See [`Store::debug_exit_frames`] for more details.
110    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
111        self.store.0.as_store_opaque().debug_exit_frames()
112    }
113
114    /// Start an edit session to update breakpoints.
115    pub fn edit_breakpoints<'b>(&'b mut self) -> Option<BreakpointEdit<'b>> {
116        self.store.0.as_store_opaque().edit_breakpoints()
117    }
118
119    /// Get a vector of all Instances held in the Store, for debug
120    /// purposes.
121    ///
122    /// See [`Store::debug_all_instances`] for more details.
123    pub fn debug_all_instances(&mut self) -> Vec<Instance> {
124        self.store.0.as_store_opaque().debug_all_instances()
125    }
126
127    /// Get a vector of all Modules held in the Store, for debug
128    /// purposes.
129    ///
130    /// See [`Store::debug_all_modules`] for more details.
131    pub fn debug_all_modules(&mut self) -> Vec<Module> {
132        self.store.0.as_store_opaque().debug_all_modules()
133    }
134}
135
136impl StoreOpaque {
137    fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
138        let activations = if self.engine().tunables().debug_guest {
139            Backtrace::activations(self)
140        } else {
141            vec![]
142        };
143
144        activations
145            .into_iter()
146            // SAFETY: each activation is currently active and will
147            // remain so (we have a mutable borrow of the store).
148            .filter_map(|act| unsafe { FrameHandle::exit_frame(self, act) })
149    }
150
151    fn edit_breakpoints<'a>(&'a mut self) -> Option<BreakpointEdit<'a>> {
152        if !self.engine().tunables().debug_guest {
153            return None;
154        }
155
156        let (breakpoints, registry) = self.breakpoints_and_registry_mut();
157        Some(breakpoints.edit(registry))
158    }
159
160    fn debug_all_instances(&mut self) -> Vec<Instance> {
161        if !self.engine().tunables().debug_guest {
162            return vec![];
163        }
164
165        self.all_instances().collect()
166    }
167
168    fn debug_all_modules(&self) -> Vec<Module> {
169        if !self.engine().tunables().debug_guest {
170            return vec![];
171        }
172
173        self.modules().all_modules().cloned().collect()
174    }
175}
176
177impl Instance {
178    /// Get access to a global within this instance's globals index
179    /// space.
180    ///
181    /// This permits accessing globals whether they are exported or
182    /// not. However, it is only available for purposes of debugging,
183    /// and so is only permitted when `guest_debug` is enabled in the
184    /// Engine's configuration. The intent of the Wasmtime API is to
185    /// enforce the Wasm type system's encapsulation even in the host
186    /// API, except where necessary for developer tooling.
187    ///
188    /// `None` is returned for any global index that is out-of-bounds.
189    ///
190    /// `None` is returned if guest-debugging is not enabled in the
191    /// engine configuration for this Store.
192    pub fn debug_global(
193        &self,
194        mut store: impl AsContextMut,
195        global_index: u32,
196    ) -> Option<crate::Global> {
197        self.debug_export(
198            store.as_context_mut().0,
199            GlobalIndex::from_bits(global_index).into(),
200        )
201        .and_then(|s| s.into_global())
202    }
203
204    /// Get access to a memory (unshared only) within this instance's
205    /// memory index space.
206    ///
207    /// This permits accessing memories whether they are exported or
208    /// not. However, it is only available for purposes of debugging,
209    /// and so is only permitted when `guest_debug` is enabled in the
210    /// Engine's configuration. The intent of the Wasmtime API is to
211    /// enforce the Wasm type system's encapsulation even in the host
212    /// API, except where necessary for developer tooling.
213    ///
214    /// `None` is returned for any memory index that is out-of-bounds.
215    ///
216    /// `None` is returned for any shared memory (use
217    /// `debug_shared_memory` instead).
218    ///
219    /// `None` is returned if guest-debugging is not enabled in the
220    /// engine configuration for this Store.
221    pub fn debug_memory(
222        &self,
223        mut store: impl AsContextMut,
224        memory_index: u32,
225    ) -> Option<crate::Memory> {
226        self.debug_export(
227            store.as_context_mut().0,
228            MemoryIndex::from_bits(memory_index).into(),
229        )
230        .and_then(|s| s.into_memory())
231    }
232
233    /// Get access to a shared memory within this instance's memory
234    /// index space.
235    ///
236    /// This permits accessing memories whether they are exported or
237    /// not. However, it is only available for purposes of debugging,
238    /// and so is only permitted when `guest_debug` is enabled in the
239    /// Engine's configuration. The intent of the Wasmtime API is to
240    /// enforce the Wasm type system's encapsulation even in the host
241    /// API, except where necessary for developer tooling.
242    ///
243    /// `None` is returned for any memory index that is out-of-bounds.
244    ///
245    /// `None` is returned for any unshared memory (use `debug_memory`
246    /// instead).
247    ///
248    /// `None` is returned if guest-debugging is not enabled in the
249    /// engine configuration for this Store.
250    pub fn debug_shared_memory(
251        &self,
252        mut store: impl AsContextMut,
253        memory_index: u32,
254    ) -> Option<crate::SharedMemory> {
255        self.debug_export(
256            store.as_context_mut().0,
257            MemoryIndex::from_bits(memory_index).into(),
258        )
259        .and_then(|s| s.into_shared_memory())
260    }
261
262    /// Get access to a table within this instance's table index
263    /// space.
264    ///
265    /// This permits accessing tables whether they are exported or
266    /// not. However, it is only available for purposes of debugging,
267    /// and so is only permitted when `guest_debug` is enabled in the
268    /// Engine's configuration. The intent of the Wasmtime API is to
269    /// enforce the Wasm type system's encapsulation even in the host
270    /// API, except where necessary for developer tooling.
271    ///
272    /// `None` is returned for any table index that is out-of-bounds.
273    ///
274    /// `None` is returned if guest-debugging is not enabled in the
275    /// engine configuration for this Store.
276    pub fn debug_table(
277        &self,
278        mut store: impl AsContextMut,
279        table_index: u32,
280    ) -> Option<crate::Table> {
281        self.debug_export(
282            store.as_context_mut().0,
283            TableIndex::from_bits(table_index).into(),
284        )
285        .and_then(|s| s.into_table())
286    }
287
288    /// Get access to a function within this instance's function index
289    /// space.
290    ///
291    /// This permits accessing functions whether they are exported or
292    /// not. However, it is only available for purposes of debugging,
293    /// and so is only permitted when `guest_debug` is enabled in the
294    /// Engine's configuration. The intent of the Wasmtime API is to
295    /// enforce the Wasm type system's encapsulation even in the host
296    /// API, except where necessary for developer tooling.
297    ///
298    /// `None` is returned for any function index that is
299    /// out-of-bounds.
300    ///
301    /// `None` is returned if guest-debugging is not enabled in the
302    /// engine configuration for this Store.
303    pub fn debug_function(
304        &self,
305        mut store: impl AsContextMut,
306        function_index: u32,
307    ) -> Option<crate::Func> {
308        self.debug_export(
309            store.as_context_mut().0,
310            FuncIndex::from_bits(function_index).into(),
311        )
312        .and_then(|s| s.into_func())
313    }
314
315    /// Get access to a tag within this instance's tag index space.
316    ///
317    /// This permits accessing tags whether they are exported or
318    /// not. However, it is only available for purposes of debugging,
319    /// and so is only permitted when `guest_debug` is enabled in the
320    /// Engine's configuration. The intent of the Wasmtime API is to
321    /// enforce the Wasm type system's encapsulation even in the host
322    /// API, except where necessary for developer tooling.
323    ///
324    /// `None` is returned for any tag index that is out-of-bounds.
325    ///
326    /// `None` is returned if guest-debugging is not enabled in the
327    /// engine configuration for this Store.
328    pub fn debug_tag(&self, mut store: impl AsContextMut, tag_index: u32) -> Option<crate::Tag> {
329        self.debug_export(
330            store.as_context_mut().0,
331            TagIndex::from_bits(tag_index).into(),
332        )
333        .and_then(|s| s.into_tag())
334    }
335
336    fn debug_export(&self, store: &mut StoreOpaque, index: EntityIndex) -> Option<Extern> {
337        if !store.engine().tunables().debug_guest {
338            return None;
339        }
340
341        let env_module = self._module(store).env_module();
342        if !env_module.is_valid(index) {
343            return None;
344        }
345        let store_id = store.id();
346        let (instance, registry) = store.instance_and_module_registry_mut(self.id());
347        // SAFETY: the `store` and `registry` are associated with
348        // this instance as we fetched the instance directly from
349        // the store above.
350        let export = unsafe { instance.get_export_by_index_mut(registry, store_id, index) };
351        Some(Extern::from_wasmtime_export(export, store.engine()))
352    }
353}
354
355impl<'a, T> StoreContext<'a, T> {
356    /// Return all breakpoints.
357    pub fn breakpoints(self) -> Option<impl Iterator<Item = Breakpoint> + 'a> {
358        if !self.engine().tunables().debug_guest {
359            return None;
360        }
361
362        let (breakpoints, registry) = self.0.breakpoints_and_registry();
363        Some(breakpoints.breakpoints(registry))
364    }
365
366    /// Indicate whether single-step mode is enabled.
367    pub fn is_single_step(&self) -> bool {
368        let (breakpoints, _) = self.0.breakpoints_and_registry();
369        breakpoints.is_single_step()
370    }
371}
372
/// A handle to a stack frame, valid as long as execution is not
/// resumed in the associated `Store`.
///
/// This handle can be held and cloned and used to refer to a frame
/// within a paused store. It is cheap: it internally consists of a
/// pointer to the actual frame, together with some metadata to
/// determine when that pointer has gone stale.
///
/// At the API level, any usage of this frame handle requires a
/// mutable borrow of the `Store`, because the `Store` logically owns
/// the stack(s) for any execution within it. However, the existence
/// of the handle itself does not hold a borrow on the `Store`; hence,
/// the `Store` can continue to be used and queried, and some state
/// (e.g. memories, tables, GC objects) can even be mutated, as long
/// as execution is not resumed. The intent of this API is to allow a
/// wide variety of debugger implementation strategies that expose
/// stack frames and also allow other commands/actions at the same
/// time.
///
/// The user can use [`FrameHandle::is_valid`] to determine if the
/// handle is still valid and usable.
#[derive(Clone)]
pub struct FrameHandle {
    /// The unwinder cursor at this frame.
    cursor: FrameCursor,

    /// The index of the virtual frame within the physical frame.
    ///
    /// A physical frame may contain multiple virtual (e.g. inlined)
    /// frames; this selects one. Counts from the innermost frame
    /// outward (see `FrameHandle::frame_data` for the reversal).
    virtual_frame_idx: usize,

    /// The unique Store this frame came from, to ensure the handle is
    /// used with the correct Store.
    store_id: StoreId,

    /// Snapshot of the store's `execution_version` at capture time.
    ///
    /// A mismatch with the store's current value marks this handle as
    /// stale (see `FrameHandle::is_valid`).
    store_version: u64,
}
409
impl FrameHandle {
    /// Create a new FrameHandle at the exit frame of an activation.
    ///
    /// Returns `None` if no physical frame in the activation carries
    /// any virtual frame data.
    ///
    /// # Safety
    ///
    /// The provided activation must be valid currently.
    unsafe fn exit_frame(store: &mut StoreOpaque, activation: Activation) -> Option<FrameHandle> {
        // SAFETY: activation is valid as per our safety condition.
        let mut cursor = unsafe { activation.cursor() };

        // Find the first virtual frame. Each physical frame may have
        // zero or more virtual frames.
        while !cursor.done() {
            let (cache, registry) = store.frame_data_cache_mut_and_registry();
            let frames = cache.lookup_or_compute(registry, cursor.frame());
            if frames.len() > 0 {
                return Some(FrameHandle {
                    cursor,
                    virtual_frame_idx: 0,
                    store_id: store.id(),
                    // Snapshot the execution version so later uses can
                    // detect that execution has resumed (staleness).
                    store_version: store.vm_store_context().execution_version,
                });
            }
            // SAFETY: activation is still valid (valid on entry per
            // our safety condition, and we have not returned control
            // since above).
            unsafe {
                cursor.advance(store.unwinder());
            }
        }

        None
    }

    /// Determine whether this handle can still be used to refer to a
    /// frame.
    pub fn is_valid(&self, mut store: impl AsContextMut) -> bool {
        let store = store.as_context_mut();
        self.is_valid_impl(store.0.as_store_opaque())
    }

    /// Validity check: the handle must come from this exact store and
    /// the store's execution version must not have advanced since the
    /// handle was captured.
    fn is_valid_impl(&self, store: &StoreOpaque) -> bool {
        let id = store.id();
        let version = store.vm_store_context().execution_version;
        self.store_id == id && self.store_version == version
    }

    /// Get a handle to the next frame up the activation (the one that
    /// called this frame), if any.
    ///
    /// # Errors
    ///
    /// Returns an error if this handle is no longer valid.
    pub fn parent(&self, mut store: impl AsContextMut) -> Result<Option<FrameHandle>> {
        let mut store = store.as_context_mut();
        if !self.is_valid(&mut store) {
            crate::error::bail!("Frame handle is no longer valid.");
        }

        // Work on a clone: first try the next virtual frame within the
        // same physical frame, then walk outward to physical frames
        // that carry at least one virtual frame.
        let mut parent = self.clone();
        parent.virtual_frame_idx += 1;

        while !parent.cursor.done() {
            let (cache, registry) = store
                .0
                .as_store_opaque()
                .frame_data_cache_mut_and_registry();
            let frames = cache.lookup_or_compute(registry, parent.cursor.frame());
            if parent.virtual_frame_idx < frames.len() {
                return Ok(Some(parent));
            }
            parent.virtual_frame_idx = 0;
            // SAFETY: activation is valid because we checked validity
            // wrt execution version at the top of this function, and
            // we have not returned since.
            unsafe {
                parent.cursor.advance(store.0.as_store_opaque().unwinder());
            }
        }

        Ok(None)
    }

    /// Fetch (possibly from cache) the pre-computed layout data for
    /// the virtual frame this handle denotes, checking handle validity
    /// first.
    fn frame_data<'a>(&self, store: &'a mut StoreOpaque) -> Result<&'a FrameData> {
        if !self.is_valid_impl(store) {
            crate::error::bail!("Frame handle is no longer valid.");
        }
        let (cache, registry) = store.frame_data_cache_mut_and_registry();
        let frames = cache.lookup_or_compute(registry, self.cursor.frame());
        // `virtual_frame_idx` counts up for ease of iteration
        // behavior, while the frames are stored in outer-to-inner
        // (i.e., caller to callee) order, so we need to reverse here.
        Ok(&frames[frames.len() - 1 - self.virtual_frame_idx])
    }

    /// Read the `crate::vm::Instance` pointer stored in this frame's
    /// debug state slot.
    fn raw_instance<'a>(&self, store: &mut StoreOpaque) -> Result<&'a crate::vm::Instance> {
        let frame_data = self.frame_data(store)?;

        // Read out the vmctx slot.

        // SAFETY: vmctx is always at offset 0 in the slot.  (See
        // crates/cranelift/src/func_environ.rs in
        // `update_stack_slot_vmctx()`.)  The frame/activation is
        // still valid because we verified this in `frame_data` above.
        let vmctx: usize =
            unsafe { *(frame_data.slot_addr(self.cursor.frame().fp()) as *mut usize) };
        let vmctx: *mut VMContext = core::ptr::with_exposed_provenance_mut(vmctx);
        let vmctx = NonNull::new(vmctx).expect("null vmctx in debug state slot");
        // SAFETY: the stored vmctx value is a valid instance in this
        // store; we only visit frames from this store in the
        // backtrace.
        let instance = unsafe { crate::vm::Instance::from_vmctx(vmctx) };
        // SAFETY: the instance pointer read above is valid.
        Ok(unsafe { instance.as_ref() })
    }

    /// Get the instance associated with the current frame.
    pub fn instance(&self, mut store: impl AsContextMut) -> Result<Instance> {
        let store = store.as_context_mut();
        let instance = self.raw_instance(store.0.as_store_opaque())?;
        let id = instance.id();
        Ok(Instance::from_wasmtime(id, store.0.as_store_opaque()))
    }

    /// Get the module associated with the current frame, if any
    /// (i.e., not a container instance for a host-created entity).
    pub fn module<'a, T: 'static>(
        &self,
        store: impl Into<StoreContextMut<'a, T>>,
    ) -> Result<Option<&'a Module>> {
        let store = store.into();
        let instance = self.raw_instance(store.0.as_store_opaque())?;
        Ok(instance.runtime_module())
    }

    /// Get the raw function index associated with the current frame, and the
    /// module-relative PC as an offset within the module binary, if
    /// this is a Wasm function directly from the given `Module`
    /// (rather than a trampoline).
    pub fn wasm_function_index_and_pc(
        &self,
        mut store: impl AsContextMut,
    ) -> Result<Option<(DefinedFuncIndex, ModulePC)>> {
        let mut store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let FuncKey::DefinedWasmFunction(module, func) = frame_data.func_key else {
            return Ok(None);
        };
        let wasm_pc = frame_data.wasm_pc;
        // Sanity-check (debug builds only) that the function's module
        // matches the module resolved from the frame's instance.
        debug_assert_eq!(
            module,
            self.module(&mut store)?
                .expect("module should be defined if this is a defined function")
                .env_module()
                .module_index
        );
        Ok(Some((func, wasm_pc)))
    }

    /// Get the number of locals in this frame.
    pub fn num_locals(&self, mut store: impl AsContextMut) -> Result<u32> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        Ok(u32::try_from(frame_data.locals.len()).unwrap())
    }

    /// Get the depth of the operand stack in this frame.
    pub fn num_stacks(&self, mut store: impl AsContextMut) -> Result<u32> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        Ok(u32::try_from(frame_data.stack.len()).unwrap())
    }

    /// Get the type and value of the given local in this frame.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range (greater than
    /// `num_locals()`).
    pub fn local(&self, mut store: impl AsContextMut, index: u32) -> Result<Val> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let (offset, ty) = frame_data.locals[usize::try_from(index).unwrap()];
        let slot_addr = frame_data.slot_addr(self.cursor.frame().fp());
        // SAFETY: compiler produced metadata to describe this local
        // slot and stored a value of the correct type into it. Slot
        // address is valid because we checked liveness of the
        // activation/frame via `frame_data` above.
        Ok(unsafe { read_value(store.0.as_store_opaque(), slot_addr, offset, ty) })
    }

    /// Get the type and value of the given operand-stack value in
    /// this frame.
    ///
    /// Index 0 corresponds to the bottom-of-stack, and higher indices
    /// from there are more recently pushed values.  In other words,
    /// index order reads the Wasm virtual machine's abstract stack
    /// state left-to-right.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range (greater than or equal to
    /// `num_stacks()`).
    pub fn stack(&self, mut store: impl AsContextMut, index: u32) -> Result<Val> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let (offset, ty) = frame_data.stack[usize::try_from(index).unwrap()];
        let slot_addr = frame_data.slot_addr(self.cursor.frame().fp());
        // SAFETY: compiler produced metadata to describe this
        // operand-stack slot and stored a value of the correct type
        // into it. Slot address is valid because we checked liveness
        // of the activation/frame via `frame_data` above.
        Ok(unsafe { read_value(store.0.as_store_opaque(), slot_addr, offset, ty) })
    }
}
616
/// A cache from `StoreCodePC`s for modules' private code within a
/// store to pre-computed layout data for the virtual stack frame(s)
/// present at that physical PC.
pub(crate) struct FrameDataCache {
    /// For a given physical PC, the list of virtual frames, from
    /// inner (most recently called/inlined) to outer.
    ///
    /// Populated lazily by `lookup_or_compute`.
    by_pc: BTreeMap<StoreCodePC, Vec<FrameData>>,
}
625
626impl FrameDataCache {
627    pub(crate) fn new() -> FrameDataCache {
628        FrameDataCache {
629            by_pc: BTreeMap::new(),
630        }
631    }
632
633    /// Look up (or compute) the list of `FrameData`s from a physical
634    /// `Frame`.
635    fn lookup_or_compute<'a>(
636        &'a mut self,
637        registry: &ModuleRegistry,
638        frame: Frame,
639    ) -> &'a [FrameData] {
640        let pc = StoreCodePC::from_raw(frame.pc());
641        match self.by_pc.entry(pc) {
642            Entry::Occupied(frames) => frames.into_mut(),
643            Entry::Vacant(v) => {
644                // Although inlining can mix modules, `module` is the
645                // module that actually contains the physical PC
646                // (i.e., the outermost function that inlined the
647                // others).
648                let (module, frames) = VirtualFrame::decode(registry, frame.pc());
649                let frames = frames
650                    .into_iter()
651                    .map(|frame| FrameData::compute(frame, &module))
652                    .collect::<Vec<_>>();
653                v.insert(frames)
654            }
655        }
656    }
657}
658
/// Internal data pre-computed for one stack frame.
///
/// This represents one frame as produced by the progpoint lookup
/// (Wasm PC, frame descriptor index, stack shape).
struct VirtualFrame {
    /// The module-relative Wasm PC for this frame.
    wasm_pc: ModulePC,
    /// The frame descriptor for this frame.
    frame_descriptor: FrameTableDescriptorIndex,
    /// The stack shape for this frame (operand-stack layout at this
    /// program point).
    stack_shape: FrameStackShape,
}
671
672impl VirtualFrame {
673    /// Return virtual frames corresponding to a physical frame, from
674    /// outermost to innermost.
675    fn decode(registry: &ModuleRegistry, pc: usize) -> (Module, Vec<VirtualFrame>) {
676        let (module_with_code, pc) = registry
677            .module_and_code_by_pc(pc)
678            .expect("Wasm frame PC does not correspond to a module");
679        let module = module_with_code.module();
680        let table = module.frame_table().unwrap();
681        let pc = u32::try_from(pc).expect("PC offset too large");
682        let program_points = table.find_program_point(pc, FrameInstPos::Post)
683            .expect("There must be a program point record in every frame when debug instrumentation is enabled");
684
685        (
686            module.clone(),
687            program_points
688                .map(|(wasm_pc, frame_descriptor, stack_shape)| VirtualFrame {
689                    wasm_pc,
690                    frame_descriptor,
691                    stack_shape,
692                })
693                .collect(),
694        )
695    }
696}
697
/// Data computed when we visit a given frame.
struct FrameData {
    /// Offset of the debug state slot below the frame pointer.
    slot_to_fp_offset: usize,
    /// Identity of the function occupying this frame.
    func_key: FuncKey,
    /// The module-relative Wasm PC for this frame.
    wasm_pc: ModulePC,
    /// Shape of locals in this frame.
    ///
    /// We need to store this locally because `FrameView` cannot
    /// borrow the store: it needs a mut borrow, and an iterator
    /// cannot yield the same mut borrow multiple times because it
    /// cannot control the lifetime of the values it yields (the
    /// signature of `next()` does not bound the return value to the
    /// `&mut self` arg).
    locals: Vec<(FrameStateSlotOffset, FrameValType)>,
    /// Shape of the stack slots at this program point in this frame.
    ///
    /// In addition to the borrowing-related reason above, we also
    /// materialize this because we want to provide O(1) access to the
    /// stack by depth, and the frame slot descriptor stores info in a
    /// linked-list (actually DAG, with dedup'ing) way.
    stack: Vec<(FrameStateSlotOffset, FrameValType)>,
}
720
721impl FrameData {
722    fn compute(frame: VirtualFrame, module: &Module) -> Self {
723        let frame_table = module.frame_table().unwrap();
724        // Parse the frame descriptor.
725        let (data, slot_to_fp_offset) = frame_table
726            .frame_descriptor(frame.frame_descriptor)
727            .unwrap();
728        let frame_state_slot = FrameStateSlot::parse(data).unwrap();
729        let slot_to_fp_offset = usize::try_from(slot_to_fp_offset).unwrap();
730
731        // Materialize the stack shape so we have O(1) access to its
732        // elements, and so we don't need to keep the borrow to the
733        // module alive.
734        let mut stack = frame_state_slot
735            .stack(frame.stack_shape)
736            .collect::<Vec<_>>();
737        stack.reverse(); // Put top-of-stack last.
738
739        // Materialize the local offsets/types so we don't need to
740        // keep the borrow to the module alive.
741        let locals = frame_state_slot.locals().collect::<Vec<_>>();
742
743        FrameData {
744            slot_to_fp_offset,
745            func_key: frame_state_slot.func_key(),
746            wasm_pc: frame.wasm_pc,
747            stack,
748            locals,
749        }
750    }
751
752    fn slot_addr(&self, fp: usize) -> *mut u8 {
753        let fp: *mut u8 = core::ptr::with_exposed_provenance_mut(fp);
754        fp.wrapping_sub(self.slot_to_fp_offset)
755    }
756}
757
/// Read the value at the given offset.
///
/// Reads a single Wasm value of type `ty` out of a frame-state slot
/// whose base address is `slot_base`, materializing it as a host
/// [`Val`].
///
/// # Safety
///
/// The `offset` and `ty` must correspond to a valid value written
/// to the frame by generated code of the correct type. This will
/// be the case if this information comes from the frame tables
/// (as long as the frontend that generates the tables and
/// instrumentation is correct, and as long as the tables are
/// preserved through serialization).
unsafe fn read_value(
    store: &mut StoreOpaque,
    slot_base: *const u8,
    offset: FrameStateSlotOffset,
    ty: FrameValType,
) -> Val {
    // Address of this particular value within the frame-state slot.
    let address = unsafe { slot_base.offset(isize::try_from(offset.offset()).unwrap()) };

    // SAFETY: each case reads a value from memory that should be
    // valid according to our safety condition.
    match ty {
        FrameValType::I32 => {
            let value = unsafe { *(address as *const i32) };
            Val::I32(value)
        }
        FrameValType::I64 => {
            let value = unsafe { *(address as *const i64) };
            Val::I64(value)
        }
        FrameValType::F32 => {
            // Floats are read as their raw bit patterns; `Val::F32`
            // carries the bits as a `u32`.
            let value = unsafe { *(address as *const u32) };
            Val::F32(value)
        }
        FrameValType::F64 => {
            // As above: `Val::F64` carries the raw bits as a `u64`.
            let value = unsafe { *(address as *const u64) };
            Val::F64(value)
        }
        FrameValType::V128 => {
            // Vectors are always stored as little-endian.
            let value = unsafe { u128::from_le_bytes(*(address as *const [u8; 16])) };
            Val::V128(value.into())
        }
        FrameValType::AnyRef => {
            // Hold an `AutoAssertNoGc` guard while converting the raw
            // 32-bit GC reference into a rooted `AnyRef`, so no GC can
            // move/collect the referent in between.
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = AnyRef::_from_raw(&mut nogc, value);
            Val::AnyRef(value)
        }
        FrameValType::ExnRef => {
            // Same no-GC guard discipline as for `AnyRef` above.
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExnRef::_from_raw(&mut nogc, value);
            Val::ExnRef(value)
        }
        FrameValType::ExternRef => {
            // Same no-GC guard discipline as for `AnyRef` above.
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExternRef::_from_raw(&mut nogc, value);
            Val::ExternRef(value)
        }
        FrameValType::FuncRef => {
            // Funcrefs are stored as a raw pointer-sized value; no GC
            // guard is needed for them.
            let value = unsafe { *(address as *const *mut c_void) };
            let value = unsafe { Func::_from_raw(store, value) };
            Val::FuncRef(value)
        }
        FrameValType::ContRef => {
            unimplemented!("contref values are not implemented in the host API yet")
        }
    }
}
828
/// Compute raw pointers to all GC refs in the given frame.
// Note: ideally this would be an impl Iterator, but this is quite
// awkward because of the locally computed data (FrameStateSlot::parse
// structured result) within the closure borrowed by a nested closure.
#[cfg(feature = "gc")]
pub(crate) fn gc_refs_in_frame<'a>(ft: FrameTable<'a>, pc: u32, fp: *mut usize) -> Vec<*mut u32> {
    let fp = fp.cast::<u8>();
    let mut refs = Vec::new();

    let Some(frames) = ft.find_program_point(pc, FrameInstPos::Post) else {
        return refs;
    };

    for (_wasm_pc, desc_idx, stack_shape) in frames {
        let (desc_data, slot_to_fp) = ft.frame_descriptor(desc_idx).unwrap();
        // The frame-state slot lives below the frame pointer.
        let base = unsafe { fp.offset(-isize::try_from(slot_to_fp).unwrap()) };
        let desc = FrameStateSlot::parse(desc_data).unwrap();

        for (offset, ty) in desc.stack_and_locals(stack_shape) {
            // Only the 32-bit GC reference types are collected here;
            // funcrefs/contrefs and plain numeric values carry no GC
            // pointers.
            if matches!(
                ty,
                FrameValType::AnyRef | FrameValType::ExnRef | FrameValType::ExternRef
            ) {
                let slot = unsafe {
                    base.offset(isize::try_from(offset.offset()).unwrap())
                        .cast::<u32>()
                };
                refs.push(slot);
            }
        }
    }

    refs
}
864
/// One debug event that occurs when running Wasm code on a store with
/// a debug handler attached.
#[derive(Debug)]
pub enum DebugEvent<'a> {
    /// A [`wasmtime::Error`](crate::Error) was raised by a hostcall.
    HostcallError(&'a crate::Error),
    /// An exception is thrown and caught by Wasm. The current state
    /// is at the throw-point.
    CaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// An exception was not caught and is escaping to the host.
    UncaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// A Wasm trap occurred.
    Trap(Trap),
    /// A breakpoint was reached.
    ///
    /// This event is also emitted at every Wasm PC when single-step
    /// mode is enabled (see [`BreakpointEdit::single_step`]).
    Breakpoint,
    /// An epoch yield occurred.
    EpochYield,
}
883
/// A handler for debug events.
///
/// This is an async callback that is invoked directly within the
/// context of a debug event that occurs, i.e., with the Wasm code
/// still on the stack. The callback can thus observe that stack, up
/// to the most recent entry to Wasm.[^1]
///
/// Because this callback receives a `StoreContextMut`, it has full
/// access to any state that any other hostcall has, including the
/// `T`. In that way, it is like an epoch-deadline callback or a
/// call-hook callback. It also "freezes" the entire store for the
/// duration of the debugger callback future.
///
/// In the future, we expect to provide an "externally async" API on
/// the `Store` that allows receiving a stream of debug events and
/// accessing the store mutably while frozen; that will need to
/// integrate with [`Store::run_concurrent`] to properly timeslice and
/// scope the mutable access to the store, and has not been built
/// yet. In the meantime, it should be possible to build a fully
/// functional debugger with this async-callback API by channeling
/// debug events out, and requests to read the store back in, over
/// message-passing channels between the callback and an external
/// debugger main loop.
///
/// Note that the `handle` hook may use its mutable store access to
/// invoke another Wasm. Debug events will also be caught and will
/// cause further `handle` invocations during this recursive
/// invocation. It is up to the debugger to handle any implications of
/// this reentrancy (e.g., implications on a duplex channel protocol
/// with an event/continue handshake) if it does so.
///
/// Note also that this trait has `Clone` as a supertrait, and the
/// handler is cloned at every invocation as an artifact of the
/// internal ownership structure of Wasmtime: the handler itself is
/// owned by the store, but also receives a mutable borrow to the
/// whole store, so we need to clone it out to invoke it. It is
/// recommended that this trait be implemented by a type that is cheap
/// to clone: for example, a single `Arc` handle to debugger state.
///
/// [^1]: Providing visibility further than the most recent entry to
///       Wasm is not directly possible because it could see into
///       another async stack, and the stack that polls the future
///       running a particular Wasm invocation could change after each
///       suspend point in the handler.
///
/// [`Store::run_concurrent`]: crate::Store::run_concurrent
pub trait DebugHandler: Clone + Send + Sync + 'static {
    /// The data expected on the store that this handler is attached
    /// to.
    type Data;

    /// Handle a debug event.
    ///
    /// Invoked with mutable store access while the Wasm code that
    /// produced `event` is still on the stack; the store remains
    /// frozen for the duration of the returned future.
    fn handle(
        &self,
        store: StoreContextMut<'_, Self::Data>,
        event: DebugEvent<'_>,
    ) -> impl Future<Output = ()> + Send;
}
942
/// Breakpoint state for modules within a store.
#[derive(Default)]
pub(crate) struct BreakpointState {
    /// Single-step mode. When enabled, every breakpoint site in every
    /// registered module is patched to fire.
    single_step: bool,
    /// Breakpoints added individually. Maps from the actual
    /// (possibly slipped-forward) breakpoint key to a reference
    /// count. Multiple requested PCs may map to the same actual
    /// breakpoint when they are slipped forward.
    breakpoints: BTreeMap<BreakpointKey, usize>,
    /// When a requested breakpoint PC does not exactly match an
    /// opcode boundary, we "slip" it forward to the next available
    /// PC. This map records the redirect from the requested key to
    /// the actual key so that `remove_breakpoint` can undo it.
    breakpoint_redirects: BTreeMap<BreakpointKey, BreakpointKey>,
}
959
/// A breakpoint.
///
/// Identifies a breakpoint location as a module together with a
/// module-relative Wasm PC offset.
pub struct Breakpoint {
    /// Reference to the module in which we are setting the breakpoint.
    pub module: Module,
    /// Module-relative Wasm PC offset. Note that a requested PC may
    /// have been "slipped" forward to the nearest opcode boundary
    /// when the breakpoint was added.
    pub pc: ModulePC,
}
967
968#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
969struct BreakpointKey(CompiledModuleId, ModulePC);
970
971impl BreakpointKey {
972    fn from_raw(module: &Module, pc: ModulePC) -> BreakpointKey {
973        BreakpointKey(module.id(), pc)
974    }
975
976    fn get(&self, registry: &ModuleRegistry) -> Breakpoint {
977        let module = registry
978            .module_by_compiled_id(self.0)
979            .expect("Module should not have been removed from Store")
980            .clone();
981        Breakpoint { module, pc: self.1 }
982    }
983}
984
/// A breakpoint-editing session.
///
/// This enables updating breakpoint state (setting or unsetting
/// individual breakpoints or the store-global single-step flag) in a
/// batch. It is more efficient to batch these updates because
/// "re-publishing" the newly patched code, with update breakpoint
/// settings, typically requires a syscall to re-enable execute
/// permissions.
///
/// Any code memories unpublished during the session are re-published
/// when the session value is dropped.
pub struct BreakpointEdit<'a> {
    /// The store's breakpoint state being edited.
    state: &'a mut BreakpointState,
    /// The store's module registry, used to find (and patch) the
    /// store-local copies of module code.
    registry: &'a mut ModuleRegistry,
    /// Modules that have been edited.
    ///
    /// Invariant: each of these modules' CodeMemory objects is
    /// *unpublished* when in the dirty set.
    dirty_modules: BTreeSet<StoreCodePC>,
}
1002
1003impl BreakpointState {
1004    pub(crate) fn edit<'a>(&'a mut self, registry: &'a mut ModuleRegistry) -> BreakpointEdit<'a> {
1005        BreakpointEdit {
1006            state: self,
1007            registry,
1008            dirty_modules: BTreeSet::new(),
1009        }
1010    }
1011
1012    pub(crate) fn breakpoints<'a>(
1013        &'a self,
1014        registry: &'a ModuleRegistry,
1015    ) -> impl Iterator<Item = Breakpoint> + 'a {
1016        self.breakpoints.keys().map(|key| key.get(registry))
1017    }
1018
1019    pub(crate) fn is_single_step(&self) -> bool {
1020        self.single_step
1021    }
1022
1023    /// Internal helper to patch a new module for
1024    /// single-stepping. When a module is newly registered in a
1025    /// `Store`, we need to patch all breakpoints into the copy for
1026    /// this `Store` if single-stepping is currently enabled.
1027    pub(crate) fn patch_new_module(&self, code: &mut StoreCode, module: &Module) -> Result<()> {
1028        // Apply single-step state if single-stepping is enabled. Note
1029        // that no other individual breakpoints will exist yet (as
1030        // this is a newly registered module).
1031        if self.single_step {
1032            let mem = code.code_memory_mut().unwrap();
1033            mem.unpublish()?;
1034            BreakpointEdit::apply_single_step(mem, module, true, |_key| false)?;
1035            mem.publish()?;
1036        }
1037        Ok(())
1038    }
1039}
1040
impl<'a> BreakpointEdit<'a> {
    /// Look up (registering on first use) the store-local code memory
    /// for `module`, ensuring it is unpublished (writable) for the
    /// duration of this editing session.
    ///
    /// Takes the session's fields as separate parameters rather than
    /// `&mut self` so callers can keep disjoint borrows of the other
    /// fields (see the call sites below).
    fn get_code_memory<'b>(
        breakpoints: &BreakpointState,
        registry: &'b mut ModuleRegistry,
        dirty_modules: &mut BTreeSet<StoreCodePC>,
        module: &Module,
    ) -> Result<&'b mut CodeMemory> {
        let store_code_pc =
            registry.store_code_base_or_register(module, RegisterBreakpointState(breakpoints))?;
        let code_memory = registry
            .store_code_mut(store_code_pc)
            .expect("Just checked presence above")
            .code_memory_mut()
            .expect("Must have unique ownership of StoreCode in guest-debug mode");
        // First time this code is touched in this session: record it
        // as dirty and unpublish it. Per the invariant on
        // `dirty_modules`, dirty code stays unpublished until `Drop`
        // re-publishes it.
        if dirty_modules.insert(store_code_pc) {
            code_memory.unpublish()?;
        }
        Ok(code_memory)
    }

    /// Apply breakpoint patches to (unpublished) code memory, writing
    /// each patch's `enable` or `disable` byte sequence over its
    /// text-section offset.
    fn patch<'b>(
        patches: impl Iterator<Item = FrameTableBreakpointData<'b>> + 'b,
        mem: &mut CodeMemory,
        enable: bool,
    ) {
        let mem = mem.text_mut();
        for patch in patches {
            let data = if enable { patch.enable } else { patch.disable };
            let mem = &mut mem[patch.offset..patch.offset + data.len()];
            log::trace!(
                "patch: offset 0x{:x} with enable={enable}: data {data:?} replacing {mem:?}",
                patch.offset
            );
            mem.copy_from_slice(data);
        }
    }

    /// Add a breakpoint in the given module at the given PC in that
    /// module.
    ///
    /// If the requested PC does not fall exactly on an opcode
    /// boundary, the breakpoint is "slipped" forward to the next
    /// available opcode PC.
    ///
    /// No effect if the breakpoint is already set.
    pub fn add_breakpoint(&mut self, module: &Module, pc: ModulePC) -> Result<()> {
        let frame_table = module
            .frame_table()
            .expect("Frame table must be present when guest-debug is enabled");
        let actual_pc = frame_table.nearest_breakpoint(pc).unwrap_or(pc);
        let requested_key = BreakpointKey::from_raw(module, pc);
        let actual_key = BreakpointKey::from_raw(module, actual_pc);

        // Record the requested->actual redirect so that a later
        // `remove_breakpoint` with the original requested PC finds
        // the slipped-forward entry.
        if actual_pc != pc {
            log::trace!("slipping breakpoint from {requested_key:?} to {actual_key:?}");
            self.state
                .breakpoint_redirects
                .insert(requested_key, actual_key);
        }

        // Reference-count the actual key: several requested PCs may
        // slip forward onto the same actual breakpoint site.
        let refcount = self.state.breakpoints.entry(actual_key).or_insert(0);
        *refcount += 1;
        if *refcount == 1 {
            // First reference: actually patch the code.
            let mem =
                Self::get_code_memory(self.state, self.registry, &mut self.dirty_modules, module)?;
            let patches = frame_table.lookup_breakpoint_patches_by_pc(actual_pc);
            Self::patch(patches, mem, true);
        }
        Ok(())
    }

    /// Remove a breakpoint in the given module at the given PC in
    /// that module.
    ///
    /// No effect if the breakpoint was not set.
    pub fn remove_breakpoint(&mut self, module: &Module, pc: ModulePC) -> Result<()> {
        let requested_key = BreakpointKey::from_raw(module, pc);
        // NOTE(review): this consumes the redirect on the first
        // remove, while `add_breakpoint` re-inserts it on every add.
        // If the *same* slipped requested PC is added twice and then
        // removed twice, the second remove falls back to
        // `requested_key`, misses the refcounted entry, and the
        // breakpoint stays patched in — TODO confirm whether
        // duplicate adds of an identical requested PC are part of the
        // intended contract.
        let actual_key = self
            .state
            .breakpoint_redirects
            .remove(&requested_key)
            .unwrap_or(requested_key);
        let actual_pc = actual_key.1;

        if let Some(refcount) = self.state.breakpoints.get_mut(&actual_key) {
            *refcount -= 1;
            if *refcount == 0 {
                self.state.breakpoints.remove(&actual_key);
                // Only un-patch the site if single-step mode isn't
                // keeping every breakpoint site enabled anyway.
                if !self.state.single_step {
                    let mem = Self::get_code_memory(
                        self.state,
                        self.registry,
                        &mut self.dirty_modules,
                        module,
                    )?;
                    let frame_table = module
                        .frame_table()
                        .expect("Frame table must be present when guest-debug is enabled");
                    let patches = frame_table.lookup_breakpoint_patches_by_pc(actual_pc);
                    Self::patch(patches, mem, false);
                }
            }
        }
        Ok(())
    }

    /// Patch every breakpoint site in `module`: each site is enabled
    /// when `enabled` (single-stepping) is set, or when `key_enabled`
    /// reports that an individual breakpoint exists at that site.
    fn apply_single_step<F: Fn(&BreakpointKey) -> bool>(
        mem: &mut CodeMemory,
        module: &Module,
        enabled: bool,
        key_enabled: F,
    ) -> Result<()> {
        let table = module
            .frame_table()
            .expect("Frame table must be present when guest-debug is enabled");
        for (wasm_pc, patch) in table.breakpoint_patches() {
            let key = BreakpointKey::from_raw(&module, wasm_pc);
            let this_enabled = enabled || key_enabled(&key);
            log::trace!(
                "single_step: enabled {enabled} key {key:?} -> this_enabled {this_enabled}"
            );
            Self::patch(core::iter::once(patch), mem, this_enabled);
        }
        Ok(())
    }

    /// Turn on or off single-step mode.
    ///
    /// In single-step mode, a breakpoint event is emitted at every
    /// Wasm PC.
    pub fn single_step(&mut self, enabled: bool) -> Result<()> {
        log::trace!(
            "single_step({enabled}) with breakpoint set {:?}",
            self.state.breakpoints
        );
        if self.state.single_step == enabled {
            // No change to current state; don't go through the effort of re-patching and
            // re-publishing code.
            return Ok(());
        }
        // Collect module handles up front so `self.registry` can be
        // mutably borrowed inside the loop.
        let modules = self.registry.all_modules().cloned().collect::<Vec<_>>();
        for module in modules {
            let mem =
                Self::get_code_memory(self.state, self.registry, &mut self.dirty_modules, &module)?;
            // When disabling, individually-set breakpoints must stay
            // patched in; the predicate keeps those sites enabled.
            Self::apply_single_step(mem, &module, enabled, |key| {
                self.state.breakpoints.contains_key(key)
            })?;
        }

        self.state.single_step = enabled;

        Ok(())
    }
}
1196
1197impl<'a> Drop for BreakpointEdit<'a> {
1198    fn drop(&mut self) {
1199        for &store_code_base in &self.dirty_modules {
1200            let store_code = self.registry.store_code_mut(store_code_base).unwrap();
1201            if let Err(e) = store_code
1202                .code_memory_mut()
1203                .expect("Must have unique ownership of StoreCode in guest-debug mode")
1204                .publish()
1205            {
1206                abort_on_republish_error(e);
1207            }
1208        }
1209    }
1210}
1211
/// Abort when we cannot re-publish executable code.
///
/// Note that this puts us in quite a conundrum. Typically we will
/// have been editing breakpoints from within a hostcall context
/// (e.g. inside a debugger hook while execution is paused) with JIT
/// code on the stack. Wasmtime's usual path to return errors is back
/// through that JIT code: we do not panic-unwind across the JIT code,
/// we return into the exit trampoline and that then re-enters the
/// raise libcall to use a Cranelift exception-throw to cross most of
/// the JIT frames to the entry trampoline. When even trampolines are
/// no longer executable, we have no way out. Even an ordinary
/// `panic!` cannot work, because we catch panics and carry them
/// across JIT code using that trampoline-based error path. Our only
/// way out is to directly abort the whole process.
///
/// This is not without precedent: other engines have similar failure
/// paths. For example, SpiderMonkey directly aborts the process when
/// failing to re-apply executable permissions (see [1]).
///
/// Note that we don't really expect to ever hit this case in
/// practice: it's unlikely that `mprotect` applying `PROT_EXEC` would
/// fail due to, e.g., resource exhaustion in the kernel, because we
/// will have the same net number of virtual memory areas before and
/// after the permissions change. Nevertheless, we have to account for
/// the possibility of error.
///
/// [1]: https://searchfox.org/firefox-main/rev/7496c8515212669451d7e775a00c2be07da38ca5/js/src/jit/AutoWritableJitCode.h#26-56
#[cfg(feature = "std")]
fn abort_on_republish_error(e: crate::Error) -> ! {
    // Log before aborting so the failure is diagnosable post-mortem.
    log::error!(
        "Failed to re-publish executable code: {e:?}. Wasmtime cannot return through JIT code on the stack and cannot even panic; aborting the process."
    );
    std::process::abort();
}
1246
/// In the `no_std` case, we don't have a concept of a "process
/// abort", so rely on `panic!`. Typically an embedded scenario that
/// uses `no_std` will build with `panic=abort` so the effect is the
/// same. If it doesn't, there is truly nothing we can do here so
/// let's panic anyway; the panic propagation through the trampolines
/// will at least deterministically crash.
///
/// See the `std` variant above for why this situation is
/// unrecoverable in the first place.
#[cfg(not(feature = "std"))]
fn abort_on_republish_error(e: crate::Error) -> ! {
    panic!("Failed to re-publish executable code: {e:?}");
}