// wasmtime/runtime/debug.rs
1//! Debugging API.
2
3use super::store::AsStoreOpaque;
4use crate::code::StoreCode;
5use crate::module::RegisterBreakpointState;
6use crate::store::StoreId;
7use crate::vm::{Activation, Backtrace};
8use crate::{
9    AnyRef, AsContextMut, CodeMemory, ExnRef, Extern, ExternRef, Func, Instance, Module,
10    OwnedRooted, StoreContext, StoreContextMut, Val,
11    code::StoreCodePC,
12    module::ModuleRegistry,
13    store::{AutoAssertNoGc, StoreOpaque},
14    vm::{CompiledModuleId, VMContext},
15};
16use crate::{Caller, Result, Store};
17use alloc::collections::{BTreeMap, BTreeSet, btree_map::Entry};
18use alloc::vec;
19use alloc::vec::Vec;
20use core::{ffi::c_void, ptr::NonNull};
21#[cfg(feature = "gc")]
22use wasmtime_environ::FrameTable;
23use wasmtime_environ::{
24    DefinedFuncIndex, EntityIndex, FrameInstPos, FrameStackShape, FrameStateSlot,
25    FrameStateSlotOffset, FrameTableBreakpointData, FrameTableDescriptorIndex, FrameValType,
26    FuncIndex, FuncKey, GlobalIndex, MemoryIndex, TableIndex, TagIndex, Trap,
27};
28use wasmtime_unwinder::{Frame, FrameCursor};
29
impl<T> Store<T> {
    /// Provide a frame handle for all activations, in order from
    /// innermost (most recently called) to outermost on the stack.
    ///
    /// An activation is a contiguous sequence of Wasm frames (called
    /// functions) that were called from host code and called back out
    /// to host code. If there are activations from multiple stores on
    /// the stack, for example if Wasm code in one store calls out to
    /// host code which invokes another Wasm function in another
    /// store, then the other stores are "opaque" to our view here in
    /// the same way that host code is.
    ///
    /// Returns an empty list if debug instrumentation is not enabled
    /// for the engine containing this store.
    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
        self.as_store_opaque().debug_exit_frames()
    }

    /// Start an edit session to update breakpoints.
    ///
    /// Returns `None` if guest debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn edit_breakpoints<'a>(&'a mut self) -> Option<BreakpointEdit<'a>> {
        self.as_store_opaque().edit_breakpoints()
    }

    /// Get a vector of all Instances held in the Store, for debug
    /// purposes.
    ///
    /// Guest debugging must be enabled for this accessor to return
    /// any instances. If it is not, an empty vector is returned.
    pub fn debug_all_instances(&mut self) -> Vec<Instance> {
        self.as_store_opaque().debug_all_instances()
    }

    /// Get a vector of all Modules held in the Store, for debug
    /// purposes.
    ///
    /// Guest debugging must be enabled for this accessor to return
    /// any modules. If it is not, an empty vector is returned.
    pub fn debug_all_modules(&mut self) -> Vec<Module> {
        self.as_store_opaque().debug_all_modules()
    }
}
71
72impl<'a, T> StoreContextMut<'a, T> {
73    /// Provide a frame handle for all activations, in order from
74    /// innermost (most recently called) to outermost on the stack.
75    ///
76    /// See [`Store::debug_exit_frames`] for more details.
77    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
78        self.0.as_store_opaque().debug_exit_frames()
79    }
80
81    /// Start an edit session to update breakpoints.
82    pub fn edit_breakpoints(self) -> Option<BreakpointEdit<'a>> {
83        self.0.as_store_opaque().edit_breakpoints()
84    }
85
86    /// Get a vector of all Instances held in the Store, for debug
87    /// purposes.
88    ///
89    /// See [`Store::debug_all_instances`] for more details.
90    pub fn debug_all_instances(self) -> Vec<Instance> {
91        self.0.as_store_opaque().debug_all_instances()
92    }
93
94    /// Get a vector of all Modules held in the Store, for debug
95    /// purposes.
96    ///
97    /// See [`Store::debug_all_modules`] for more details.
98    pub fn debug_all_modules(self) -> Vec<Module> {
99        self.0.as_store_opaque().debug_all_modules()
100    }
101}
102
103impl<'a, T> Caller<'a, T> {
104    /// Provide a frame handle for all activations, in order from
105    /// innermost (most recently called) to outermost on the stack.
106    ///
107    /// See [`Store::debug_exit_frames`] for more details.
108    pub fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
109        self.store.0.as_store_opaque().debug_exit_frames()
110    }
111
112    /// Start an edit session to update breakpoints.
113    pub fn edit_breakpoints<'b>(&'b mut self) -> Option<BreakpointEdit<'b>> {
114        self.store.0.as_store_opaque().edit_breakpoints()
115    }
116
117    /// Get a vector of all Instances held in the Store, for debug
118    /// purposes.
119    ///
120    /// See [`Store::debug_all_instances`] for more details.
121    pub fn debug_all_instances(&mut self) -> Vec<Instance> {
122        self.store.0.as_store_opaque().debug_all_instances()
123    }
124
125    /// Get a vector of all Modules held in the Store, for debug
126    /// purposes.
127    ///
128    /// See [`Store::debug_all_modules`] for more details.
129    pub fn debug_all_modules(&mut self) -> Vec<Module> {
130        self.store.0.as_store_opaque().debug_all_modules()
131    }
132}
133
impl StoreOpaque {
    /// Internal implementation of [`Store::debug_exit_frames`].
    ///
    /// Collects activations eagerly (or an empty list when guest
    /// debugging is disabled) and lazily maps each one to the handle
    /// for its exit frame, dropping activations with no virtual
    /// frames.
    fn debug_exit_frames(&mut self) -> impl Iterator<Item = FrameHandle> {
        // When debugging is disabled we still return an iterator of
        // the same type, just over an empty vector.
        let activations = if self.engine().tunables().debug_guest {
            Backtrace::activations(self)
        } else {
            vec![]
        };

        activations
            .into_iter()
            // SAFETY: each activation is currently active and will
            // remain so (we have a mutable borrow of the store).
            .filter_map(|act| unsafe { FrameHandle::exit_frame(self, act) })
    }

    /// Internal implementation of [`Store::edit_breakpoints`]:
    /// `None` when guest debugging is disabled.
    fn edit_breakpoints<'a>(&'a mut self) -> Option<BreakpointEdit<'a>> {
        if !self.engine().tunables().debug_guest {
            return None;
        }

        let (breakpoints, registry) = self.breakpoints_and_registry_mut();
        Some(breakpoints.edit(registry))
    }

    /// Internal implementation of [`Store::debug_all_instances`]:
    /// empty when guest debugging is disabled.
    fn debug_all_instances(&mut self) -> Vec<Instance> {
        if !self.engine().tunables().debug_guest {
            return vec![];
        }

        self.all_instances().collect()
    }

    /// Internal implementation of [`Store::debug_all_modules`]:
    /// empty when guest debugging is disabled.
    fn debug_all_modules(&self) -> Vec<Module> {
        if !self.engine().tunables().debug_guest {
            return vec![];
        }

        self.modules().all_modules().cloned().collect()
    }
}
174
impl Instance {
    /// Get access to a global within this instance's globals index
    /// space.
    ///
    /// This permits accessing globals whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any global index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_global(
        &self,
        mut store: impl AsContextMut,
        global_index: u32,
    ) -> Option<crate::Global> {
        self.debug_export(
            store.as_context_mut().0,
            GlobalIndex::from_bits(global_index).into(),
        )
        .and_then(|s| s.into_global())
    }

    /// Get access to a memory (unshared only) within this instance's
    /// memory index space.
    ///
    /// This permits accessing memories whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any memory index that is out-of-bounds.
    ///
    /// `None` is returned for any shared memory (use
    /// `debug_shared_memory` instead).
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_memory(
        &self,
        mut store: impl AsContextMut,
        memory_index: u32,
    ) -> Option<crate::Memory> {
        self.debug_export(
            store.as_context_mut().0,
            MemoryIndex::from_bits(memory_index).into(),
        )
        .and_then(|s| s.into_memory())
    }

    /// Get access to a shared memory within this instance's memory
    /// index space.
    ///
    /// This permits accessing memories whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any memory index that is out-of-bounds.
    ///
    /// `None` is returned for any unshared memory (use `debug_memory`
    /// instead).
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_shared_memory(
        &self,
        mut store: impl AsContextMut,
        memory_index: u32,
    ) -> Option<crate::SharedMemory> {
        self.debug_export(
            store.as_context_mut().0,
            MemoryIndex::from_bits(memory_index).into(),
        )
        .and_then(|s| s.into_shared_memory())
    }

    /// Get access to a table within this instance's table index
    /// space.
    ///
    /// This permits accessing tables whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any table index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_table(
        &self,
        mut store: impl AsContextMut,
        table_index: u32,
    ) -> Option<crate::Table> {
        self.debug_export(
            store.as_context_mut().0,
            TableIndex::from_bits(table_index).into(),
        )
        .and_then(|s| s.into_table())
    }

    /// Get access to a function within this instance's function index
    /// space.
    ///
    /// This permits accessing functions whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any function index that is
    /// out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_function(
        &self,
        mut store: impl AsContextMut,
        function_index: u32,
    ) -> Option<crate::Func> {
        self.debug_export(
            store.as_context_mut().0,
            FuncIndex::from_bits(function_index).into(),
        )
        .and_then(|s| s.into_func())
    }

    /// Get access to a tag within this instance's tag index space.
    ///
    /// This permits accessing tags whether they are exported or
    /// not. However, it is only available for purposes of debugging,
    /// and so is only permitted when `guest_debug` is enabled in the
    /// Engine's configuration. The intent of the Wasmtime API is to
    /// enforce the Wasm type system's encapsulation even in the host
    /// API, except where necessary for developer tooling.
    ///
    /// `None` is returned for any tag index that is out-of-bounds.
    ///
    /// `None` is returned if guest-debugging is not enabled in the
    /// engine configuration for this Store.
    pub fn debug_tag(&self, mut store: impl AsContextMut, tag_index: u32) -> Option<crate::Tag> {
        self.debug_export(
            store.as_context_mut().0,
            TagIndex::from_bits(tag_index).into(),
        )
        .and_then(|s| s.into_tag())
    }

    /// Common implementation for all `debug_*` accessors above: look
    /// up an arbitrary (exported or not) entity by index in this
    /// instance.
    ///
    /// Returns `None` when guest debugging is disabled or when
    /// `index` is out-of-bounds for this instance's module.
    fn debug_export(&self, store: &mut StoreOpaque, index: EntityIndex) -> Option<Extern> {
        // Debug-only accessor: gate on the engine-wide flag.
        if !store.engine().tunables().debug_guest {
            return None;
        }

        // Bounds-check the index against the module's index spaces
        // before reaching into the runtime instance.
        let env_module = self._module(store).env_module();
        if !env_module.is_valid(index) {
            return None;
        }
        let store_id = store.id();
        let (instance, registry) = store.instance_and_module_registry_mut(self.id());
        // SAFETY: the `store` and `registry` are associated with
        // this instance as we fetched the instance directly from
        // the store above.
        let export = unsafe { instance.get_export_by_index_mut(registry, store_id, index) };
        Some(Extern::from_wasmtime_export(export, store))
    }
}
352
353impl<'a, T> StoreContext<'a, T> {
354    /// Return all breakpoints.
355    pub fn breakpoints(self) -> Option<impl Iterator<Item = Breakpoint> + 'a> {
356        if !self.engine().tunables().debug_guest {
357            return None;
358        }
359
360        let (breakpoints, registry) = self.0.breakpoints_and_registry();
361        Some(breakpoints.breakpoints(registry))
362    }
363
364    /// Indicate whether single-step mode is enabled.
365    pub fn is_single_step(&self) -> bool {
366        let (breakpoints, _) = self.0.breakpoints_and_registry();
367        breakpoints.is_single_step()
368    }
369}
370
/// A handle to a stack frame, valid as long as execution is not
/// resumed in the associated `Store`.
///
/// This handle can be held and cloned and used to refer to a frame
/// within a paused store. It is cheap: it internally consists of a
/// pointer to the actual frame, together with some metadata to
/// determine when that pointer has gone stale.
///
/// At the API level, any usage of this frame handle requires a
/// mutable borrow of the `Store`, because the `Store` logically owns
/// the stack(s) for any execution within it. However, the existence
/// of the handle itself does not hold a borrow on the `Store`; hence,
/// the `Store` can continue to be used and queried, and some state
/// (e.g. memories, tables, GC objects) can even be mutated, as long
/// as execution is not resumed. The intent of this API is to allow a
/// wide variety of debugger implementation strategies that expose
/// stack frames and also allow other commands/actions at the same
/// time.
///
/// The user can use [`FrameHandle::is_valid`] to determine if the
/// handle is still valid and usable.
#[derive(Clone)]
pub struct FrameHandle {
    /// The unwinder cursor at this frame (identifies the physical
    /// frame on the machine stack).
    cursor: FrameCursor,

    /// The index of the virtual frame within the physical frame
    /// (a physical frame may contain several inlined Wasm frames).
    virtual_frame_idx: usize,

    /// The unique Store this frame came from, to ensure the handle is
    /// used with the correct Store.
    store_id: StoreId,

    /// The Store's `execution_version` at handle creation; a mismatch
    /// later means execution resumed and the handle is stale.
    store_version: u64,
}
407
impl FrameHandle {
    /// Create a new FrameHandle at the exit frame of an activation.
    ///
    /// Returns `None` if no physical frame in the activation carries
    /// any virtual frames.
    ///
    /// # Safety
    ///
    /// The provided activation must be valid currently.
    unsafe fn exit_frame(store: &mut StoreOpaque, activation: Activation) -> Option<FrameHandle> {
        // SAFETY: activation is valid as per our safety condition.
        let mut cursor = unsafe { activation.cursor() };

        // Find the first virtual frame. Each physical frame may have
        // zero or more virtual frames.
        while !cursor.done() {
            let (cache, registry) = store.frame_data_cache_mut_and_registry();
            let frames = cache.lookup_or_compute(registry, cursor.frame());
            if frames.len() > 0 {
                return Some(FrameHandle {
                    cursor,
                    virtual_frame_idx: 0,
                    store_id: store.id(),
                    // Snapshot the version so later uses can detect
                    // that execution resumed (see `is_valid_impl`).
                    store_version: store.vm_store_context().execution_version,
                });
            }
            // SAFETY: activation is still valid (valid on entry per
            // our safety condition, and we have not returned control
            // since above).
            unsafe {
                cursor.advance(store.unwinder());
            }
        }

        None
    }

    /// Determine whether this handle can still be used to refer to a
    /// frame.
    pub fn is_valid(&self, mut store: impl AsContextMut) -> bool {
        let store = store.as_context_mut();
        self.is_valid_impl(store.0.as_store_opaque())
    }

    /// A handle is valid only for the store it was created from, and
    /// only while that store's `execution_version` is unchanged
    /// (i.e., execution has not resumed since creation).
    fn is_valid_impl(&self, store: &StoreOpaque) -> bool {
        let id = store.id();
        let version = store.vm_store_context().execution_version;
        self.store_id == id && self.store_version == version
    }

    /// Get a handle to the next frame up the activation (the one that
    /// called this frame), if any.
    pub fn parent(&self, mut store: impl AsContextMut) -> Result<Option<FrameHandle>> {
        let mut store = store.as_context_mut();
        if !self.is_valid(&mut store) {
            crate::error::bail!("Frame handle is no longer valid.");
        }

        // First try the next virtual frame within the same physical
        // frame; if exhausted, walk outward through physical frames.
        let mut parent = self.clone();
        parent.virtual_frame_idx += 1;

        while !parent.cursor.done() {
            let (cache, registry) = store
                .0
                .as_store_opaque()
                .frame_data_cache_mut_and_registry();
            let frames = cache.lookup_or_compute(registry, parent.cursor.frame());
            if parent.virtual_frame_idx < frames.len() {
                return Ok(Some(parent));
            }
            parent.virtual_frame_idx = 0;
            // SAFETY: activation is valid because we checked validity
            // wrt execution version at the top of this function, and
            // we have not returned since.
            unsafe {
                parent.cursor.advance(store.0.as_store_opaque().unwinder());
            }
        }

        Ok(None)
    }

    /// Look up the pre-computed layout data for this handle's virtual
    /// frame, validating the handle first.
    fn frame_data<'a>(&self, store: &'a mut StoreOpaque) -> Result<&'a FrameData> {
        if !self.is_valid_impl(store) {
            crate::error::bail!("Frame handle is no longer valid.");
        }
        let (cache, registry) = store.frame_data_cache_mut_and_registry();
        let frames = cache.lookup_or_compute(registry, self.cursor.frame());
        // `virtual_frame_idx` counts up for ease of iteration
        // behavior, while the frames are stored in outer-to-inner
        // (i.e., caller to callee) order, so we need to reverse here.
        Ok(&frames[frames.len() - 1 - self.virtual_frame_idx])
    }

    /// Read the runtime instance pointer stored in this frame's debug
    /// state slot.
    ///
    /// NOTE(review): the returned lifetime `'a` is not tied to
    /// `store` (it is chosen by the caller and produced by the unsafe
    /// `as_ref` below) — presumably callers only hold the reference
    /// while the store is paused; confirm against call sites.
    fn raw_instance<'a>(&self, store: &mut StoreOpaque) -> Result<&'a crate::vm::Instance> {
        let frame_data = self.frame_data(store)?;

        // Read out the vmctx slot.

        // SAFETY: vmctx is always at offset 0 in the slot.  (See
        // crates/cranelift/src/func_environ.rs in
        // `update_stack_slot_vmctx()`.)  The frame/activation is
        // still valid because we verified this in `frame_data` above.
        let vmctx: usize =
            unsafe { *(frame_data.slot_addr(self.cursor.frame().fp()) as *mut usize) };
        let vmctx: *mut VMContext = core::ptr::with_exposed_provenance_mut(vmctx);
        let vmctx = NonNull::new(vmctx).expect("null vmctx in debug state slot");
        // SAFETY: the stored vmctx value is a valid instance in this
        // store; we only visit frames from this store in the
        // backtrace.
        let instance = unsafe { crate::vm::Instance::from_vmctx(vmctx) };
        // SAFETY: the instance pointer read above is valid.
        Ok(unsafe { instance.as_ref() })
    }

    /// Get the instance associated with the current frame.
    pub fn instance(&self, mut store: impl AsContextMut) -> Result<Instance> {
        let store = store.as_context_mut();
        let instance = self.raw_instance(store.0.as_store_opaque())?;
        let id = instance.id();
        Ok(Instance::from_wasmtime(id, store.0.as_store_opaque()))
    }

    /// Get the module associated with the current frame, if any
    /// (i.e., not a container instance for a host-created entity).
    pub fn module<'a, T: 'static>(
        &self,
        store: impl Into<StoreContextMut<'a, T>>,
    ) -> Result<Option<&'a Module>> {
        let store = store.into();
        let instance = self.raw_instance(store.0.as_store_opaque())?;
        Ok(instance.runtime_module())
    }

    /// Get the raw function index associated with the current frame, and the
    /// PC as an offset within its code section, if it is a Wasm
    /// function directly from the given `Module` (rather than a
    /// trampoline).
    pub fn wasm_function_index_and_pc(
        &self,
        mut store: impl AsContextMut,
    ) -> Result<Option<(DefinedFuncIndex, u32)>> {
        let mut store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let FuncKey::DefinedWasmFunction(module, func) = frame_data.func_key else {
            return Ok(None);
        };
        let wasm_pc = frame_data.wasm_pc;
        // Sanity-check (debug builds only) that the function's module
        // matches the module the frame's instance reports.
        debug_assert_eq!(
            module,
            self.module(&mut store)?
                .expect("module should be defined if this is a defined function")
                .env_module()
                .module_index
        );
        Ok(Some((func, wasm_pc)))
    }

    /// Get the number of locals in this frame.
    pub fn num_locals(&self, mut store: impl AsContextMut) -> Result<u32> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        Ok(u32::try_from(frame_data.locals.len()).unwrap())
    }

    /// Get the depth of the operand stack in this frame.
    pub fn num_stacks(&self, mut store: impl AsContextMut) -> Result<u32> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        Ok(u32::try_from(frame_data.stack.len()).unwrap())
    }

    /// Get the type and value of the given local in this frame.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range (greater than
    /// `num_locals()`).
    pub fn local(&self, mut store: impl AsContextMut, index: u32) -> Result<Val> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let (offset, ty) = frame_data.locals[usize::try_from(index).unwrap()];
        let slot_addr = frame_data.slot_addr(self.cursor.frame().fp());
        // SAFETY: compiler produced metadata to describe this local
        // slot and stored a value of the correct type into it. Slot
        // address is valid because we checked liveness of the
        // activation/frame via `frame_data` above.
        Ok(unsafe { read_value(store.0.as_store_opaque(), slot_addr, offset, ty) })
    }

    /// Get the type and value of the given operand-stack value in
    /// this frame.
    ///
    /// Index 0 corresponds to the bottom-of-stack, and higher indices
    /// from there are more recently pushed values.  In other words,
    /// index order reads the Wasm virtual machine's abstract stack
    /// state left-to-right.
    ///
    /// # Panics
    ///
    /// Panics if the index is out-of-range (greater than
    /// `num_stacks()`).
    pub fn stack(&self, mut store: impl AsContextMut, index: u32) -> Result<Val> {
        let store = store.as_context_mut();
        let frame_data = self.frame_data(store.0.as_store_opaque())?;
        let (offset, ty) = frame_data.stack[usize::try_from(index).unwrap()];
        let slot_addr = frame_data.slot_addr(self.cursor.frame().fp());
        // SAFETY: compiler produced metadata to describe this
        // operand-stack slot and stored a value of the correct type
        // into it. Slot address is valid because we checked liveness
        // of the activation/frame via `frame_data` above.
        Ok(unsafe { read_value(store.0.as_store_opaque(), slot_addr, offset, ty) })
    }
}
614
/// A cache from `StoreCodePC`s for modules' private code within a
/// store to pre-computed layout data for the virtual stack frame(s)
/// present at that physical PC.
pub(crate) struct FrameDataCache {
    /// For a given physical PC, the list of virtual frames, from
    /// inner (most recently called/inlined) to outer.
    by_pc: BTreeMap<StoreCodePC, Vec<FrameData>>,
}
623
impl FrameDataCache {
    /// Create an empty cache.
    pub(crate) fn new() -> FrameDataCache {
        FrameDataCache {
            by_pc: BTreeMap::new(),
        }
    }

    /// Look up (or compute) the list of `FrameData`s from a physical
    /// `Frame`.
    ///
    /// On a cache miss, decodes the virtual frames at the frame's PC
    /// via the module registry and memoizes the result.
    fn lookup_or_compute<'a>(
        &'a mut self,
        registry: &ModuleRegistry,
        frame: Frame,
    ) -> &'a [FrameData] {
        let pc = StoreCodePC::from_raw(frame.pc());
        match self.by_pc.entry(pc) {
            Entry::Occupied(frames) => frames.into_mut(),
            Entry::Vacant(v) => {
                // Although inlining can mix modules, `module` is the
                // module that actually contains the physical PC
                // (i.e., the outermost function that inlined the
                // others).
                let (module, frames) = VirtualFrame::decode(registry, frame.pc());
                let frames = frames
                    .into_iter()
                    .map(|frame| FrameData::compute(frame, &module))
                    .collect::<Vec<_>>();
                v.insert(frames)
            }
        }
    }
}
656
/// Internal data pre-computed for one stack frame.
///
/// This represents one frame as produced by the progpoint lookup
/// (Wasm PC, frame descriptor index, stack shape).
struct VirtualFrame {
    /// The Wasm PC for this frame.
    wasm_pc: u32,
    /// The frame descriptor for this frame (index into the module's
    /// frame table).
    frame_descriptor: FrameTableDescriptorIndex,
    /// The stack shape for this frame (operand-stack layout at this
    /// program point).
    stack_shape: FrameStackShape,
}
669
impl VirtualFrame {
    /// Return virtual frames corresponding to a physical frame, from
    /// outermost to innermost, along with the module that contains
    /// the physical PC.
    ///
    /// # Panics
    ///
    /// Panics if the PC does not belong to a registered module, if
    /// that module has no frame table, if the PC offset does not fit
    /// in `u32`, or if no program-point record exists at the PC (all
    /// of which indicate broken debug instrumentation).
    fn decode(registry: &ModuleRegistry, pc: usize) -> (Module, Vec<VirtualFrame>) {
        let (module_with_code, pc) = registry
            .module_and_code_by_pc(pc)
            .expect("Wasm frame PC does not correspond to a module");
        let module = module_with_code.module();
        let table = module.frame_table().unwrap();
        let pc = u32::try_from(pc).expect("PC offset too large");
        // `Post` position: the PC is a return address, so it points
        // just after the relevant instruction.
        let program_points = table.find_program_point(pc, FrameInstPos::Post)
            .expect("There must be a program point record in every frame when debug instrumentation is enabled");

        (
            module.clone(),
            program_points
                .map(|(wasm_pc, frame_descriptor, stack_shape)| VirtualFrame {
                    wasm_pc,
                    frame_descriptor,
                    stack_shape,
                })
                .collect(),
        )
    }
}
695
/// Data computed when we visit a given frame.
struct FrameData {
    /// Offset subtracted from the frame pointer to reach the debug
    /// state slot (see `slot_addr`).
    slot_to_fp_offset: usize,
    /// Identity of the function occupying this frame.
    func_key: FuncKey,
    /// The Wasm PC within that function.
    wasm_pc: u32,
    /// Shape of locals in this frame.
    ///
    /// We need to store this locally because `FrameView` cannot
    /// borrow the store: it needs a mut borrow, and an iterator
    /// cannot yield the same mut borrow multiple times because it
    /// cannot control the lifetime of the values it yields (the
    /// signature of `next()` does not bound the return value to the
    /// `&mut self` arg).
    locals: Vec<(FrameStateSlotOffset, FrameValType)>,
    /// Shape of the stack slots at this program point in this frame.
    ///
    /// In addition to the borrowing-related reason above, we also
    /// materialize this because we want to provide O(1) access to the
    /// stack by depth, and the frame slot descriptor stores info in a
    /// linked-list (actually DAG, with dedup'ing) way.
    stack: Vec<(FrameStateSlotOffset, FrameValType)>,
}
718
719impl FrameData {
720    fn compute(frame: VirtualFrame, module: &Module) -> Self {
721        let frame_table = module.frame_table().unwrap();
722        // Parse the frame descriptor.
723        let (data, slot_to_fp_offset) = frame_table
724            .frame_descriptor(frame.frame_descriptor)
725            .unwrap();
726        let frame_state_slot = FrameStateSlot::parse(data).unwrap();
727        let slot_to_fp_offset = usize::try_from(slot_to_fp_offset).unwrap();
728
729        // Materialize the stack shape so we have O(1) access to its
730        // elements, and so we don't need to keep the borrow to the
731        // module alive.
732        let mut stack = frame_state_slot
733            .stack(frame.stack_shape)
734            .collect::<Vec<_>>();
735        stack.reverse(); // Put top-of-stack last.
736
737        // Materialize the local offsets/types so we don't need to
738        // keep the borrow to the module alive.
739        let locals = frame_state_slot.locals().collect::<Vec<_>>();
740
741        FrameData {
742            slot_to_fp_offset,
743            func_key: frame_state_slot.func_key(),
744            wasm_pc: frame.wasm_pc,
745            stack,
746            locals,
747        }
748    }
749
750    fn slot_addr(&self, fp: usize) -> *mut u8 {
751        let fp: *mut u8 = core::ptr::with_exposed_provenance_mut(fp);
752        fp.wrapping_sub(self.slot_to_fp_offset)
753    }
754}
755
/// Read the value at the given offset.
///
/// `slot_base` is the base address of the frame's state slot;
/// `offset` and `ty` describe where within the slot the value lives
/// and how to interpret its bytes. GC references are converted back
/// into rooted handles via `store`.
///
/// NOTE(review): the reads below use plain (aligned) loads of the
/// form `*(address as *const T)`; this assumes the instrumentation
/// stores each value at its natural alignment within the slot —
/// confirm against the frontend that lays out frame state slots.
///
/// # Safety
///
/// The `offset` and `ty` must correspond to a valid value written
/// to the frame by generated code of the correct type. This will
/// be the case if this information comes from the frame tables
/// (as long as the frontend that generates the tables and
/// instrumentation is correct, and as long as the tables are
/// preserved through serialization).
unsafe fn read_value(
    store: &mut StoreOpaque,
    slot_base: *const u8,
    offset: FrameStateSlotOffset,
    ty: FrameValType,
) -> Val {
    // SAFETY (offset arithmetic): offsets come from the frame table,
    // which per the safety condition describes valid in-frame slots.
    let address = unsafe { slot_base.offset(isize::try_from(offset.offset()).unwrap()) };

    // SAFETY: each case reads a value from memory that should be
    // valid according to our safety condition.
    match ty {
        FrameValType::I32 => {
            let value = unsafe { *(address as *const i32) };
            Val::I32(value)
        }
        FrameValType::I64 => {
            let value = unsafe { *(address as *const i64) };
            Val::I64(value)
        }
        FrameValType::F32 => {
            // Floats are read as raw bits (`u32`/`u64`), which is
            // what `Val::F32`/`Val::F64` carry.
            let value = unsafe { *(address as *const u32) };
            Val::F32(value)
        }
        FrameValType::F64 => {
            let value = unsafe { *(address as *const u64) };
            Val::F64(value)
        }
        FrameValType::V128 => {
            // Vectors are always stored as little-endian.
            let value = unsafe { u128::from_le_bytes(*(address as *const [u8; 16])) };
            Val::V128(value.into())
        }
        FrameValType::AnyRef => {
            // `AutoAssertNoGc` ensures no GC can occur while we hold
            // the raw (unrooted) reference before `_from_raw` roots it.
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = AnyRef::_from_raw(&mut nogc, value);
            Val::AnyRef(value)
        }
        FrameValType::ExnRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExnRef::_from_raw(&mut nogc, value);
            Val::ExnRef(value)
        }
        FrameValType::ExternRef => {
            let mut nogc = AutoAssertNoGc::new(store);
            let value = unsafe { *(address as *const u32) };
            let value = ExternRef::_from_raw(&mut nogc, value);
            Val::ExternRef(value)
        }
        FrameValType::FuncRef => {
            // Funcrefs are stored as raw pointers, not GC-managed
            // references, so no no-GC scope is needed here.
            let value = unsafe { *(address as *const *mut c_void) };
            let value = unsafe { Func::_from_raw(store, value) };
            Val::FuncRef(value)
        }
        FrameValType::ContRef => {
            unimplemented!("contref values are not implemented in the host API yet")
        }
    }
}
826
/// Compute raw pointers to all GC refs in the given frame.
// Note: ideally this would be an impl Iterator, but this is quite
// awkward because of the locally computed data (FrameStateSlot::parse
// structured result) within the closure borrowed by a nested closure.
#[cfg(feature = "gc")]
pub(crate) fn gc_refs_in_frame<'a>(ft: FrameTable<'a>, pc: u32, fp: *mut usize) -> Vec<*mut u32> {
    let frame_base_ptr = fp.cast::<u8>();
    let mut refs: Vec<*mut u32> = Vec::new();

    // If the PC has no frame-table entry there are no GC refs here.
    let Some(frames) = ft.find_program_point(pc, FrameInstPos::Post) else {
        return refs;
    };

    for (_wasm_pc, frame_desc, stack_shape) in frames {
        let (desc_data, slot_to_fp_offset) = ft.frame_descriptor(frame_desc).unwrap();
        // SAFETY: the frame table's slot-to-FP offset describes the
        // in-frame state slot relative to the given frame pointer.
        let slot_base = unsafe { frame_base_ptr.offset(-isize::try_from(slot_to_fp_offset).unwrap()) };
        let desc = FrameStateSlot::parse(desc_data).unwrap();
        for (offset, ty) in desc.stack_and_locals(stack_shape) {
            match ty {
                // Only GC-managed reference types contribute roots.
                FrameValType::AnyRef | FrameValType::ExnRef | FrameValType::ExternRef => {
                    let slot = unsafe {
                        slot_base
                            .offset(isize::try_from(offset.offset()).unwrap())
                            .cast::<u32>()
                    };
                    refs.push(slot);
                }
                // Funcrefs/contrefs and all numeric/vector values are
                // not GC-managed references.
                FrameValType::ContRef
                | FrameValType::FuncRef
                | FrameValType::I32
                | FrameValType::I64
                | FrameValType::F32
                | FrameValType::F64
                | FrameValType::V128 => {}
            }
        }
    }
    refs
}
862
/// One debug event that occurs when running Wasm code on a store with
/// a debug handler attached.
#[derive(Debug)]
pub enum DebugEvent<'a> {
    /// A [`wasmtime::Error`](crate::Error) was raised by a hostcall.
    HostcallError(&'a crate::Error),
    /// An exception is thrown and caught by Wasm. The current state
    /// is at the throw-point.
    CaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// An exception was not caught and is escaping to the host.
    UncaughtExceptionThrown(OwnedRooted<ExnRef>),
    /// A Wasm trap occurred.
    Trap(Trap),
    /// A breakpoint was reached. In single-step mode, this event is
    /// emitted at every Wasm PC.
    Breakpoint,
    /// An epoch yield occurred.
    EpochYield,
}
881
/// A handler for debug events.
///
/// This is an async callback that is invoked directly within the
/// context of a debug event that occurs, i.e., with the Wasm code
/// still on the stack. The callback can thus observe that stack, up
/// to the most recent entry to Wasm.[^1]
///
/// Because this callback receives a `StoreContextMut`, it has full
/// access to any state that any other hostcall has, including the
/// `T`. In that way, it is like an epoch-deadline callback or a
/// call-hook callback. It also "freezes" the entire store for the
/// duration of the debugger callback future.
///
/// In the future, we expect to provide an "externally async" API on
/// the `Store` that allows receiving a stream of debug events and
/// accessing the store mutably while frozen; that will need to
/// integrate with [`Store::run_concurrent`] to properly timeslice and
/// scope the mutable access to the store, and has not been built
/// yet. In the meantime, it should be possible to build a fully
/// functional debugger with this async-callback API by channeling
/// debug events out, and requests to read the store back in, over
/// message-passing channels between the callback and an external
/// debugger main loop.
///
/// Note that the `handle` hook may use its mutable store access to
/// invoke another Wasm. Debug events will also be caught and will
/// cause further `handle` invocations during this recursive
/// invocation. It is up to the debugger to handle any implications of
/// this reentrancy (e.g., implications on a duplex channel protocol
/// with an event/continue handshake) if it does so.
///
/// Note also that this trait has `Clone` as a supertrait, and the
/// handler is cloned at every invocation as an artifact of the
/// internal ownership structure of Wasmtime: the handler itself is
/// owned by the store, but also receives a mutable borrow to the
/// whole store, so we need to clone it out to invoke it. It is
/// recommended that this trait be implemented by a type that is cheap
/// to clone: for example, a single `Arc` handle to debugger state.
///
/// [^1]: Providing visibility further than the most recent entry to
///       Wasm is not directly possible because it could see into
///       another async stack, and the stack that polls the future
///       running a particular Wasm invocation could change after each
///       suspend point in the handler.
///
/// [`Store::run_concurrent`]: crate::Store::run_concurrent
pub trait DebugHandler: Clone + Send + Sync + 'static {
    /// The data expected on the store that this handler is attached
    /// to.
    type Data;

    /// Handle a debug event.
    ///
    /// `store` grants mutable access to the (frozen) store for the
    /// duration of the returned future, and `event` describes what
    /// occurred; the future runs with the Wasm frames that raised the
    /// event still on the stack.
    fn handle(
        &self,
        store: StoreContextMut<'_, Self::Data>,
        event: DebugEvent<'_>,
    ) -> impl Future<Output = ()> + Send;
}
940
/// Breakpoint state for modules within a store.
#[derive(Default)]
pub(crate) struct BreakpointState {
    /// Single-step mode: when enabled, a breakpoint event is emitted
    /// at every Wasm PC.
    single_step: bool,
    /// Breakpoints added individually, keyed by (module ID, Wasm PC).
    breakpoints: BTreeSet<BreakpointKey>,
}
949
/// A breakpoint: a location identified by a module and a Wasm PC
/// offset within that module.
pub struct Breakpoint {
    /// Reference to the module in which we are setting the breakpoint.
    pub module: Module,
    /// Wasm PC offset within the module.
    pub pc: u32,
}
957
/// Internal key identifying a breakpoint: the compiled module's ID
/// paired with the Wasm PC offset within that module.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct BreakpointKey(CompiledModuleId, u32);
960
961impl BreakpointKey {
962    fn from_raw(module: &Module, pc: u32) -> BreakpointKey {
963        BreakpointKey(module.id(), pc)
964    }
965
966    fn get(&self, registry: &ModuleRegistry) -> Breakpoint {
967        let module = registry
968            .module_by_compiled_id(self.0)
969            .expect("Module should not have been removed from Store")
970            .clone();
971        Breakpoint { module, pc: self.1 }
972    }
973}
974
/// A breakpoint-editing session.
///
/// This enables updating breakpoint state (setting or unsetting
/// individual breakpoints or the store-global single-step flag) in a
/// batch. It is more efficient to batch these updates because
/// "re-publishing" the newly patched code, with updated breakpoint
/// settings, typically requires a syscall to re-enable execute
/// permissions.
pub struct BreakpointEdit<'a> {
    /// The breakpoint state being edited.
    state: &'a mut BreakpointState,
    /// The store's module registry, used to find per-store code copies.
    registry: &'a mut ModuleRegistry,
    /// Modules that have been edited.
    ///
    /// Invariant: each of these modules' CodeMemory objects is
    /// *unpublished* when in the dirty set.
    dirty_modules: BTreeSet<StoreCodePC>,
}
992
993impl BreakpointState {
994    pub(crate) fn edit<'a>(&'a mut self, registry: &'a mut ModuleRegistry) -> BreakpointEdit<'a> {
995        BreakpointEdit {
996            state: self,
997            registry,
998            dirty_modules: BTreeSet::new(),
999        }
1000    }
1001
1002    pub(crate) fn breakpoints<'a>(
1003        &'a self,
1004        registry: &'a ModuleRegistry,
1005    ) -> impl Iterator<Item = Breakpoint> + 'a {
1006        self.breakpoints.iter().map(|key| key.get(registry))
1007    }
1008
1009    pub(crate) fn is_single_step(&self) -> bool {
1010        self.single_step
1011    }
1012
1013    /// Internal helper to patch a new module for
1014    /// single-stepping. When a module is newly registered in a
1015    /// `Store`, we need to patch all breakpoints into the copy for
1016    /// this `Store` if single-stepping is currently enabled.
1017    pub(crate) fn patch_new_module(&self, code: &mut StoreCode, module: &Module) -> Result<()> {
1018        // Apply single-step state if single-stepping is enabled. Note
1019        // that no other individual breakpoints will exist yet (as
1020        // this is a newly registered module).
1021        if self.single_step {
1022            let mem = code.code_memory_mut().unwrap();
1023            mem.unpublish()?;
1024            BreakpointEdit::apply_single_step(mem, module, true, |_key| false)?;
1025            mem.publish()?;
1026        }
1027        Ok(())
1028    }
1029}
1030
impl<'a> BreakpointEdit<'a> {
    /// Look up (registering if necessary) the store-local code memory
    /// for `module`, unpublishing it on first access within this edit
    /// session so its text can be patched.
    fn get_code_memory<'b>(
        breakpoints: &BreakpointState,
        registry: &'b mut ModuleRegistry,
        dirty_modules: &mut BTreeSet<StoreCodePC>,
        module: &Module,
    ) -> Result<&'b mut CodeMemory> {
        let store_code_pc =
            registry.store_code_base_or_register(module, RegisterBreakpointState(breakpoints))?;
        let code_memory = registry
            .store_code_mut(store_code_pc)
            .expect("Just checked presence above")
            .code_memory_mut()
            .expect("Must have unique ownership of StoreCode in guest-debug mode");
        // `insert` returns `true` only on first insertion, so each
        // code memory is unpublished at most once per session; it is
        // re-published by `BreakpointEdit`'s `Drop` impl.
        if dirty_modules.insert(store_code_pc) {
            code_memory.unpublish()?;
        }
        Ok(code_memory)
    }

    /// Apply (or revert) the given breakpoint patches to the text of
    /// an (unpublished) code memory.
    fn patch<'b>(
        patches: impl Iterator<Item = FrameTableBreakpointData<'b>> + 'b,
        mem: &mut CodeMemory,
        enable: bool,
    ) {
        let mem = mem.text_mut();
        for patch in patches {
            // Each patch carries both byte sequences; pick the one
            // matching the requested state.
            let data = if enable { patch.enable } else { patch.disable };
            let mem = &mut mem[patch.offset..patch.offset + data.len()];
            log::trace!(
                "patch: offset 0x{:x} with enable={enable}: data {data:?} replacing {mem:?}",
                patch.offset
            );
            mem.copy_from_slice(data);
        }
    }

    /// Add a breakpoint in the given module at the given PC in that
    /// module.
    ///
    /// No effect if the breakpoint is already set.
    pub fn add_breakpoint(&mut self, module: &Module, pc: u32) -> Result<()> {
        let key = BreakpointKey::from_raw(module, pc);
        // Record the breakpoint before touching code so that, if the
        // module's store-local code is first registered inside
        // `get_code_memory` below, registration (which receives
        // `RegisterBreakpointState`) sees the up-to-date set.
        self.state.breakpoints.insert(key);
        log::trace!("patching in breakpoint {key:?}");
        let mem =
            Self::get_code_memory(self.state, self.registry, &mut self.dirty_modules, module)?;
        let frame_table = module
            .frame_table()
            .expect("Frame table must be present when guest-debug is enabled");
        let patches = frame_table.lookup_breakpoint_patches_by_pc(pc);
        Self::patch(patches, mem, true);
        Ok(())
    }

    /// Remove a breakpoint in the given module at the given PC in
    /// that module.
    ///
    /// No effect if the breakpoint was not set.
    pub fn remove_breakpoint(&mut self, module: &Module, pc: u32) -> Result<()> {
        let key = BreakpointKey::from_raw(module, pc);
        self.state.breakpoints.remove(&key);
        // In single-step mode every site's patch stays enabled, so
        // only patch the site out when not single-stepping.
        if !self.state.single_step {
            let mem =
                Self::get_code_memory(self.state, self.registry, &mut self.dirty_modules, module)?;
            let frame_table = module
                .frame_table()
                .expect("Frame table must be present when guest-debug is enabled");
            let patches = frame_table.lookup_breakpoint_patches_by_pc(pc);
            Self::patch(patches, mem, false);
        }
        Ok(())
    }

    /// Patch every breakpoint site in `module`: a site is enabled
    /// when single-stepping (`enabled`) or when `key_enabled` reports
    /// an individually-set breakpoint at that site.
    fn apply_single_step<F: Fn(&BreakpointKey) -> bool>(
        mem: &mut CodeMemory,
        module: &Module,
        enabled: bool,
        key_enabled: F,
    ) -> Result<()> {
        let table = module
            .frame_table()
            .expect("Frame table must be present when guest-debug is enabled");
        for (wasm_pc, patch) in table.breakpoint_patches() {
            let key = BreakpointKey::from_raw(&module, wasm_pc);
            let this_enabled = enabled || key_enabled(&key);
            log::trace!(
                "single_step: enabled {enabled} key {key:?} -> this_enabled {this_enabled}"
            );
            Self::patch(core::iter::once(patch), mem, this_enabled);
        }
        Ok(())
    }

    /// Turn on or off single-step mode.
    ///
    /// In single-step mode, a breakpoint event is emitted at every
    /// Wasm PC.
    pub fn single_step(&mut self, enabled: bool) -> Result<()> {
        log::trace!(
            "single_step({enabled}) with breakpoint set {:?}",
            self.state.breakpoints
        );
        if self.state.single_step == enabled {
            // No change to current state; don't go through the effort of re-patching and
            // re-publishing code.
            return Ok(());
        }
        // Clone module handles up front: the patching loop below
        // needs `&mut self.registry`, which we cannot hold while
        // iterating the registry's modules.
        let modules = self.registry.all_modules().cloned().collect::<Vec<_>>();
        for module in modules {
            let mem =
                Self::get_code_memory(self.state, self.registry, &mut self.dirty_modules, &module)?;
            Self::apply_single_step(mem, &module, enabled, |key| {
                self.state.breakpoints.contains(key)
            })?;
        }

        self.state.single_step = enabled;

        Ok(())
    }
}
1153
1154impl<'a> Drop for BreakpointEdit<'a> {
1155    fn drop(&mut self) {
1156        for &store_code_base in &self.dirty_modules {
1157            let store_code = self.registry.store_code_mut(store_code_base).unwrap();
1158            if let Err(e) = store_code
1159                .code_memory_mut()
1160                .expect("Must have unique ownership of StoreCode in guest-debug mode")
1161                .publish()
1162            {
1163                abort_on_republish_error(e);
1164            }
1165        }
1166    }
1167}
1168
/// Abort when we cannot re-publish executable code.
///
/// Note that this puts us in quite a conundrum. Typically we will
/// have been editing breakpoints from within a hostcall context
/// (e.g. inside a debugger hook while execution is paused) with JIT
/// code on the stack. Wasmtime's usual path to return errors is back
/// through that JIT code: we do not panic-unwind across the JIT code,
/// we return into the exit trampoline and that then re-enters the
/// raise libcall to use a Cranelift exception-throw to cross most of
/// the JIT frames to the entry trampoline. When even trampolines are
/// no longer executable, we have no way out. Even an ordinary
/// `panic!` cannot work, because we catch panics and carry them
/// across JIT code using that trampoline-based error path. Our only
/// way out is to directly abort the whole process.
///
/// This is not without precedent: other engines have similar failure
/// paths. For example, SpiderMonkey directly aborts the process when
/// failing to re-apply executable permissions (see [1]).
///
/// Note that we don't really expect to ever hit this case in
/// practice: it's unlikely that `mprotect` applying `PROT_EXEC` would
/// fail due to, e.g., resource exhaustion in the kernel, because we
/// will have the same net number of virtual memory areas before and
/// after the permissions change. Nevertheless, we have to account for
/// the possibility of error.
///
/// [1]: https://searchfox.org/firefox-main/rev/7496c8515212669451d7e775a00c2be07da38ca5/js/src/jit/AutoWritableJitCode.h#26-56
#[cfg(feature = "std")]
fn abort_on_republish_error(e: crate::Error) -> ! {
    // Log first: the abort below produces no further diagnostics.
    log::error!(
        "Failed to re-publish executable code: {e:?}. Wasmtime cannot return through JIT code on the stack and cannot even panic; aborting the process."
    );
    std::process::abort();
}
1203
/// In the `no_std` case, we don't have a concept of a "process
/// abort", so rely on `panic!`. Typically an embedded scenario that
/// uses `no_std` will build with `panic=abort` so the effect is the
/// same. If it doesn't, there is truly nothing we can do here so
/// let's panic anyway; the panic propagation through the trampolines
/// will at least deterministically crash.
#[cfg(not(feature = "std"))]
fn abort_on_republish_error(e: crate::Error) -> ! {
    // See the `std` variant's doc comment for why we cannot return
    // or unwind normally here.
    panic!("Failed to re-publish executable code: {e:?}");
}