wasmtime/runtime/vm/gc/gc_runtime.rs

//! Traits for abstracting over our different garbage collectors.

use crate::prelude::*;
use crate::runtime::vm::{
    ExternRefHostDataId, ExternRefHostDataTable, GcHeapObject, SendSyncPtr, TypedGcRef, VMArrayRef,
    VMExternRef, VMGcHeader, VMGcObjectDataMut, VMGcRef, VMStructRef,
};
use core::ptr::NonNull;
use core::{
    alloc::Layout, any::Any, cell::UnsafeCell, marker, mem, num::NonZeroUsize, ops::Range, ptr,
};
use wasmtime_environ::{GcArrayLayout, GcStructLayout, GcTypeLayouts, VMSharedTypeIndex};

/// Trait for integrating a garbage collector with the runtime.
///
/// This trait is responsible for:
///
/// * GC barriers used by runtime code (as opposed to compiled Wasm code)
///
/// * Creating and managing GC heaps for individual stores
///
/// * Running garbage collection
///
/// # Safety
///
/// The collector, its GC heaps, and its GC barriers, taken together as a
/// whole, must be safe. Additionally, they must work with the GC barriers
/// emitted into compiled Wasm code via the collector's corresponding
/// `GcCompiler` implementation. That is, if callers only call safe methods on
/// this trait (while pairing it with its associated `GcCompiler`, `GcHeap`,
/// etc.) and uphold all the documented safety invariants of this trait's
/// unsafe methods, then it must be impossible for callers to violate memory
/// safety. Implementations of this trait may not require callers to uphold any
/// safety invariants beyond those already documented in this trait's
/// interface.
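///
/// # Example
///
/// A deliberately minimal sketch of what an implementation's shape looks like;
/// the `MyCollector`, `MyLayouts`, and `MyHeap` types below are hypothetical
/// and their definitions are elided:
///
/// ```ignore
/// struct MyCollector {
///     layouts: MyLayouts,
/// }
///
/// unsafe impl GcRuntime for MyCollector {
///     fn layouts(&self) -> &dyn GcTypeLayouts {
///         &self.layouts
///     }
///
///     #[cfg(feature = "gc")]
///     fn new_gc_heap(&self) -> Result<Box<dyn GcHeap>> {
///         Ok(Box::new(MyHeap::new()))
///     }
/// }
/// ```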
pub unsafe trait GcRuntime: 'static + Send + Sync {
    /// Get this collector's GC type layouts.
    fn layouts(&self) -> &dyn GcTypeLayouts;

    /// Construct a new GC heap.
    #[cfg(feature = "gc")]
    fn new_gc_heap(&self) -> Result<Box<dyn GcHeap>>;
}

/// A heap that manages garbage-collected objects.
///
/// Each `wasmtime::Store` is associated with a single `GcHeap`, and a `GcHeap`
/// is only ever used with one store at a time, but a `GcHeap` may be reused
/// with a new store after its original store is dropped. The `reset` method
/// will be called in between each such reuse. (This reuse allows for better
/// integration with the pooling allocator.)
///
/// If a `GcHeap` mapped any memory, its `Drop` implementation should unmap
/// that memory.
///
/// # Safety
///
/// The trait methods below are all safe: implementations of this trait must
/// ensure that these methods cannot be misused to create memory unsafety. The
/// expectation is that -- given that `VMGcRef` is a newtype over an index --
/// implementations perform tricks similar to those used by Wasm linear memory
/// implementations. The heap should internally be a contiguous region of
/// memory, and `VMGcRef` indices into the heap must be bounds checked
/// (explicitly or implicitly via virtual memory tricks).
///
/// Furthermore, if heap corruption occurs because (for example) a `VMGcRef`
/// from a different heap is used with this heap, then that corruption must be
/// limited to within this heap. Every heap is a mini sandbox. It follows that
/// native pointers should never be written into or read out from the GC heap,
/// since that could spread corruption from inside the GC heap out to the
/// native host heap. The host data for an `externref`, therefore, is stored in
/// a side table (`ExternRefHostDataTable`) and never inside the heap. Only an
/// id referencing a slot in that table should ever be written into the GC
/// heap.
///
/// These constraints give us a great amount of safety compared to working with
/// raw pointers. The worst that could happen is corruption local to the heap
/// and a panic, or perhaps reading stale heap data from a previous Wasm
/// instance. A corrupt `GcHeap` can *never* result in corruption of the native
/// host.
///
/// The downside is that we are introducing `heap_base + index` computations
/// and bounds checking to access GC memory, adding performance overhead. This
/// is deemed a worthwhile trade-off. Furthermore, it isn't even a clear-cut
/// performance degradation, since this allows us to use 32-bit "pointers",
/// giving us more compact data representations and the improved cache
/// utilization that implies.
pub unsafe trait GcHeap: 'static + Send + Sync {
    ////////////////////////////////////////////////////////////////////////////
    // `Any` methods

    /// Get this heap as an `&Any`.
    fn as_any(&self) -> &dyn Any;

    /// Get this heap as an `&mut Any`.
    fn as_any_mut(&mut self) -> &mut dyn Any;

    ////////////////////////////////////////////////////////////////////////////
    // No-GC Scope Methods

    /// Enter a no-GC scope.
    ///
    /// The `gc` method should panic if it is called while we are inside a
    /// no-GC scope.
    ///
    /// We can enter multiple, nested no-GC scopes, and this method should
    /// account for that.
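    ///
    /// A minimal sketch of how an implementation might track nesting (the
    /// `no_gc_count` field is hypothetical):
    ///
    /// ```ignore
    /// fn enter_no_gc_scope(&mut self) {
    ///     self.no_gc_count += 1;
    /// }
    ///
    /// fn exit_no_gc_scope(&mut self) {
    ///     debug_assert!(self.no_gc_count > 0);
    ///     self.no_gc_count -= 1;
    /// }
    /// ```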
    fn enter_no_gc_scope(&mut self);

    /// Exit a no-GC scope.
    ///
    /// Dual to `enter_no_gc_scope`.
    fn exit_no_gc_scope(&mut self);

    ////////////////////////////////////////////////////////////////////////////
    // GC Barriers

    /// Read barrier called every time the runtime clones a GC reference.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef;

    /// Write barrier called whenever the runtime is nulling out a GC reference.
    ///
    /// Default implemented in terms of the `write_gc_ref` barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    ///
    /// The given `gc_ref` should not be used again.
    fn drop_gc_ref(&mut self, host_data_table: &mut ExternRefHostDataTable, gc_ref: VMGcRef) {
        let mut dest = Some(gc_ref);
        self.write_gc_ref(host_data_table, &mut dest, None);
    }

    /// Write barrier called every time the runtime overwrites a GC reference.
    ///
    /// The `source` is a borrowed GC reference, and should not have been cloned
    /// already for this write operation. This allows implementations to fuse
    /// the `source`'s read barrier into this write barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given heap for
    /// both the `source` and `destination`. Failure to do so is memory safe,
    /// but may result in general failures such as panics or incorrect results.
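    ///
    /// An illustrative caller-side sketch (the `slot`, `new_ref`, and
    /// `host_data_table` variables are assumed; `slot` is an
    /// `Option<VMGcRef>` owned by the runtime):
    ///
    /// ```ignore
    /// // Overwrite `slot` with `new_ref`, letting the collector run its
    /// // barriers (including dropping whatever `slot` previously held).
    /// heap.write_gc_ref(host_data_table, &mut slot, new_ref.as_ref());
    /// ```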
    fn write_gc_ref(
        &mut self,
        host_data_table: &mut ExternRefHostDataTable,
        destination: &mut Option<VMGcRef>,
        source: Option<&VMGcRef>,
    );

    /// Read barrier called whenever a GC reference is passed from the runtime
    /// to Wasm: an argument to a host-to-Wasm call, or a return from a
    /// Wasm-to-host call.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef);

    /// Predicate invoked before calling into or returning to Wasm to determine
    /// whether we should GC first.
    ///
    /// `num_gc_refs` is the number of non-`i31ref` GC references that will be
    /// passed into Wasm.
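    ///
    /// A sketch of the intended caller-side usage (the `num_gc_refs_to_pass`
    /// count and the surrounding GC-trigger plumbing are assumed):
    ///
    /// ```ignore
    /// if let Some(n) = NonZeroUsize::new(num_gc_refs_to_pass) {
    ///     if heap.need_gc_before_entering_wasm(n) {
    ///         // Run a GC before calling into Wasm.
    ///     }
    /// }
    /// ```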
    fn need_gc_before_entering_wasm(&self, num_gc_refs: NonZeroUsize) -> bool;

    ////////////////////////////////////////////////////////////////////////////
    // `externref` Methods

    /// Allocate a `VMExternRef` associated with the given host data ID.
    ///
    /// Return values:
    ///
    /// * `Ok(Some(_))`: The allocation was successful.
    ///
    /// * `Ok(None)`: There is currently no available space for this
    ///   allocation. The caller should call `self.gc()`, run the GC to
    ///   completion so the collector can reclaim space, and then try allocating
    ///   again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
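    ///
    /// An illustrative caller-side retry pattern (the `host_data_id`, `roots`,
    /// and `host_data_table` plumbing around the `gc` call is assumed):
    ///
    /// ```ignore
    /// let externref = match heap.alloc_externref(host_data_id)? {
    ///     Some(x) => x,
    ///     None => {
    ///         // Out of space: run a GC to completion, then retry.
    ///         heap.gc(roots, host_data_table).collect();
    ///         match heap.alloc_externref(host_data_id)? {
    ///             Some(x) => x,
    ///             None => bail!("out of GC heap space"),
    ///         }
    ///     }
    /// };
    /// ```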
    fn alloc_externref(&mut self, host_data: ExternRefHostDataId) -> Result<Option<VMExternRef>>;

    /// Get the host data ID associated with the given `externref`.
    ///
    /// Callers should pass a valid `externref` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId;

    ////////////////////////////////////////////////////////////////////////////
    // Struct, array, and general GC object methods

    /// Get the header of the object that `gc_ref` points to.
    fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader;

    /// Get the header of the object that `gc_ref` points to.
    fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader;

    /// Get the size (in bytes) of the object referenced by `gc_ref`.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn object_size(&self, gc_ref: &VMGcRef) -> usize;

    /// Allocate a raw, uninitialized GC-managed object with the given header
    /// and layout.
    ///
    /// The object's fields and elements are left uninitialized. It is the
    /// caller's responsibility to initialize them before exposing the object to
    /// Wasm or triggering a GC.
    ///
    /// The header's described type and layout must match *for this
    /// collector*. That is, if this collector adds an extra header word to all
    /// objects, the given layout must already include space for that header
    /// word. Therefore, this method is effectively only usable with layouts
    /// derived from a `Gc{Struct,Array}Layout` returned by this collector.
    ///
    /// Failure to uphold any of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Some(_))`: The allocation was successful.
    ///
    /// * `Ok(None)`: There is currently no available space for this
    ///   allocation. The caller should call `self.gc()`, run the GC to
    ///   completion so the collector can reclaim space, and then try allocating
    ///   again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   alignment is larger than this collector's implementation limit.
    fn alloc_raw(&mut self, header: VMGcHeader, layout: Layout) -> Result<Option<VMGcRef>>;

    /// Allocate a GC-managed struct of the given type and layout.
    ///
    /// The struct's fields are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the struct to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Some(_))`: The allocation was successful.
    ///
    /// * `Ok(None)`: There is currently no available space for this
    ///   allocation. The caller should call `self.gc()`, run the GC to
    ///   completion so the collector can reclaim space, and then try allocating
    ///   again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_struct(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Option<VMStructRef>>;

    /// Deallocate an uninitialized, GC-managed struct.
    ///
    /// This is useful when initialization of the struct's fields fails, so
    /// that the struct's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized fields as
    /// valid GC references.
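    ///
    /// An illustrative caller-side sketch (the `init_fields` helper is
    /// hypothetical):
    ///
    /// ```ignore
    /// let structref = match heap.alloc_uninit_struct(ty, &layout)? {
    ///     Some(s) => s,
    ///     None => return Ok(None), // Caller must GC and retry.
    /// };
    /// if let Err(e) = init_fields(heap, &structref) {
    ///     // Initialization failed: eagerly give the allocation back.
    ///     heap.dealloc_uninit_struct(structref);
    ///     return Err(e);
    /// }
    /// ```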
    fn dealloc_uninit_struct(&mut self, structref: VMStructRef);

    /// Allocate a GC-managed array of the given type and layout, with space
    /// for `len` elements.
    ///
    /// The array's elements are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the array to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Some(_))`: The allocation was successful.
    ///
    /// * `Ok(None)`: There is currently no available space for this
    ///   allocation. The caller should call `self.gc()`, run the GC to
    ///   completion so the collector can reclaim space, and then try allocating
    ///   again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_array(
        &mut self,
        ty: VMSharedTypeIndex,
        len: u32,
        layout: &GcArrayLayout,
    ) -> Result<Option<VMArrayRef>>;

    /// Deallocate an uninitialized, GC-managed array.
    ///
    /// This is useful when initialization of the array's elements fails, so
    /// that the array's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized elements as
    /// valid GC references.
    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef);

    /// Get the length of the given array.
    ///
    /// Panics on out-of-bounds accesses.
    ///
    /// Callers should pass a valid `arrayref` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn array_len(&self, arrayref: &VMArrayRef) -> u32;

    ////////////////////////////////////////////////////////////////////////////
    // Garbage Collection Methods

    /// Start a new garbage collection process.
    ///
    /// The given `roots` are GC roots and should not be collected (nor anything
    /// transitively reachable from them).
    ///
    /// Upon reclaiming an `externref`, its associated entry in the
    /// `host_data_table` is removed.
    ///
    /// Callers should pass valid GC roots that belong to this heap, and the
    /// host data table associated with this heap's `externref`s. Failure to do
    /// so is memory safe, but may result in general failures such as panics or
    /// incorrect results.
    ///
    /// This method should panic if we are in a no-GC scope.
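    ///
    /// An illustrative sketch of driving a collection to completion
    /// synchronously (building `roots_list` and `host_data_table` is assumed;
    /// see `GcRootsList` below):
    ///
    /// ```ignore
    /// let mut collection = heap.gc(unsafe { roots_list.iter() }, host_data_table);
    /// collection.collect();
    /// ```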
    fn gc<'a>(
        &'a mut self,
        roots: GcRootsIter<'a>,
        host_data_table: &'a mut ExternRefHostDataTable,
    ) -> Box<dyn GarbageCollection<'a> + 'a>;

    ////////////////////////////////////////////////////////////////////////////
    // JIT-Code Interaction Methods

    /// Get the pointer that will be stored in the `VMContext::gc_heap_data`
    /// field and be accessible from JIT code via collaboration with the
    /// corresponding `GcCompiler` trait.
    ///
    /// # Safety
    ///
    /// The returned pointer, if any, must remain valid as long as `self` is not
    /// dropped.
    unsafe fn vmctx_gc_heap_data(&self) -> NonNull<u8>;

    ////////////////////////////////////////////////////////////////////////////
    // Recycling GC Heap Methods

    /// Reset this heap.
    ///
    /// Calling this method disassociates this heap from the store that it was
    /// previously associated with, making it available to be associated with a
    /// new store.
    ///
    /// This should refill free lists, reset bump pointers, and so on, as if
    /// nothing were allocated in this heap (because nothing is allocated in
    /// this heap anymore).
    ///
    /// This should retain any allocated memory from the global allocator and
    /// any virtual memory mappings.
    ///
    /// This method is only used with the pooling allocator.
    #[cfg(feature = "pooling-allocator")]
    fn reset(&mut self);

    ////////////////////////////////////////////////////////////////////////////
    // Accessors for the raw bytes of the GC heap

    /// Get a slice of the raw bytes of the GC heap.
    ///
    /// # Implementation Safety
    ///
    /// The heap slice must be the GC heap region, and the region must remain
    /// valid (i.e. not moved or resized) for JIT code until `self` is dropped
    /// or `self.reset()` is called.
    fn heap_slice(&self) -> &[UnsafeCell<u8>];

    /// Get a mutable slice of the raw bytes of the GC heap.
    ///
    /// # Implementation Safety
    ///
    /// The heap slice must be the GC heap region, and the region must remain
    /// valid (i.e. not moved or resized) for JIT code until `self` is dropped
    /// or `self.reset()` is called.
    fn heap_slice_mut(&mut self) -> &mut [u8];

    ////////////////////////////////////////////////////////////////////////////
    // Provided helper methods.

    /// Index into this heap and get a shared reference to the `T` that `gc_ref`
    /// points to.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn index<T>(&self, gc_ref: &TypedGcRef<T>) -> &T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &self.heap_slice()[start..][..len];
        unsafe { &*(slice.as_ptr().cast::<T>()) }
    }

    /// Index into this heap and get an exclusive reference to the `T` that
    /// `gc_ref` points to.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn index_mut<T>(&mut self, gc_ref: &TypedGcRef<T>) -> &mut T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &mut self.heap_slice_mut()[start..][..len];
        unsafe { &mut *(slice.as_mut_ptr().cast::<T>()) }
    }

    /// Get the range of bytes that the given object occupies in the heap.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn object_range(&self, gc_ref: &VMGcRef) -> Range<usize> {
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let size = self.object_size(gc_ref);
        let end = start.checked_add(size).unwrap();
        start..end
    }

    /// Get a mutable borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data(&mut self, gc_ref: &VMGcRef) -> VMGcObjectDataMut<'_> {
        let range = self.object_range(gc_ref);
        let data = &mut self.heap_slice_mut()[range];
        VMGcObjectDataMut::new(data)
    }

    /// Get a pair of mutable borrows of the given objects' data.
    ///
    /// # Panics
    ///
    /// Panics if `a == b` or on out-of-bounds accesses or if either GC ref is
    /// an `i31ref`.
    fn gc_object_data_pair(
        &mut self,
        a: &VMGcRef,
        b: &VMGcRef,
    ) -> (VMGcObjectDataMut<'_>, VMGcObjectDataMut<'_>) {
        assert_ne!(a, b);

        let a_range = self.object_range(a);
        let b_range = self.object_range(b);

        // Assert that the two objects do not overlap.
        assert!(a_range.start <= a_range.end);
        assert!(b_range.start <= b_range.end);
        assert!(a_range.end <= b_range.start || b_range.end <= a_range.start);

        let (a_data, b_data) = if a_range.start < b_range.start {
            let (a_half, b_half) = self.heap_slice_mut().split_at_mut(b_range.start);
            let b_len = b_range.end - b_range.start;
            (&mut a_half[a_range], &mut b_half[..b_len])
        } else {
            let (b_half, a_half) = self.heap_slice_mut().split_at_mut(a_range.start);
            let a_len = a_range.end - a_range.start;
            (&mut a_half[..a_len], &mut b_half[b_range])
        };

        (
            VMGcObjectDataMut::new(a_data),
            VMGcObjectDataMut::new(b_data),
        )
    }
}

/// A list of GC roots.
///
/// This is effectively a builder for a `GcRootsIter` that will be given to a GC
/// heap when it is time to perform garbage collection.
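///
/// An illustrative sketch of how a roots list is built and consumed (the
/// pointers to roots, the `heap`, and the `host_data_table` plumbing are
/// assumed):
///
/// ```ignore
/// let mut roots = GcRootsList::default();
/// unsafe {
///     roots.add_root(ptr_to_a_store_owned_ref, "store-owned GC ref");
///     roots.add_wasm_stack_root(ptr_to_a_stack_slot);
/// }
/// let mut collection = heap.gc(unsafe { roots.iter() }, host_data_table);
/// collection.collect();
/// ```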
#[derive(Default)]
pub struct GcRootsList(Vec<RawGcRoot>);

// Ideally these `*mut`s would be `&mut`s and we wouldn't need as much of this
// machinery around `GcRootsList`, `RawGcRoot`, `GcRoot`, and `GcRootsIter`, but
// if we try that then we run into two different kinds of lifetime issues:
//
// 1. When collecting the various roots from a `&mut StoreOpaque`, we borrow
//    from `self` to push new GC roots onto the roots list. But then we want to
//    call helper methods like `self.for_each_global(...)`, but we can't because
//    there are active borrows of `self` preventing it.
//
// 2. We want to reuse the roots list and its backing storage across GCs, rather
//    than reallocate on every GC. But the only place for the roots list to live
//    such that it is easily reusable across GCs is in the store itself. But the
//    contents of the roots list (when it is non-empty, during GCs) borrow from
//    the store, which creates self-references.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
    not(feature = "gc"),
    expect(
        dead_code,
        reason = "not worth it at this time to #[cfg] away these variants",
    )
)]
enum RawGcRoot {
    Stack(SendSyncPtr<u32>),
    NonStack(SendSyncPtr<VMGcRef>),
}

#[cfg(feature = "gc")]
impl GcRootsList {
    /// Add a GC root that is inside a Wasm stack frame to this list.
    #[inline]
    pub unsafe fn add_wasm_stack_root(&mut self, ptr_to_root: SendSyncPtr<u32>) {
        log::trace!(
            "Adding Wasm stack root: {:#p} -> {:#p}",
            ptr_to_root,
            VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).unwrap()
        );
        debug_assert!(VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).is_some());
        self.0.push(RawGcRoot::Stack(ptr_to_root));
    }

    /// Add a GC root to this list.
    #[inline]
    pub unsafe fn add_root(&mut self, ptr_to_root: SendSyncPtr<VMGcRef>, why: &str) {
        log::trace!(
            "Adding non-stack root: {why}: {:#p}",
            ptr_to_root.as_ref().unchecked_copy()
        );
        self.0.push(RawGcRoot::NonStack(ptr_to_root))
    }

    /// Get an iterator over all roots in this list.
    ///
    /// # Safety
    ///
    /// Callers must ensure that all the pointers to GC roots that have been
    /// added to this list are valid for the duration of the `'a` lifetime.
    #[inline]
    pub unsafe fn iter<'a>(&'a mut self) -> GcRootsIter<'a> {
        GcRootsIter {
            list: self,
            index: 0,
        }
    }

    /// Is this list empty?
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Clear this GC roots list.
    #[inline]
    pub fn clear(&mut self) {
        self.0.clear();
    }
}

/// An iterator over all the roots in a `GcRootsList`.
pub struct GcRootsIter<'a> {
    list: &'a mut GcRootsList,
    index: usize,
}

impl<'a> Iterator for GcRootsIter<'a> {
    type Item = GcRoot<'a>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let root = GcRoot {
            raw: self.list.0.get(self.index).copied()?,
            _phantom: marker::PhantomData,
        };
        self.index += 1;
        Some(root)
    }
}

/// A GC root.
///
/// This is, effectively, a mutable reference to a `VMGcRef`.
///
/// Collector implementations should update the `VMGcRef` if they move the
/// `VMGcRef`'s referent during the course of a GC.
#[derive(Debug)]
pub struct GcRoot<'a> {
    raw: RawGcRoot,
    _phantom: marker::PhantomData<&'a mut VMGcRef>,
}

impl GcRoot<'_> {
    /// Is this root from inside a Wasm stack frame?
    #[inline]
    pub fn is_on_wasm_stack(&self) -> bool {
        matches!(self.raw, RawGcRoot::Stack(_))
    }

    /// Get this GC root.
    ///
    /// Does NOT run GC barriers.
    #[inline]
    pub fn get(&self) -> VMGcRef {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe { ptr::read(ptr.as_ptr()) },
            RawGcRoot::Stack(ptr) => unsafe {
                let raw: u32 = ptr::read(ptr.as_ptr());
                VMGcRef::from_raw_u32(raw).expect("non-null")
            },
        }
    }

    /// Set this GC root.
    ///
    /// Does NOT run GC barriers.
    ///
    /// Collector implementations should use this method to update GC root
    /// pointers after the collector moves the GC object that the root is
    /// referencing.
    pub fn set(&mut self, new_ref: VMGcRef) {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref);
            },
            RawGcRoot::Stack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref.as_raw_u32());
            },
        }
    }
}

/// A garbage collection process.
///
/// Implementations define the `collect_increment` method, and then consumers
/// can either use
///
/// * `GarbageCollection::collect` for synchronous code, or
///
/// * `collect_async(Box<dyn GarbageCollection>)` for async code.
///
/// When using fuel and/or epochs, consumers can also use `collect_increment`
/// directly and choose to abandon further execution in the whole store that
/// owns this GC's heap if the GC is taking too long to complete.
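///
/// An illustrative sketch of driving the GC in bounded slices (the
/// `budget_remaining` check is hypothetical):
///
/// ```ignore
/// while budget_remaining() {
///     match gc.collect_increment() {
///         GcProgress::Complete => break,
///         GcProgress::Continue => continue,
///     }
/// }
/// ```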
pub trait GarbageCollection<'a>: Send + Sync {
    /// Perform an incremental slice of this garbage collection process.
    ///
    /// Upon completion of the slice, a `GcProgress` is returned which informs
    /// the caller whether to continue driving this GC process forward and
    /// executing more slices (`GcProgress::Continue`) or whether the GC process
    /// has finished (`GcProgress::Complete`).
    ///
    /// The mutator does *not* run in between increments. This method exists
    /// solely to allow cooperative yielding.
    fn collect_increment(&mut self) -> GcProgress;

    /// Run this GC process to completion.
    ///
    /// Keeps calling `collect_increment` in a loop until the GC process is
    /// complete.
    fn collect(&mut self) {
        loop {
            match self.collect_increment() {
                GcProgress::Continue => continue,
                GcProgress::Complete => return,
            }
        }
    }
}

/// The result of doing an incremental amount of GC.
pub enum GcProgress {
    /// There is still more work to do.
    Continue,
    /// The GC is complete.
    Complete,
}

/// Asynchronously run the given garbage collection process to completion,
/// cooperatively yielding back to the event loop after each increment of work.
#[cfg(feature = "async")]
pub async fn collect_async<'a>(mut collection: Box<dyn GarbageCollection<'a> + 'a>) {
    loop {
        match collection.collect_increment() {
            GcProgress::Continue => crate::runtime::vm::Yield::new().await,
            GcProgress::Complete => return,
        }
    }
}

#[cfg(all(test, feature = "async"))]
mod collect_async_tests {
    use super::*;

    #[test]
    fn is_send_and_sync() {
        fn _assert_send_sync<T: Send + Sync>(_: T) {}

        fn _foo<'a>(collection: Box<dyn GarbageCollection<'a>>) {
            _assert_send_sync(collect_async(collection));
        }
    }
}