
wasmtime/runtime/vm/gc/gc_runtime.rs

//! Traits for abstracting over our different garbage collectors.

use crate::prelude::*;
use crate::runtime::vm::{
    ExternRefHostDataId, ExternRefHostDataTable, GcHeapObject, SendSyncPtr, TypedGcRef, VMArrayRef,
    VMExternRef, VMGcHeader, VMGcObjectData, VMGcRef,
};
use crate::store::Asyncness;
use crate::vm::VMMemoryDefinition;
use core::ptr::NonNull;
use core::slice;
use core::{alloc::Layout, any::Any, marker, mem, ops::Range, ptr};
use wasmtime_environ::{GcArrayLayout, GcStructLayout, GcTypeLayouts, VMSharedTypeIndex};

/// Trait for integrating a garbage collector with the runtime.
///
/// This trait is responsible for:
///
/// * GC barriers used by runtime code (as opposed to compiled Wasm code)
///
/// * Creating and managing GC heaps for individual stores
///
/// * Running garbage collection
///
/// # Safety
///
/// The collector, its GC heaps, and GC barriers when taken together as a whole
/// must be safe. Additionally, they must work with the GC barriers emitted into
/// compiled Wasm code via the collector's corresponding `GcCompiler`
/// implementation. That is, if callers only call safe methods on this trait
/// (while pairing it with its associated `GcCompiler`, `GcHeap`, etc.) and
/// uphold all the documented safety invariants of this trait's unsafe methods,
/// then it must be impossible for callers to violate memory safety.
/// Implementations of this trait may not add new safety invariants, beyond
/// those already documented in this trait's interface, that callers need to
/// uphold.
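///
/// As a rough, purely illustrative sketch (the `MyCollector` and `MyHeap`
/// types below are hypothetical, not defined in this crate), an implementation
/// wires up its type layouts and per-store heap construction like so:
///
/// ```ignore
/// unsafe impl GcRuntime for MyCollector {
///     fn layouts(&self) -> &dyn GcTypeLayouts {
///         &self.layouts
///     }
///
///     #[cfg(feature = "gc")]
///     fn new_gc_heap(&self, engine: &crate::Engine) -> Result<Box<dyn GcHeap>> {
///         // One heap per store; heaps may later be recycled across stores.
///         Ok(Box::new(MyHeap::new(engine)?))
///     }
/// }
/// ```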
pub unsafe trait GcRuntime: 'static + Send + Sync {
    /// Get this collector's GC type layouts.
    fn layouts(&self) -> &dyn GcTypeLayouts;

    /// Construct a new GC heap.
    #[cfg(feature = "gc")]
    fn new_gc_heap(&self, engine: &crate::Engine) -> Result<Box<dyn GcHeap>>;
}

/// A heap that manages garbage-collected objects.
///
/// Each `wasmtime::Store` is associated with a single `GcHeap`, and a `GcHeap`
/// is only ever used with one store at a time, but a `GcHeap` may be reused
/// with new stores after its original store is dropped. The `detach` method
/// will be called in between each such reuse. (This reuse allows for better
/// integration with the pooling allocator.)
///
/// If a `GcHeap` mapped any memory, its `Drop` implementation should unmap that
/// memory.
///
/// # Safety
///
/// The trait methods below are all safe: implementations of this trait must
/// ensure that these methods cannot be misused to create memory unsafety. The
/// expectation is that -- given that `VMGcRef` is a newtype over an index --
/// implementations perform tricks similar to those used by Wasm linear memory
/// implementations. The heap should internally be a contiguous region of memory
/// and `VMGcRef` indices into the heap must be bounds checked (explicitly or
/// implicitly via virtual memory tricks).
///
/// Furthermore, if heap corruption occurs because (for example) a `VMGcRef`
/// from a different heap is used with this heap, then that corruption must be
/// limited to within this heap. Every heap is a mini sandbox. It follows that
/// native pointers should never be written into or read out from the GC heap,
/// since that could spread corruption from inside the GC heap out to the native
/// host heap. The host data for an `externref`, therefore, is stored in a side
/// table (`ExternRefHostDataTable`) and never inside the heap. Only an id
/// referencing a slot in that table should ever be written into the GC heap.
///
/// These constraints give us a great amount of safety compared to working with
/// raw pointers. The worst that could happen is corruption local to the heap
/// and a panic, or perhaps reading stale heap data from a previous Wasm
/// instance. A corrupt `GcHeap` can *never* result in corruption of the native
/// host's heap.
///
/// The downside is that we are introducing `heap_base + index` computations and
/// bounds checking to access GC memory, adding performance overhead. This is
/// deemed a worthwhile trade-off. Furthermore, it isn't even a clear-cut
/// performance degradation, since this allows us to use 32-bit "pointers",
/// giving us more compact data representations and the improved cache
/// utilization that implies.
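///
/// Concretely, all access to heap objects goes through `base + index`
/// arithmetic over this heap's backing bytes, with an explicit or implicit
/// bounds check. For example (mirroring the provided `index` helper defined
/// below, and purely as a sketch):
///
/// ```ignore
/// let offset = usize::try_from(gc_ref.as_heap_index().unwrap().get()).unwrap();
/// let size = heap.object_size(&gc_ref);
/// // Out-of-bounds indices panic rather than touching memory outside the heap.
/// let object_bytes = &heap.heap_slice()[offset..][..size];
/// ```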
pub unsafe trait GcHeap: 'static + Send + Sync {
    ////////////////////////////////////////////////////////////////////////////
    // Life Cycle GC Heap Methods

    /// Is this GC heap currently attached to a memory?
    fn is_attached(&self) -> bool;

    /// Attach this GC heap to a memory.
    ///
    /// Once attached, this GC heap can be used with Wasm.
    fn attach(&mut self, memory: crate::vm::Memory);

    /// Reset this heap.
    ///
    /// Calling this method disassociates this heap from the store that it has
    /// been associated with, making it available to be associated with a new
    /// store.
    ///
    /// This should refill free lists, reset bump pointers, etc. as if
    /// nothing were allocated in this heap (because nothing is allocated in
    /// this heap anymore).
    ///
    /// This should retain any allocated memory from the global allocator and
    /// any virtual memory mappings.
    fn detach(&mut self) -> crate::vm::Memory;

    ////////////////////////////////////////////////////////////////////////////
    // `Any` methods

    /// Get this heap as an `&Any`.
    fn as_any(&self) -> &dyn Any;

    /// Get this heap as an `&mut Any`.
    fn as_any_mut(&mut self) -> &mut dyn Any;

    ////////////////////////////////////////////////////////////////////////////
    // No-GC Scope Methods

    /// Enter a no-GC scope.
    ///
    /// Calling the `gc` method when we are inside a no-GC scope should panic.
    ///
    /// We can enter multiple, nested no-GC scopes and this method should
    /// account for that.
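    ///
    /// A minimal sketch of one way to satisfy this (purely illustrative; the
    /// `no_gc_count` field is hypothetical):
    ///
    /// ```ignore
    /// fn enter_no_gc_scope(&mut self) {
    ///     self.no_gc_count += 1;
    /// }
    ///
    /// fn exit_no_gc_scope(&mut self) {
    ///     self.no_gc_count -= 1;
    /// }
    ///
    /// // ... and `gc` begins with `assert_eq!(self.no_gc_count, 0)`.
    /// ```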
    fn enter_no_gc_scope(&mut self);

    /// Exit a no-GC scope.
    ///
    /// Dual to `enter_no_gc_scope`.
    fn exit_no_gc_scope(&mut self);

    ////////////////////////////////////////////////////////////////////////////
    // GC Barriers

    /// Read barrier called every time the runtime clones a GC reference.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef;

    /// Write barrier called whenever the runtime is nulling out a GC reference.
    ///
    /// Implemented by default in terms of the `write_gc_ref` barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    ///
    /// The given `gc_ref` should not be used again.
    fn drop_gc_ref(&mut self, host_data_table: &mut ExternRefHostDataTable, gc_ref: VMGcRef) {
        let mut dest = Some(gc_ref);
        self.write_gc_ref(host_data_table, &mut dest, None);
    }

    /// Write barrier called every time the runtime overwrites a GC reference.
    ///
    /// The `source` is a borrowed GC reference, and should not have been cloned
    /// already for this write operation. This allows implementations to fuse
    /// the `source`'s read barrier into this write barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given heap for
    /// both the `source` and `destination`. Failure to do so is memory safe,
    /// but may result in general failures such as panics or incorrect results.
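    ///
    /// As a purely illustrative sketch, a reference-counting collector might
    /// fuse the barriers roughly like this (`inc_ref` and `dec_ref` are
    /// hypothetical helpers, not part of this trait):
    ///
    /// ```ignore
    /// fn write_gc_ref(
    ///     &mut self,
    ///     host_data_table: &mut ExternRefHostDataTable,
    ///     destination: &mut Option<VMGcRef>,
    ///     source: Option<&VMGcRef>,
    /// ) {
    ///     // Fused read barrier for the incoming reference.
    ///     let new = source.map(|r| {
    ///         self.inc_ref(r);
    ///         r.unchecked_copy()
    ///     });
    ///     // Release the old reference; if that reclaims an `externref`, its
    ///     // entry in `host_data_table` must be removed as well.
    ///     if let Some(old) = destination.take() {
    ///         self.dec_ref(host_data_table, old);
    ///     }
    ///     *destination = new;
    /// }
    /// ```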
    fn write_gc_ref(
        &mut self,
        host_data_table: &mut ExternRefHostDataTable,
        destination: &mut Option<VMGcRef>,
        source: Option<&VMGcRef>,
    );

    /// Read barrier called whenever a GC reference is passed from the runtime
    /// to Wasm: an argument to a host-to-Wasm call, or a return from a
    /// Wasm-to-host call.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef);

    ////////////////////////////////////////////////////////////////////////////
    // `externref` Methods

    /// Allocate a `VMExternRef` that wraps the given host data ID.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
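    ///
    /// A sketch of the retry protocol this return type implies for callers
    /// (purely illustrative; the `grow_or_gc` helper is hypothetical):
    ///
    /// ```ignore
    /// let externref = loop {
    ///     match heap.alloc_externref(host_data_id)? {
    ///         // Allocation succeeded.
    ///         Ok(x) => break x,
    ///         // Not enough room for `n` bytes right now: make space and retry.
    ///         Err(n) => grow_or_gc(&mut heap, n)?,
    ///     }
    /// };
    /// ```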
    fn alloc_externref(
        &mut self,
        host_data: ExternRefHostDataId,
    ) -> Result<Result<VMExternRef, u64>>;

    /// Get the host data ID associated with the given `externref`.
    ///
    /// Callers should pass a valid `externref` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId;

    ////////////////////////////////////////////////////////////////////////////
    // Struct, array, and general GC object methods

    /// Get the header of the object that `gc_ref` points to.
    fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader;

    /// Get a mutable reference to the header of the object that `gc_ref`
    /// points to.
    fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader;

    /// Get the size (in bytes) of the object referenced by `gc_ref`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_size(&self, gc_ref: &VMGcRef) -> usize;

    /// Allocate a raw, uninitialized GC-managed object with the given header
    /// and layout.
    ///
    /// The object's fields and elements are left uninitialized. It is the
    /// caller's responsibility to initialize them before exposing the object to
    /// Wasm or triggering a GC.
    ///
    /// The header's described type and layout must match *for this
    /// collector*. That is, if this collector adds an extra header word to all
    /// objects, the given layout must already include space for that header
    /// word. Therefore, this method is effectively only usable with layouts
    /// derived from a `Gc{Struct,Array}Layout` returned by this collector.
    ///
    /// Failure to uphold any of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   alignment is larger than this collector's implementation limit.
    fn alloc_raw(&mut self, header: VMGcHeader, layout: Layout) -> Result<Result<VMGcRef, u64>>;

    /// Allocate a GC-managed struct or exception object of the given type and
    /// layout.
    ///
    /// The object's fields are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the object to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_struct_or_exn(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Result<VMGcRef, u64>>;

    /// Deallocate an uninitialized, GC-managed struct or exception.
    ///
    /// This is useful if initialization of the object's fields fails, so
    /// that the object's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized fields as
    /// valid GC references, or anything like that.
    fn dealloc_uninit_struct_or_exn(&mut self, structref: VMGcRef);

    /// Allocate a GC-managed array of the given type and layout, with space for
    /// `len` elements.
    ///
    /// The array's elements are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the array to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_array(
        &mut self,
        ty: VMSharedTypeIndex,
        len: u32,
        layout: &GcArrayLayout,
    ) -> Result<Result<VMArrayRef, u64>>;

    /// Deallocate an uninitialized, GC-managed array.
    ///
    /// This is useful if initialization of the array's elements fails, so
    /// that the array's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized elements as
    /// valid GC references, or anything like that.
    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef);

    /// Get the length of the given array.
    ///
    /// Panics on out-of-bounds accesses.
    ///
    /// The given `arrayref` should be a valid array reference that belongs to
    /// this heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn array_len(&self, arrayref: &VMArrayRef) -> u32;

    ////////////////////////////////////////////////////////////////////////////
    // Garbage Collection Methods

    /// Start a new garbage collection process.
    ///
    /// The given `roots` are GC roots and should not be collected (nor anything
    /// transitively reachable from them).
    ///
    /// Upon reclaiming an `externref`, its associated entry in the
    /// `host_data_table` is removed.
    ///
    /// Callers should pass valid GC roots that belong to this heap, and the
    /// host data table associated with this heap's `externref`s. Failure to do
    /// so is memory safe, but may result in general failures such as panics or
    /// incorrect results.
    ///
    /// This method should panic if we are in a no-GC scope.
    fn gc<'a>(
        &'a mut self,
        roots: GcRootsIter<'a>,
        host_data_table: &'a mut ExternRefHostDataTable,
    ) -> Box<dyn GarbageCollection<'a> + 'a>;

    ////////////////////////////////////////////////////////////////////////////
    // JIT-Code Interaction Methods

    /// Get the pointer that will be stored in the `VMContext::gc_heap_data`
    /// field and be accessible from JIT code via collaboration with the
    /// corresponding `GcCompiler` trait.
    ///
    /// # Safety
    ///
    /// The returned pointer, if any, must remain valid as long as `self` is not
    /// dropped.
    unsafe fn vmctx_gc_heap_data(&self) -> NonNull<u8>;

    ////////////////////////////////////////////////////////////////////////////
    // Accessors for the raw bytes of the GC heap

    /// Take the underlying memory storage out of this GC heap.
    ///
    /// # Panics
    ///
    /// If this GC heap is used while the memory is taken then a panic will
    /// occur. This will also panic if the memory is already taken.
    fn take_memory(&mut self) -> crate::vm::Memory;

    /// Replace this GC heap's underlying memory storage.
    ///
    /// # Safety
    ///
    /// The `memory` must have been taken via `take_memory` and the GC heap must
    /// not have been used at all since the memory was taken. The memory must be
    /// the same size or larger than it was.
    unsafe fn replace_memory(&mut self, memory: crate::vm::Memory, delta_bytes_grown: u64);

    /// Get a raw `VMMemoryDefinition` for this heap's underlying memory storage.
    ///
    /// If/when exposing this `VMMemoryDefinition` to Wasm, it is your
    /// responsibility to ensure that doing so does not violate Rust's borrowing
    /// rules (e.g. make sure there is no active `heap_slice_mut()` call at the
    /// same time) and that, if this GC heap is resized (and its base
    /// potentially moves), Wasm gets a new, updated `VMMemoryDefinition`
    /// record.
    fn vmmemory(&self) -> VMMemoryDefinition;

    /// Get a slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice(&self) -> &[u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr().cast_const();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// Get a mutable slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice_mut(&mut self) -> &mut [u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    ////////////////////////////////////////////////////////////////////////////
    // Provided helper methods.

    /// Index into this heap and get a shared reference to the `T` that `gc_ref`
    /// points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index<T>(&self, gc_ref: &TypedGcRef<T>) -> &T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &self.heap_slice()[start..][..len];
        unsafe { &*(slice.as_ptr().cast::<T>()) }
    }

    /// Index into this heap and get an exclusive reference to the `T` that
    /// `gc_ref` points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index_mut<T>(&mut self, gc_ref: &TypedGcRef<T>) -> &mut T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &mut self.heap_slice_mut()[start..][..len];
        unsafe { &mut *(slice.as_mut_ptr().cast::<T>()) }
    }

    /// Get the range of bytes that the given object occupies in the heap.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_range(&self, gc_ref: &VMGcRef) -> Range<usize> {
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let size = self.object_size(gc_ref);
        let end = start.checked_add(size).unwrap();
        start..end
    }

    /// Get a shared borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data(&self, gc_ref: &VMGcRef) -> &VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &self.heap_slice()[range];
        data.into()
    }

    /// Get a mutable borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &mut self.heap_slice_mut()[range];
        data.into()
    }

    /// Get a pair of mutable borrows of the given objects' data.
    ///
    /// # Panics
    ///
    /// Panics if `a == b` or on out-of-bounds accesses or if either GC ref is
    /// an `i31ref`.
    fn gc_object_data_pair(
        &mut self,
        a: &VMGcRef,
        b: &VMGcRef,
    ) -> (&mut VMGcObjectData, &mut VMGcObjectData) {
        assert_ne!(a, b);

        let a_range = self.object_range(a);
        let b_range = self.object_range(b);

        // Assert that the two objects do not overlap.
        assert!(a_range.start <= a_range.end);
        assert!(b_range.start <= b_range.end);
        assert!(a_range.end <= b_range.start || b_range.end <= a_range.start);

        let (a_data, b_data) = if a_range.start < b_range.start {
            let (a_half, b_half) = self.heap_slice_mut().split_at_mut(b_range.start);
            let b_len = b_range.end - b_range.start;
            (&mut a_half[a_range], &mut b_half[..b_len])
        } else {
            let (b_half, a_half) = self.heap_slice_mut().split_at_mut(a_range.start);
            let a_len = a_range.end - a_range.start;
            (&mut a_half[..a_len], &mut b_half[b_range])
        };

        (a_data.into(), b_data.into())
    }
}

/// A list of GC roots.
///
/// This is effectively a builder for a `GcRootsIter` that will be given to a GC
/// heap when it is time to perform garbage collection.
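///
/// A minimal sketch of collection-time usage (hypothetical caller code; the
/// concrete pointers would come from walking the store and the Wasm stack):
///
/// ```ignore
/// let mut roots = GcRootsList::default();
/// unsafe {
///     roots.add_root(ptr_to_some_table_element, "a table element");
///     roots.add_wasm_stack_root(ptr_to_stack_slot);
/// }
/// let collection = heap.gc(unsafe { roots.iter() }, &mut host_data_table);
/// ```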
#[derive(Default)]
pub struct GcRootsList(Vec<RawGcRoot>);

// Ideally these `*mut`s would be `&mut`s and we wouldn't need as much of this
// machinery around `GcRootsList`, `RawGcRoot`, `GcRoot`, and `GcRootIter` but
// if we try that then we run into two different kinds of lifetime issues:
//
// 1. When collecting the various roots from a `&mut StoreOpaque`, we borrow
//    from `self` to push new GC roots onto the roots list. But then we want to
//    call helper methods like `self.for_each_global(...)`, but we can't because
//    there are active borrows of `self` preventing it.
//
// 2. We want to reuse the roots list and its backing storage across GCs, rather
//    than reallocate on every GC. But the only place for the roots list to live
//    such that it is easily reusable across GCs is in the store itself. But the
//    contents of the roots list (when it is non-empty, during GCs) borrow from
//    the store, which creates self-references.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
    not(feature = "gc"),
    expect(
        dead_code,
        reason = "not worth it at this time to #[cfg] away these variants",
    )
)]
enum RawGcRoot {
    Stack(SendSyncPtr<u32>),
    NonStack(SendSyncPtr<VMGcRef>),
}

#[cfg(feature = "gc")]
impl GcRootsList {
    /// Add a GC root that is inside a Wasm stack frame to this list.
    #[inline]
    pub unsafe fn add_wasm_stack_root(&mut self, ptr_to_root: SendSyncPtr<u32>) {
        unsafe {
            log::trace!(
                "Adding Wasm stack root: {:#p} -> {:#p}",
                ptr_to_root,
                VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).unwrap()
            );
            debug_assert!(VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).is_some());
        }
        self.0.push(RawGcRoot::Stack(ptr_to_root));
    }

    /// Add a GC root to this list.
    #[inline]
    pub unsafe fn add_root(&mut self, ptr_to_root: SendSyncPtr<VMGcRef>, why: &str) {
        unsafe {
            log::trace!(
                "Adding non-stack root: {why}: {:#p}",
                ptr_to_root.as_ref().unchecked_copy()
            );
        }
        self.0.push(RawGcRoot::NonStack(ptr_to_root))
    }

    /// Get an iterator over all roots in this list.
    ///
    /// # Safety
    ///
    /// Callers must ensure that all the pointers to GC roots that have been
    /// added to this list are valid for the duration of the `'a` lifetime.
    #[inline]
    pub unsafe fn iter<'a>(&'a mut self) -> GcRootsIter<'a> {
        GcRootsIter {
            list: self,
            index: 0,
        }
    }

    /// Is this list empty?
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Clear this GC roots list.
    #[inline]
    pub fn clear(&mut self) {
        self.0.clear();
    }
}

/// An iterator over all the roots in a `GcRootsList`.
pub struct GcRootsIter<'a> {
    list: &'a mut GcRootsList,
    index: usize,
}

impl<'a> Iterator for GcRootsIter<'a> {
    type Item = GcRoot<'a>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let root = GcRoot {
            raw: self.list.0.get(self.index).copied()?,
            _phantom: marker::PhantomData,
        };
        self.index += 1;
        Some(root)
    }
}

/// A GC root.
///
/// This is, effectively, a mutable reference to a `VMGcRef`.
///
/// Collector implementations should update the `VMGcRef` if they move the
/// `VMGcRef`'s referent during the course of a GC.
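///
/// For example, a moving collector's `gc` implementation might update each
/// root roughly like this (a sketch; the `copy_object` helper is hypothetical):
///
/// ```ignore
/// for mut root in roots {
///     let old = root.get();
///     let new = self.copy_object(&old);
///     root.set(new);
/// }
/// ```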
#[derive(Debug)]
pub struct GcRoot<'a> {
    raw: RawGcRoot,
    _phantom: marker::PhantomData<&'a mut VMGcRef>,
}

impl GcRoot<'_> {
    /// Is this root from inside a Wasm stack frame?
    #[inline]
    pub fn is_on_wasm_stack(&self) -> bool {
        matches!(self.raw, RawGcRoot::Stack(_))
    }

    /// Get this GC root.
    ///
    /// Does NOT run GC barriers.
    #[inline]
    pub fn get(&self) -> VMGcRef {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe { ptr::read(ptr.as_ptr()) },
            RawGcRoot::Stack(ptr) => unsafe {
                let raw: u32 = ptr::read(ptr.as_ptr());
                VMGcRef::from_raw_u32(raw).expect("non-null")
            },
        }
    }

    /// Set this GC root.
    ///
    /// Does NOT run GC barriers.
    ///
    /// Collector implementations should use this method to update GC root
    /// pointers after the collector moves the GC object that the root is
    /// referencing.
    pub fn set(&mut self, new_ref: VMGcRef) {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref);
            },
            RawGcRoot::Stack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref.as_raw_u32());
            },
        }
    }
}

/// A garbage collection process.
///
/// Implementations define the `collect_increment` method, and then consumers
/// can either use
///
/// * `GarbageCollection::collect` for synchronous code, or
///
/// * `collect_async(Box<dyn GarbageCollection>)` for async code.
///
/// When using fuel and/or epochs, consumers can also use `collect_increment`
/// directly and choose to abandon further execution in this GC's heap's whole
/// store if the GC is taking too long to complete.
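///
/// For example, a fuel-aware driver might bound the number of increments it is
/// willing to run (a sketch under that assumption; `BUDGET` is hypothetical):
///
/// ```ignore
/// let mut gc = heap.gc(roots, &mut host_data_table);
/// for _ in 0..BUDGET {
///     match gc.collect_increment() {
///         GcProgress::Continue => continue,
///         GcProgress::Complete => break,
///     }
/// }
/// ```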
pub trait GarbageCollection<'a>: Send + Sync {
    /// Perform an incremental slice of this garbage collection process.
    ///
    /// Upon completion of the slice, a `GcProgress` is returned which informs
    /// the caller whether to continue driving this GC process forward and
    /// executing more slices (`GcProgress::Continue`) or whether the GC process
    /// has finished (`GcProgress::Complete`).
    ///
    /// The mutator does *not* run in between increments. This method exists
    /// solely to allow cooperative yielding.
    fn collect_increment(&mut self) -> GcProgress;

    /// Run this GC process to completion.
    ///
    /// Keeps calling `collect_increment` in a loop until the GC process is
    /// complete.
    fn collect(&mut self) {
        loop {
            match self.collect_increment() {
                GcProgress::Continue => continue,
                GcProgress::Complete => return,
            }
        }
    }
}

/// The result of doing an incremental amount of GC.
pub enum GcProgress {
    /// There is still more work to do.
    Continue,
    /// The GC is complete.
    Complete,
}

/// Asynchronously run the given garbage collection process to completion,
/// cooperatively yielding back to the event loop after each increment of work.
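///
/// A minimal usage sketch (hypothetical async embedding; `heap`, `roots`, and
/// `host_data_table` come from the surrounding store):
///
/// ```ignore
/// let collection = heap.gc(roots, &mut host_data_table);
/// collect_async(collection, Asyncness::Yes).await;
/// ```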
pub async fn collect_async<'a>(
    mut collection: Box<dyn GarbageCollection<'a> + 'a>,
    asyncness: Asyncness,
) {
    loop {
        match collection.collect_increment() {
            GcProgress::Continue => {
                if asyncness != Asyncness::No {
                    #[cfg(feature = "async")]
                    crate::runtime::vm::Yield::new().await
                }
            }
            GcProgress::Complete => return,
        }
    }
}

#[cfg(all(test, feature = "async"))]
mod collect_async_tests {
    use super::*;

    #[test]
    fn is_send_and_sync() {
        fn _assert_send_sync<T: Send + Sync>(_: T) {}

        fn _foo<'a>(collection: Box<dyn GarbageCollection<'a>>) {
            _assert_send_sync(collect_async(collection, Asyncness::Yes));
        }
    }
}