wasmtime/runtime/vm/gc/gc_runtime.rs

//! Traits for abstracting over our different garbage collectors.

use crate::prelude::*;
use crate::runtime::vm::{
    ExternRefHostDataId, ExternRefHostDataTable, GcHeapObject, SendSyncPtr, TypedGcRef, VMArrayRef,
    VMExternRef, VMGcHeader, VMGcObjectData, VMGcRef,
};
use crate::vm::VMMemoryDefinition;
use core::ptr::NonNull;
use core::slice;
use core::{alloc::Layout, any::Any, marker, mem, ops::Range, ptr};
use wasmtime_environ::{GcArrayLayout, GcStructLayout, GcTypeLayouts, VMSharedTypeIndex};

/// Trait for integrating a garbage collector with the runtime.
///
/// This trait is responsible for:
///
/// * GC barriers used by runtime code (as opposed to compiled Wasm code)
///
/// * Creating and managing GC heaps for individual stores
///
/// * Running garbage collection
///
/// # Safety
///
/// The collector, its GC heaps, and GC barriers when taken together as a whole
/// must be safe. Additionally, they must work with the GC barriers emitted into
/// compiled Wasm code via the collector's corresponding `GcCompiler`
/// implementation. That is, if callers only call safe methods on this trait
/// (while pairing it with its associated `GcCompiler`, `GcHeap`, etc.) and
/// uphold all the documented safety invariants of this trait's unsafe methods,
/// then it must be impossible for callers to violate memory safety.
/// Implementations of this trait must not add new safety invariants, beyond
/// those documented in this trait's interface, that callers need to uphold.
pub unsafe trait GcRuntime: 'static + Send + Sync {
    /// Get this collector's GC type layouts.
    fn layouts(&self) -> &dyn GcTypeLayouts;

    /// Construct a new GC heap.
    #[cfg(feature = "gc")]
    fn new_gc_heap(&self, engine: &crate::Engine) -> Result<Box<dyn GcHeap>>;
}

/// A heap that manages garbage-collected objects.
///
/// Each `wasmtime::Store` is associated with a single `GcHeap`, and a `GcHeap`
/// is only ever used with one store at a time, but a `GcHeap` may be reused
/// with a new store after its original store is dropped. The `detach` and
/// `attach` methods are called in between each such reuse. (This reuse allows
/// for better integration with the pooling allocator.)
///
/// If a `GcHeap` mapped any memory, its `Drop` implementation should unmap that
/// memory.
///
/// # Safety
///
/// The trait methods below are all safe: implementations of this trait must
/// ensure that these methods cannot be misused to create memory unsafety. The
/// expectation is that -- given that `VMGcRef` is a newtype over an index --
/// implementations use tricks similar to those used by Wasm linear memory
/// implementations. The heap should internally be a contiguous region of memory
/// and `VMGcRef` indices into the heap must be bounds checked (explicitly or
/// implicitly via virtual memory tricks).
///
/// Furthermore, if heap corruption occurs because (for example) a `VMGcRef`
/// from a different heap is used with this heap, then that corruption must be
/// limited to within this heap. Every heap is a mini sandbox. It follows that
/// native pointers should never be written into or read out from the GC heap,
/// since that could spread corruption from inside the GC heap out to the native
/// host heap. The host data for an `externref`, therefore, is stored in a side
/// table (`ExternRefHostDataTable`) and never inside the heap. Only an id
/// referencing a slot in that table should ever be written into the GC heap.
///
/// These constraints give us a great deal of safety compared to working with
/// raw pointers. The worst that could happen is corruption local to the heap
/// and a panic, or perhaps reading stale heap data from a previous Wasm
/// instance. A corrupt `GcHeap` can *never* result in corruption of the native
/// host.
///
/// The downside is that we are introducing `heap_base + index` computations and
/// bounds checking to access GC memory, adding performance overhead. This is
/// deemed a worthwhile trade-off. Furthermore, it isn't even a clear-cut
/// performance degradation since this allows us to use 32-bit "pointers",
/// giving us more compact data representations and the improved cache
/// utilization that implies.
pub unsafe trait GcHeap: 'static + Send + Sync {
    ////////////////////////////////////////////////////////////////////////////
    // Life Cycle GC Heap Methods

    /// Is this GC heap currently attached to a memory?
    fn is_attached(&self) -> bool;

    /// Attach this GC heap to a memory.
    ///
    /// Once attached, this GC heap can be used with Wasm.
    fn attach(&mut self, memory: crate::vm::Memory);

    /// Reset this heap and detach it from its memory, returning that memory.
    ///
    /// Calling this method disassociates this heap from the store that it has
    /// been associated with, making it available to be associated with a new
    /// store.
    ///
    /// This should refill free lists, reset bump pointers, and so on, as if
    /// nothing were allocated in this heap (because nothing is allocated in
    /// this heap anymore).
    ///
    /// This should retain any allocated memory from the global allocator and
    /// any virtual memory mappings.
    fn detach(&mut self) -> crate::vm::Memory;

    ////////////////////////////////////////////////////////////////////////////
    // `Any` methods

    /// Get this heap as an `&Any`.
    fn as_any(&self) -> &dyn Any;

    /// Get this heap as an `&mut Any`.
    fn as_any_mut(&mut self) -> &mut dyn Any;

    ////////////////////////////////////////////////////////////////////////////
    // No-GC Scope Methods

    /// Enter a no-GC scope.
    ///
    /// Calling the `gc` method when we are inside a no-GC scope should panic.
    ///
    /// We can enter multiple, nested no-GC scopes and this method should
    /// account for that.
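    ///
    /// For example, a minimal sketch of how nested scopes are expected to
    /// behave (illustrative only; the counter is merely one way a collector
    /// could satisfy this requirement):
    ///
    /// ```ignore
    /// heap.enter_no_gc_scope();
    /// heap.enter_no_gc_scope(); // Scopes nest; conceptually a counter goes 1 -> 2.
    /// heap.exit_no_gc_scope();  // Still inside the outer scope...
    /// // ...so calling `heap.gc(...)` here should panic.
    /// heap.exit_no_gc_scope();  // Now collection is allowed again.
    /// ```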
    fn enter_no_gc_scope(&mut self);

    /// Exit a no-GC scope.
    ///
    /// Dual to `enter_no_gc_scope`.
    fn exit_no_gc_scope(&mut self);

    ////////////////////////////////////////////////////////////////////////////
    // GC Barriers

    /// Read barrier called every time the runtime clones a GC reference.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef;

    /// Write barrier called whenever the runtime is nulling out a GC reference.
    ///
    /// This has a default implementation in terms of the `write_gc_ref`
    /// barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    ///
    /// The given `gc_ref` should not be used again.
    fn drop_gc_ref(&mut self, host_data_table: &mut ExternRefHostDataTable, gc_ref: VMGcRef) {
        let mut dest = Some(gc_ref);
        self.write_gc_ref(host_data_table, &mut dest, None);
    }

    /// Write barrier called every time the runtime overwrites a GC reference.
    ///
    /// The `source` is a borrowed GC reference, and should not have been cloned
    /// already for this write operation. This allows implementations to fuse
    /// the `source`'s read barrier into this write barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given heap for
    /// both the `source` and `destination`. Failure to do so is memory safe,
    /// but may result in general failures such as panics or incorrect results.
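    ///
    /// As a hedged sketch, overwriting a slot might look like this
    /// (`table_slot` and `incoming` are hypothetical locals, not part of this
    /// API):
    ///
    /// ```ignore
    /// // Overwrite `table_slot`, letting the collector fuse `incoming`'s read
    /// // barrier into the write and reclaim the old value if appropriate.
    /// heap.write_gc_ref(&mut host_data_table, &mut table_slot, Some(&incoming));
    /// ```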
    fn write_gc_ref(
        &mut self,
        host_data_table: &mut ExternRefHostDataTable,
        destination: &mut Option<VMGcRef>,
        source: Option<&VMGcRef>,
    );

    /// Read barrier called whenever a GC reference is passed from the runtime
    /// to Wasm: an argument to a host-to-Wasm call, or a return from a
    /// Wasm-to-host call.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef);

    ////////////////////////////////////////////////////////////////////////////
    // `externref` Methods

    /// Allocate a `VMExternRef` associated with the given host data ID.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
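    ///
    /// A minimal sketch of the retry protocol shared by the `alloc_*` methods
    /// (`grow_gc_heap_by` is a hypothetical helper; in practice growth and GC
    /// triggering are driven by the store):
    ///
    /// ```ignore
    /// let externref = match heap.alloc_externref(host_data_id)? {
    ///     Ok(x) => x,
    ///     Err(bytes_needed) => {
    ///         // Make room, either by growing the heap or by collecting
    ///         // garbage, and then try the same allocation again.
    ///         grow_gc_heap_by(heap, bytes_needed)?;
    ///         heap.alloc_externref(host_data_id)?
    ///             .expect("allocation should succeed after growing the heap")
    ///     }
    /// };
    /// ```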
    fn alloc_externref(
        &mut self,
        host_data: ExternRefHostDataId,
    ) -> Result<Result<VMExternRef, u64>>;

    /// Get the host data ID associated with the given `externref`.
    ///
    /// Callers should pass a valid `externref` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId;

    ////////////////////////////////////////////////////////////////////////////
    // Struct, array, and general GC object methods

    /// Get the header of the object that `gc_ref` points to.
    fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader;

    /// Get a mutable reference to the header of the object that `gc_ref`
    /// points to.
    fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader;

    /// Get the size (in bytes) of the object referenced by `gc_ref`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_size(&self, gc_ref: &VMGcRef) -> usize;

    /// Allocate a raw, uninitialized GC-managed object with the given header
    /// and layout.
    ///
    /// The object's fields and elements are left uninitialized. It is the
    /// caller's responsibility to initialize them before exposing the object to
    /// Wasm or triggering a GC.
    ///
    /// The header's described type and layout must match *for this
    /// collector*. That is, if this collector adds an extra header word to all
    /// objects, the given layout must already include space for that header
    /// word. Therefore, this method is effectively only usable with layouts
    /// derived from a `Gc{Struct,Array}Layout` returned by this collector.
    ///
    /// Failure to uphold any of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   alignment is larger than this collector's implementation limit.
    fn alloc_raw(&mut self, header: VMGcHeader, layout: Layout) -> Result<Result<VMGcRef, u64>>;

    /// Allocate a GC-managed struct or exception object of the given type and
    /// layout.
    ///
    /// The object's fields are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the object to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_struct_or_exn(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Result<VMGcRef, u64>>;

    /// Deallocate an uninitialized, GC-managed struct or exception.
    ///
    /// This is useful when initialization of the object's fields fails, so
    /// that the object's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized fields as
    /// valid GC references, or something like that.
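    ///
    /// A hedged sketch of the intended allocate/initialize/deallocate-on-failure
    /// pattern (`init_fields` is a hypothetical, fallible initializer):
    ///
    /// ```ignore
    /// if let Ok(structref) = heap.alloc_uninit_struct_or_exn(ty, &layout)? {
    ///     if let Err(e) = init_fields(heap, &structref) {
    ///         // Initialization failed: eagerly reclaim the allocation so the
    ///         // collector never sees the uninitialized fields.
    ///         heap.dealloc_uninit_struct_or_exn(structref);
    ///         return Err(e);
    ///     }
    /// }
    /// ```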
    fn dealloc_uninit_struct_or_exn(&mut self, structref: VMGcRef);

    /// Allocate a GC-managed array of the given type, length, and layout.
    ///
    /// The array's elements are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the array to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_array(
        &mut self,
        ty: VMSharedTypeIndex,
        len: u32,
        layout: &GcArrayLayout,
    ) -> Result<Result<VMArrayRef, u64>>;

    /// Deallocate an uninitialized, GC-managed array.
    ///
    /// This is useful when initialization of the array's elements fails, so
    /// that the array's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized elements as
    /// valid GC references, or something like that.
    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef);

    /// Get the length of the given array.
    ///
    /// Panics on out-of-bounds accesses.
    ///
    /// The given `arrayref` should be a valid array reference that belongs to
    /// this heap. Failure to ensure this is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    fn array_len(&self, arrayref: &VMArrayRef) -> u32;

    ////////////////////////////////////////////////////////////////////////////
    // Garbage Collection Methods

    /// Start a new garbage collection process.
    ///
    /// The given `roots` are GC roots and should not be collected, nor should
    /// anything transitively reachable from them.
    ///
    /// Upon reclaiming an `externref`, its associated entry in the
    /// `host_data_table` is removed.
    ///
    /// Callers should pass valid GC roots that belong to this heap, and the
    /// host data table associated with this heap's `externref`s. Failure to do
    /// so is memory safe, but may result in general failures such as panics or
    /// incorrect results.
    ///
    /// This method should panic if we are in a no-GC scope.
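    ///
    /// A hedged sketch of how the runtime might drive a full, synchronous
    /// collection (construction of `roots_list` and `host_data_table` is
    /// elided):
    ///
    /// ```ignore
    /// let mut collection = heap.gc(unsafe { roots_list.iter() }, &mut host_data_table);
    /// collection.collect();
    /// drop(collection);
    /// // The roots list borrows from the store only for the duration of the
    /// // GC, so clear it out for reuse by the next collection.
    /// roots_list.clear();
    /// ```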
    fn gc<'a>(
        &'a mut self,
        roots: GcRootsIter<'a>,
        host_data_table: &'a mut ExternRefHostDataTable,
    ) -> Box<dyn GarbageCollection<'a> + 'a>;

    ////////////////////////////////////////////////////////////////////////////
    // JIT-Code Interaction Methods

    /// Get the pointer that will be stored in the `VMContext::gc_heap_data`
    /// field and be accessible from JIT code via collaboration with the
    /// corresponding `GcCompiler` trait.
    ///
    /// # Safety
    ///
    /// The returned pointer, if any, must remain valid as long as `self` is not
    /// dropped.
    unsafe fn vmctx_gc_heap_data(&self) -> NonNull<u8>;

    ////////////////////////////////////////////////////////////////////////////
    // Accessors for the raw bytes of the GC heap

    /// Take the underlying memory storage out of this GC heap.
    ///
    /// # Panics
    ///
    /// If this GC heap is used while the memory is taken then a panic will
    /// occur. This will also panic if the memory is already taken.
    fn take_memory(&mut self) -> crate::vm::Memory;

    /// Replace this GC heap's underlying memory storage.
    ///
    /// # Safety
    ///
    /// The `memory` must have been taken via `take_memory` and the GC heap must
    /// not have been used at all since the memory was taken. The memory must be
    /// the same size or larger than it was.
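    ///
    /// A hedged sketch of the take/grow/replace protocol this pairing is meant
    /// to support (`grow` is a hypothetical helper; actual growth goes through
    /// the store's memory machinery):
    ///
    /// ```ignore
    /// let memory = heap.take_memory();
    /// let (memory, delta_bytes_grown) = grow(memory, additional_bytes)?;
    /// // Safety: the memory was just taken from this heap, the heap was not
    /// // used in the meantime, and the memory only got larger.
    /// unsafe { heap.replace_memory(memory, delta_bytes_grown) };
    /// ```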
    unsafe fn replace_memory(&mut self, memory: crate::vm::Memory, delta_bytes_grown: u64);

    /// Get a raw `VMMemoryDefinition` for this heap's underlying memory storage.
    ///
    /// If/when exposing this `VMMemoryDefinition` to Wasm, it is your
    /// responsibility to ensure that you do not do that in such a way as to
    /// violate Rust's borrowing rules (e.g. make sure there is no active
    /// `heap_slice_mut()` call at the same time) and that if this GC heap is
    /// resized (and its base potentially moves) then that Wasm gets a new,
    /// updated `VMMemoryDefinition` record.
    fn vmmemory(&self) -> VMMemoryDefinition;

    /// Get a slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice(&self) -> &[u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr().cast_const();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// Get a mutable slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice_mut(&mut self) -> &mut [u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    ////////////////////////////////////////////////////////////////////////////
    // Provided helper methods.

    /// Index into this heap and get a shared reference to the `T` that `gc_ref`
    /// points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index<T>(&self, gc_ref: &TypedGcRef<T>) -> &T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &self.heap_slice()[start..][..len];
        unsafe { &*(slice.as_ptr().cast::<T>()) }
    }

    /// Index into this heap and get an exclusive reference to the `T` that
    /// `gc_ref` points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index_mut<T>(&mut self, gc_ref: &TypedGcRef<T>) -> &mut T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &mut self.heap_slice_mut()[start..][..len];
        unsafe { &mut *(slice.as_mut_ptr().cast::<T>()) }
    }

    /// Get the range of bytes that the given object occupies in the heap.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_range(&self, gc_ref: &VMGcRef) -> Range<usize> {
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let size = self.object_size(gc_ref);
        let end = start.checked_add(size).unwrap();
        start..end
    }

    /// Get a shared borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data(&self, gc_ref: &VMGcRef) -> &VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &self.heap_slice()[range];
        data.into()
    }

    /// Get a mutable borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &mut self.heap_slice_mut()[range];
        data.into()
    }

    /// Get a pair of mutable borrows of the given objects' data.
    ///
    /// # Panics
    ///
    /// Panics if `a == b` or on out-of-bounds accesses or if either GC ref is
    /// an `i31ref`.
    fn gc_object_data_pair(
        &mut self,
        a: &VMGcRef,
        b: &VMGcRef,
    ) -> (&mut VMGcObjectData, &mut VMGcObjectData) {
        assert_ne!(a, b);

        let a_range = self.object_range(a);
        let b_range = self.object_range(b);

        // Assert that the two objects do not overlap.
        assert!(a_range.start <= a_range.end);
        assert!(b_range.start <= b_range.end);
        assert!(a_range.end <= b_range.start || b_range.end <= a_range.start);

        let (a_data, b_data) = if a_range.start < b_range.start {
            let (a_half, b_half) = self.heap_slice_mut().split_at_mut(b_range.start);
            let b_len = b_range.end - b_range.start;
            (&mut a_half[a_range], &mut b_half[..b_len])
        } else {
            let (b_half, a_half) = self.heap_slice_mut().split_at_mut(a_range.start);
            let a_len = a_range.end - a_range.start;
            (&mut a_half[..a_len], &mut b_half[b_range])
        };

        (a_data.into(), b_data.into())
    }
}

/// A list of GC roots.
///
/// This is effectively a builder for a `GcRootsIter` that will be given to a GC
/// heap when it is time to perform garbage collection.
#[derive(Default)]
pub struct GcRootsList(Vec<RawGcRoot>);

// Ideally these `*mut`s would be `&mut`s and we wouldn't need as much of this
// machinery around `GcRootsList`, `RawGcRoot`, `GcRoot`, and `GcRootsIter` but
// if we try that then we run into two different kinds of lifetime issues:
//
// 1. When collecting the various roots from a `&mut StoreOpaque`, we borrow
//    from `self` to push new GC roots onto the roots list. But then we want to
//    call helper methods like `self.for_each_global(...)`, but we can't because
//    there are active borrows of `self` preventing it.
//
// 2. We want to reuse the roots list and its backing storage across GCs, rather
//    than reallocate on every GC. But the only place for the roots list to live
//    such that it is easily reusable across GCs is in the store itself. But the
//    contents of the roots list (when it is non-empty, during GCs) borrow from
//    the store, which creates self-references.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
    not(feature = "gc"),
    expect(
        dead_code,
        reason = "not worth it at this time to #[cfg] away these variants",
    )
)]
enum RawGcRoot {
    Stack(SendSyncPtr<u32>),
    NonStack(SendSyncPtr<VMGcRef>),
}

#[cfg(feature = "gc")]
impl GcRootsList {
    /// Add a GC root that is inside a Wasm stack frame to this list.
    #[inline]
    pub unsafe fn add_wasm_stack_root(&mut self, ptr_to_root: SendSyncPtr<u32>) {
        unsafe {
            log::trace!(
                "Adding Wasm stack root: {:#p} -> {:#p}",
                ptr_to_root,
                VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).unwrap()
            );
            debug_assert!(VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).is_some());
        }
        self.0.push(RawGcRoot::Stack(ptr_to_root));
    }

    /// Add a GC root to this list.
    #[inline]
    pub unsafe fn add_root(&mut self, ptr_to_root: SendSyncPtr<VMGcRef>, why: &str) {
        unsafe {
            log::trace!(
                "Adding non-stack root: {why}: {:#p}",
                ptr_to_root.as_ref().unchecked_copy()
            );
        }
        self.0.push(RawGcRoot::NonStack(ptr_to_root))
    }

    /// Get an iterator over all roots in this list.
    ///
    /// # Safety
    ///
    /// Callers must ensure that all the pointers to GC roots that have been
    /// added to this list are valid for the duration of the `'a` lifetime.
    #[inline]
    pub unsafe fn iter<'a>(&'a mut self) -> GcRootsIter<'a> {
        GcRootsIter {
            list: self,
            index: 0,
        }
    }

    /// Is this list empty?
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Clear this GC roots list.
    #[inline]
    pub fn clear(&mut self) {
        self.0.clear();
    }
}

/// An iterator over all the roots in a `GcRootsList`.
pub struct GcRootsIter<'a> {
    list: &'a mut GcRootsList,
    index: usize,
}

impl<'a> Iterator for GcRootsIter<'a> {
    type Item = GcRoot<'a>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let root = GcRoot {
            raw: self.list.0.get(self.index).copied()?,
            _phantom: marker::PhantomData,
        };
        self.index += 1;
        Some(root)
    }
}

/// A GC root.
///
/// This is, effectively, a mutable reference to a `VMGcRef`.
///
/// Collector implementations should update the `VMGcRef` if they move the
/// `VMGcRef`'s referent during the course of a GC.
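///
/// For example, a moving collector might update each root roughly like this
/// (a hedged sketch; `roots` is a `GcRootsIter` and `moved_to` is a
/// hypothetical lookup for an object's new location):
///
/// ```ignore
/// for mut root in roots {
///     let old = root.get();
///     if let Some(new) = moved_to(&old) {
///         // Does not run GC barriers; the collector is already mid-GC.
///         root.set(new);
///     }
/// }
/// ```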
#[derive(Debug)]
pub struct GcRoot<'a> {
    raw: RawGcRoot,
    _phantom: marker::PhantomData<&'a mut VMGcRef>,
}

impl GcRoot<'_> {
    /// Is this root from inside a Wasm stack frame?
    #[inline]
    pub fn is_on_wasm_stack(&self) -> bool {
        matches!(self.raw, RawGcRoot::Stack(_))
    }

    /// Get this GC root.
    ///
    /// Does NOT run GC barriers.
    #[inline]
    pub fn get(&self) -> VMGcRef {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe { ptr::read(ptr.as_ptr()) },
            RawGcRoot::Stack(ptr) => unsafe {
                let raw: u32 = ptr::read(ptr.as_ptr());
                VMGcRef::from_raw_u32(raw).expect("non-null")
            },
        }
    }

    /// Set this GC root.
    ///
    /// Does NOT run GC barriers.
    ///
    /// Collector implementations should use this method to update GC root
    /// pointers after the collector moves the GC object that the root is
    /// referencing.
    pub fn set(&mut self, new_ref: VMGcRef) {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref);
            },
            RawGcRoot::Stack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref.as_raw_u32());
            },
        }
    }
}

/// A garbage collection process.
///
/// Implementations define the `collect_increment` method, and then consumers
/// can either use
///
/// * `GarbageCollection::collect` for synchronous code, or
///
/// * `collect_async(Box<dyn GarbageCollection>)` for async code.
///
/// When using fuel and/or epochs, consumers can also use `collect_increment`
/// directly and choose to abandon further execution in the store that owns
/// this GC heap if the GC is taking too long to complete.
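///
/// For example, a fuel-style driver might look roughly like this (a hedged
/// sketch; `gc` is the boxed collection returned by `GcHeap::gc`, and
/// `out_of_budget` and `abandon_store` are hypothetical fuel/epoch hooks):
///
/// ```ignore
/// loop {
///     match gc.collect_increment() {
///         GcProgress::Complete => break,
///         GcProgress::Continue if out_of_budget() => {
///             // Took too long: give up on this store entirely rather than
///             // letting the GC run unboundedly.
///             abandon_store();
///             break;
///         }
///         GcProgress::Continue => continue,
///     }
/// }
/// ```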
pub trait GarbageCollection<'a>: Send + Sync {
    /// Perform an incremental slice of this garbage collection process.
    ///
    /// Upon completion of the slice, a `GcProgress` is returned which informs
    /// the caller whether to continue driving this GC process forward and
    /// executing more slices (`GcProgress::Continue`) or whether the GC process
    /// has finished (`GcProgress::Complete`).
    ///
    /// The mutator does *not* run in between increments. This method exists
    /// solely to allow callers to cooperatively yield in between increments of
    /// GC work.
    fn collect_increment(&mut self) -> GcProgress;

    /// Run this GC process to completion.
    ///
    /// Keeps calling `collect_increment` in a loop until the GC process is
    /// complete.
    fn collect(&mut self) {
        loop {
            match self.collect_increment() {
                GcProgress::Continue => continue,
                GcProgress::Complete => return,
            }
        }
    }
}

/// The result of doing an incremental amount of GC.
pub enum GcProgress {
    /// There is still more work to do.
    Continue,
    /// The GC is complete.
    Complete,
}

/// Asynchronously run the given garbage collection process to completion,
/// cooperatively yielding back to the event loop after each increment of work
/// (when `async_yield` is set).
pub async fn collect_async<'a>(
    mut collection: Box<dyn GarbageCollection<'a> + 'a>,
    async_yield: bool,
) {
    loop {
        match collection.collect_increment() {
            GcProgress::Continue => {
                if async_yield {
                    #[cfg(feature = "async")]
                    crate::runtime::vm::Yield::new().await
                }
            }
            GcProgress::Complete => return,
        }
    }
}

#[cfg(all(test, feature = "async"))]
mod collect_async_tests {
    use super::*;

    #[test]
    fn is_send_and_sync() {
        fn _assert_send_sync<T: Send + Sync>(_: T) {}

        fn _foo<'a>(collection: Box<dyn GarbageCollection<'a>>) {
            _assert_send_sync(collect_async(collection, true));
        }
    }
}