wasmtime/runtime/vm/gc/gc_runtime.rs
//! Traits for abstracting over our different garbage collectors.

use crate::prelude::*;
use crate::runtime::vm::{
    ExternRefHostDataId, ExternRefHostDataTable, GcHeapObject, SendSyncPtr, TypedGcRef, VMArrayRef,
    VMExternRef, VMGcHeader, VMGcObjectData, VMGcRef, VMStructRef,
};
use crate::vm::VMMemoryDefinition;
use core::ptr::NonNull;
use core::slice;
use core::{alloc::Layout, any::Any, marker, mem, ops::Range, ptr};
use wasmtime_environ::{GcArrayLayout, GcStructLayout, GcTypeLayouts, VMSharedTypeIndex};

/// Trait for integrating a garbage collector with the runtime.
///
/// This trait is responsible for:
///
/// * GC barriers used by runtime code (as opposed to compiled Wasm code)
///
/// * Creating and managing GC heaps for individual stores
///
/// * Running garbage collection
///
/// # Safety
///
/// The collector, its GC heaps, and GC barriers when taken together as a whole
/// must be safe. Additionally, they must work with the GC barriers emitted into
/// compiled Wasm code via the collector's corresponding `GcCompiler`
/// implementation. That is, if callers only call safe methods on this trait
/// (while pairing it with its associated `GcCompiler`, `GcHeap`, etc.) and
/// uphold all the documented safety invariants of this trait's unsafe methods,
/// then it must be impossible for callers to violate memory safety.
/// Implementations of this trait may not add new safety invariants, beyond
/// those already documented in this trait's interface, that callers need to
/// uphold.
pub unsafe trait GcRuntime: 'static + Send + Sync {
    /// Get this collector's GC type layouts.
    fn layouts(&self) -> &dyn GcTypeLayouts;

    /// Construct a new GC heap.
    #[cfg(feature = "gc")]
    fn new_gc_heap(&self, engine: &crate::Engine) -> Result<Box<dyn GcHeap>>;
}

/// A heap that manages garbage-collected objects.
///
/// Each `wasmtime::Store` is associated with a single `GcHeap`, and a `GcHeap`
/// is only ever used with one store at a time, but `GcHeap`s may be reused with
/// new stores after their original stores are dropped. The `detach` method is
/// called in between each such reuse. (This reuse allows for better integration
/// with the pooling allocator.)
///
/// If a `GcHeap` mapped any memory, its `Drop` implementation should unmap that
/// memory.
///
/// # Safety
///
/// The trait methods below are all safe: implementations of this trait must
/// ensure that these methods cannot be misused to create memory unsafety. The
/// expectation is that -- given that `VMGcRef` is a newtype over an index --
/// implementations perform similar tricks as Wasm linear memory
/// implementations. The heap should internally be a contiguous region of memory
/// and `VMGcRef` indices into the heap must be bounds checked (explicitly or
/// implicitly via virtual memory tricks).
///
/// Furthermore, if heap corruption occurs because (for example) a `VMGcRef`
/// from a different heap is used with this heap, then that corruption must be
/// limited to within this heap. Every heap is a mini sandbox. It follows that
/// native pointers should never be written into or read out from the GC heap,
/// since that could spread corruption from inside the GC heap out to the native
/// host heap. The host data for an `externref`, therefore, is stored in a side
/// table (`ExternRefHostDataTable`) and never inside the heap. Only an id
/// referencing a slot in that table should ever be written into the GC heap.
///
/// These constraints give us great amounts of safety compared to working with
/// raw pointers. The worst that could happen is corruption local to the heap
/// and a panic, or perhaps reading stale heap data from a previous Wasm
/// instance. A corrupt `GcHeap` can *never* result in the native host's
/// corruption.
///
/// The downside is that we are introducing `heap_base + index` computations
/// and bounds checking to access GC memory, adding performance overhead. This
/// is deemed to be a worthy trade-off. Furthermore, it isn't even a clear-cut
/// performance degradation since this allows us to use 32-bit "pointers",
/// giving us more compact data representations and the improved cache
/// utilization that implies.
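///
/// As a minimal, hypothetical sketch of the index-based access pattern
/// described above (a plain byte slice stands in for the heap here; this is
/// not this crate's API):
///
/// ```ignore
/// /// Read a little-endian `u32` field at `offset` within the object that
/// /// `gc_ref_index` refers to, bounds checking the access against the heap.
/// fn read_u32_field(heap: &[u8], gc_ref_index: u32, offset: u32) -> u32 {
///     let start = gc_ref_index.checked_add(offset).unwrap();
///     let start = usize::try_from(start).unwrap();
///     let end = start.checked_add(4).unwrap();
///     let bytes = heap.get(start..end).expect("out-of-bounds GC heap access");
///     u32::from_le_bytes(bytes.try_into().unwrap())
/// }
/// ```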
pub unsafe trait GcHeap: 'static + Send + Sync {
    ////////////////////////////////////////////////////////////////////////////
    // Life Cycle GC Heap Methods

    /// Is this GC heap currently attached to a memory?
    fn is_attached(&self) -> bool;

    /// Attach this GC heap to a memory.
    ///
    /// Once attached, this GC heap can be used with Wasm.
    fn attach(&mut self, memory: crate::vm::Memory);

    /// Reset this heap.
    ///
    /// Calling this method disassociates this heap from the store that it has
    /// been associated with, making it available to be associated with a new
    /// store.
    ///
    /// This should refill free lists, reset bump pointers, etc., as if nothing
    /// were allocated in this heap (because nothing is allocated in this heap
    /// anymore).
    ///
    /// This should retain any allocated memory from the global allocator and
    /// any virtual memory mappings.
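    ///
    /// A rough sketch of the intended reuse pattern (hypothetical caller-side
    /// code; the `heap` variable stands in for some `GcHeap` implementation):
    ///
    /// ```ignore
    /// // Store A is done with this heap: detach it, keeping its storage.
    /// let memory = heap.detach();
    /// debug_assert!(!heap.is_attached());
    /// // Later, reuse the same heap (and its storage) with store B.
    /// heap.attach(memory);
    /// ```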
    fn detach(&mut self) -> crate::vm::Memory;

    ////////////////////////////////////////////////////////////////////////////
    // `Any` methods

    /// Get this heap as an `&Any`.
    fn as_any(&self) -> &dyn Any;

    /// Get this heap as an `&mut Any`.
    fn as_any_mut(&mut self) -> &mut dyn Any;

    ////////////////////////////////////////////////////////////////////////////
    // No-GC Scope Methods

    /// Enter a no-GC scope.
    ///
    /// Calling the `gc` method when we are inside a no-GC scope should panic.
    ///
    /// We can enter multiple, nested no-GC scopes and this method should
    /// account for that.
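    ///
    /// A minimal sketch of one way to track that nesting (the `no_gc_count`
    /// field is an assumption for illustration, not required by this trait):
    ///
    /// ```ignore
    /// struct NoGcScopes {
    ///     no_gc_count: u64,
    /// }
    ///
    /// impl NoGcScopes {
    ///     fn enter(&mut self) {
    ///         self.no_gc_count += 1;
    ///     }
    ///
    ///     fn exit(&mut self) {
    ///         debug_assert!(self.no_gc_count > 0);
    ///         self.no_gc_count -= 1;
    ///     }
    ///
    ///     fn assert_can_gc(&self) {
    ///         assert_eq!(self.no_gc_count, 0, "cannot GC within a no-GC scope");
    ///     }
    /// }
    /// ```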
    fn enter_no_gc_scope(&mut self);

    /// Exit a no-GC scope.
    ///
    /// Dual to `enter_no_gc_scope`.
    fn exit_no_gc_scope(&mut self);

    ////////////////////////////////////////////////////////////////////////////
    // GC Barriers

    /// Read barrier called every time the runtime clones a GC reference.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef;

    /// Write barrier called whenever the runtime is nulling out a GC reference.
    ///
    /// Default implemented in terms of the `write_gc_ref` barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    ///
    /// The given `gc_ref` should not be used again.
    fn drop_gc_ref(&mut self, host_data_table: &mut ExternRefHostDataTable, gc_ref: VMGcRef) {
        let mut dest = Some(gc_ref);
        self.write_gc_ref(host_data_table, &mut dest, None);
    }

    /// Write barrier called every time the runtime overwrites a GC reference.
    ///
    /// The `source` is a borrowed GC reference, and should not have been cloned
    /// already for this write operation. This allows implementations to fuse
    /// the `source`'s read barrier into this write barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given heap for
    /// both the `source` and `destination`. Failure to do so is memory safe,
    /// but may result in general failures such as panics or incorrect results.
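    ///
    /// As a hedged sketch, a reference-counting-style collector might
    /// implement this barrier roughly as follows (the `inc_ref` and
    /// `dec_ref_and_maybe_reclaim` helpers are assumptions for illustration,
    /// not part of this trait):
    ///
    /// ```ignore
    /// fn write_gc_ref(
    ///     &mut self,
    ///     host_data_table: &mut ExternRefHostDataTable,
    ///     destination: &mut Option<VMGcRef>,
    ///     source: Option<&VMGcRef>,
    /// ) {
    ///     // Fused read barrier: retain the source before releasing the old
    ///     // destination so that self-assignment cannot transiently reclaim
    ///     // the referent.
    ///     let retained = source.map(|gc_ref| self.inc_ref(gc_ref));
    ///     if let Some(old) = destination.take() {
    ///         // Drop the old value; if it was the last reference to an
    ///         // `externref`, also remove its `host_data_table` entry.
    ///         self.dec_ref_and_maybe_reclaim(host_data_table, old);
    ///     }
    ///     *destination = retained;
    /// }
    /// ```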
    fn write_gc_ref(
        &mut self,
        host_data_table: &mut ExternRefHostDataTable,
        destination: &mut Option<VMGcRef>,
        source: Option<&VMGcRef>,
    );

    /// Read barrier called whenever a GC reference is passed from the runtime
    /// to Wasm: an argument to a host-to-Wasm call, or a return from a
    /// Wasm-to-host call.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef);

    ////////////////////////////////////////////////////////////////////////////
    // `externref` Methods

    /// Allocate a `VMExternRef` associated with the given `host_data` ID,
    /// which refers to an entry in the `ExternRefHostDataTable`.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
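    ///
    /// A hedged, caller-side sketch of handling this return convention (the
    /// `grow_gc_heap` helper and `host_data_id` variable are assumptions for
    /// illustration):
    ///
    /// ```ignore
    /// let externref = match heap.alloc_externref(host_data_id)? {
    ///     // Allocation succeeded on the first try.
    ///     Ok(x) => x,
    ///     // Not enough space: grow the heap (or run a GC) and retry once.
    ///     Err(bytes_needed) => {
    ///         grow_gc_heap(&mut heap, bytes_needed)?;
    ///         match heap.alloc_externref(host_data_id)? {
    ///             Ok(x) => x,
    ///             Err(_) => bail!("out of memory in the GC heap"),
    ///         }
    ///     }
    /// };
    /// ```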
    fn alloc_externref(
        &mut self,
        host_data: ExternRefHostDataId,
    ) -> Result<Result<VMExternRef, u64>>;

    /// Get the host data ID associated with the given `externref`.
    ///
    /// Callers should pass a valid `externref` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId;

    ////////////////////////////////////////////////////////////////////////////
    // Struct, array, and general GC object methods

    /// Get the header of the object that `gc_ref` points to.
    fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader;

    /// Get a mutable borrow of the header of the object that `gc_ref` points
    /// to.
    fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader;

    /// Get the size (in bytes) of the object referenced by `gc_ref`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_size(&self, gc_ref: &VMGcRef) -> usize;

    /// Allocate a raw, uninitialized GC-managed object with the given header
    /// and layout.
    ///
    /// The object's fields and elements are left uninitialized. It is the
    /// caller's responsibility to initialize them before exposing the object to
    /// Wasm or triggering a GC.
    ///
    /// The header's described type and layout must match *for this
    /// collector*. That is, if this collector adds an extra header word to all
    /// objects, the given layout must already include space for that header
    /// word. Therefore, this method is effectively only usable with layouts
    /// derived from a `Gc{Struct,Array}Layout` returned by this collector.
    ///
    /// Failure to uphold any of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   alignment is larger than this collector's implementation limit.
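    ///
    /// A hedged, caller-side sketch of allocating via a collector-provided
    /// layout (the `size` and `align` field names on `GcStructLayout` and the
    /// surrounding variables are assumptions for illustration):
    ///
    /// ```ignore
    /// // Derive the concrete layout from this collector's own layout
    /// // computation so that any collector-private header space is included.
    /// let struct_layout = gc_runtime.layouts().struct_layout(&wasm_struct_ty);
    /// let layout = Layout::from_size_align(
    ///     usize::try_from(struct_layout.size).unwrap(),
    ///     usize::try_from(struct_layout.align).unwrap(),
    /// )
    /// .unwrap();
    /// let gc_ref = heap.alloc_raw(header, layout)?;
    /// ```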
    fn alloc_raw(&mut self, header: VMGcHeader, layout: Layout) -> Result<Result<VMGcRef, u64>>;

    /// Allocate a GC-managed struct of the given type and layout.
    ///
    /// The struct's fields are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the struct to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_struct(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Result<VMStructRef, u64>>;

    /// Deallocate an uninitialized, GC-managed struct.
    ///
    /// This is useful if initialization of the struct's fields fails, so that
    /// the struct's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized fields as
    /// valid GC references, or something like that.
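    ///
    /// A hedged, caller-side sketch of the alloc/initialize/dealloc-on-failure
    /// pattern (the `init_fields` helper is an assumption for illustration):
    ///
    /// ```ignore
    /// let structref = match heap.alloc_uninit_struct(ty, &layout)? {
    ///     Ok(s) => s,
    ///     Err(bytes_needed) => bail!("need {bytes_needed} more bytes in the GC heap"),
    /// };
    /// if let Err(e) = init_fields(&mut heap, &structref) {
    ///     // Initialization failed: reclaim the allocation eagerly so the
    ///     // collector never sees its uninitialized fields.
    ///     heap.dealloc_uninit_struct(structref);
    ///     return Err(e);
    /// }
    /// ```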
    fn dealloc_uninit_struct(&mut self, structref: VMStructRef);

    /// Allocate a GC-managed array of the given type and layout.
    ///
    /// The array's elements are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the array to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_array(
        &mut self,
        ty: VMSharedTypeIndex,
        len: u32,
        layout: &GcArrayLayout,
    ) -> Result<Result<VMArrayRef, u64>>;

    /// Deallocate an uninitialized, GC-managed array.
    ///
    /// This is useful if initialization of the array's elements fails, so that
    /// the array's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized elements as
    /// valid GC references, or something like that.
    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef);

    /// Get the length of the given array.
    ///
    /// Panics on out-of-bounds accesses.
    ///
    /// The given `arrayref` should be a valid array reference that belongs to
    /// this heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn array_len(&self, arrayref: &VMArrayRef) -> u32;

    ////////////////////////////////////////////////////////////////////////////
    // Garbage Collection Methods

    /// Start a new garbage collection process.
    ///
    /// The given `roots` are GC roots and should not be collected (nor anything
    /// transitively reachable from them).
    ///
    /// Upon reclaiming an `externref`, its associated entry in the
    /// `host_data_table` is removed.
    ///
    /// Callers should pass valid GC roots that belong to this heap, and the
    /// host data table associated with this heap's `externref`s. Failure to do
    /// so is memory safe, but may result in general failures such as panics or
    /// incorrect results.
    ///
    /// This method should panic if we are in a no-GC scope.
    fn gc<'a>(
        &'a mut self,
        roots: GcRootsIter<'a>,
        host_data_table: &'a mut ExternRefHostDataTable,
    ) -> Box<dyn GarbageCollection<'a> + 'a>;

    ////////////////////////////////////////////////////////////////////////////
    // JIT-Code Interaction Methods

    /// Get the pointer that will be stored in the `VMContext::gc_heap_data`
    /// field and be accessible from JIT code via collaboration with the
    /// corresponding `GcCompiler` trait.
    ///
    /// # Safety
    ///
    /// The returned pointer, if any, must remain valid as long as `self` is not
    /// dropped.
    unsafe fn vmctx_gc_heap_data(&self) -> NonNull<u8>;

    ////////////////////////////////////////////////////////////////////////////
    // Accessors for the raw bytes of the GC heap

    /// Take the underlying memory storage out of this GC heap.
    ///
    /// # Safety
    ///
    /// You may not use this GC heap again until after you replace the memory.
    unsafe fn take_memory(&mut self) -> crate::vm::Memory;

    /// Replace this GC heap's underlying memory storage.
    ///
    /// # Safety
    ///
    /// The `memory` must have been taken via `take_memory` and the GC heap must
    /// not have been used at all since the memory was taken. The memory must be
    /// the same size or larger than it was.
    unsafe fn replace_memory(&mut self, memory: crate::vm::Memory, delta_bytes_grown: u64);

    /// Get a raw `VMMemoryDefinition` for this heap's underlying memory storage.
    ///
    /// If/when exposing this `VMMemoryDefinition` to Wasm, it is your
    /// responsibility to ensure that you do not do that in such a way as to
    /// violate Rust's borrowing rules (e.g. make sure there is no active
    /// `heap_slice_mut()` call at the same time) and that if this GC heap is
    /// resized (and its base potentially moves) then that Wasm gets a new,
    /// updated `VMMemoryDefinition` record.
    fn vmmemory(&self) -> VMMemoryDefinition;

    /// Get a slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice(&self) -> &[u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr().cast_const();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// Get a mutable slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice_mut(&mut self) -> &mut [u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    ////////////////////////////////////////////////////////////////////////////
    // Provided helper methods.

    /// Index into this heap and get a shared reference to the `T` that `gc_ref`
    /// points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index<T>(&self, gc_ref: &TypedGcRef<T>) -> &T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &self.heap_slice()[start..][..len];
        unsafe { &*(slice.as_ptr().cast::<T>()) }
    }

    /// Index into this heap and get an exclusive reference to the `T` that
    /// `gc_ref` points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index_mut<T>(&mut self, gc_ref: &TypedGcRef<T>) -> &mut T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &mut self.heap_slice_mut()[start..][..len];
        unsafe { &mut *(slice.as_mut_ptr().cast::<T>()) }
    }

    /// Get the range of bytes that the given object occupies in the heap.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_range(&self, gc_ref: &VMGcRef) -> Range<usize> {
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let size = self.object_size(gc_ref);
        let end = start.checked_add(size).unwrap();
        start..end
    }

    /// Get a shared borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data(&self, gc_ref: &VMGcRef) -> &VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &self.heap_slice()[range];
        data.into()
    }

    /// Get a mutable borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &mut self.heap_slice_mut()[range];
        data.into()
    }

    /// Get a pair of mutable borrows of the given objects' data.
    ///
    /// # Panics
    ///
    /// Panics if `a == b` or on out-of-bounds accesses or if either GC ref is
    /// an `i31ref`.
    fn gc_object_data_pair(
        &mut self,
        a: &VMGcRef,
        b: &VMGcRef,
    ) -> (&mut VMGcObjectData, &mut VMGcObjectData) {
        assert_ne!(a, b);

        let a_range = self.object_range(a);
        let b_range = self.object_range(b);

        // Assert that the two objects do not overlap.
        assert!(a_range.start <= a_range.end);
        assert!(b_range.start <= b_range.end);
        assert!(a_range.end <= b_range.start || b_range.end <= a_range.start);

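        // Split the heap at the start of whichever object begins later so that
        // we can hand out two disjoint `&mut` borrows, one from each half.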
        let (a_data, b_data) = if a_range.start < b_range.start {
            let (a_half, b_half) = self.heap_slice_mut().split_at_mut(b_range.start);
            let b_len = b_range.end - b_range.start;
            (&mut a_half[a_range], &mut b_half[..b_len])
        } else {
            let (b_half, a_half) = self.heap_slice_mut().split_at_mut(a_range.start);
            let a_len = a_range.end - a_range.start;
            (&mut a_half[..a_len], &mut b_half[b_range])
        };

        (a_data.into(), b_data.into())
    }
}

/// A list of GC roots.
///
/// This is effectively a builder for a `GcRootsIter` that will be given to a GC
/// heap when it is time to perform garbage collection.
#[derive(Default)]
pub struct GcRootsList(Vec<RawGcRoot>);

// Ideally these `*mut`s would be `&mut`s and we wouldn't need as much of this
// machinery around `GcRootsList`, `RawGcRoot`, `GcRoot`, and `GcRootIter` but
// if we try that then we run into two different kinds of lifetime issues:
//
// 1. When collecting the various roots from a `&mut StoreOpaque`, we borrow
//    from `self` to push new GC roots onto the roots list. But then we want to
//    call helper methods like `self.for_each_global(...)`, but we can't because
//    there are active borrows of `self` preventing it.
//
// 2. We want to reuse the roots list and its backing storage across GCs, rather
//    than reallocate on every GC. But the only place for the roots list to live
//    such that it is easily reusable across GCs is in the store itself. But the
//    contents of the roots list (when it is non-empty, during GCs) borrow from
//    the store, which creates self-references.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
    not(feature = "gc"),
    expect(
        dead_code,
        reason = "not worth it at this time to #[cfg] away these variants",
    )
)]
enum RawGcRoot {
    Stack(SendSyncPtr<u32>),
    NonStack(SendSyncPtr<VMGcRef>),
}

#[cfg(feature = "gc")]
impl GcRootsList {
    /// Add a GC root that is inside a Wasm stack frame to this list.
    #[inline]
    pub unsafe fn add_wasm_stack_root(&mut self, ptr_to_root: SendSyncPtr<u32>) {
        log::trace!(
            "Adding Wasm stack root: {:#p} -> {:#p}",
            ptr_to_root,
            VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).unwrap()
        );
        debug_assert!(VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).is_some());
        self.0.push(RawGcRoot::Stack(ptr_to_root));
    }

    /// Add a GC root to this list.
    #[inline]
    pub unsafe fn add_root(&mut self, ptr_to_root: SendSyncPtr<VMGcRef>, why: &str) {
        log::trace!(
            "Adding non-stack root: {why}: {:#p}",
            ptr_to_root.as_ref().unchecked_copy()
        );
        self.0.push(RawGcRoot::NonStack(ptr_to_root))
    }

    /// Get an iterator over all roots in this list.
    ///
    /// # Safety
    ///
    /// Callers must ensure that all the pointers to GC roots that have been
    /// added to this list are valid for the duration of the `'a` lifetime.
    #[inline]
    pub unsafe fn iter<'a>(&'a mut self) -> GcRootsIter<'a> {
        GcRootsIter {
            list: self,
            index: 0,
        }
    }

    /// Is this list empty?
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Clear this GC roots list.
    #[inline]
    pub fn clear(&mut self) {
        self.0.clear();
    }
}

/// An iterator over all the roots in a `GcRootsList`.
pub struct GcRootsIter<'a> {
    list: &'a mut GcRootsList,
    index: usize,
}

impl<'a> Iterator for GcRootsIter<'a> {
    type Item = GcRoot<'a>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let root = GcRoot {
            raw: self.list.0.get(self.index).copied()?,
            _phantom: marker::PhantomData,
        };
        self.index += 1;
        Some(root)
    }
}

/// A GC root.
///
/// This is, effectively, a mutable reference to a `VMGcRef`.
///
/// Collector implementations should update the `VMGcRef` if they move the
/// `VMGcRef`'s referent during the course of a GC.
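///
/// A hedged sketch of how a moving collector might process the roots it is
/// given during `GcHeap::gc` (the `copy_to_new_space` helper is an assumption
/// for illustration):
///
/// ```ignore
/// for mut root in roots {
///     let old = root.get();
///     // Relocate the referent, then update the root to point at the new
///     // location. Note that no GC barriers run here.
///     let new = copy_to_new_space(&old);
///     root.set(new);
/// }
/// ```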
#[derive(Debug)]
pub struct GcRoot<'a> {
    raw: RawGcRoot,
    _phantom: marker::PhantomData<&'a mut VMGcRef>,
}

impl GcRoot<'_> {
    /// Is this root from inside a Wasm stack frame?
    #[inline]
    pub fn is_on_wasm_stack(&self) -> bool {
        matches!(self.raw, RawGcRoot::Stack(_))
    }

    /// Get this GC root.
    ///
    /// Does NOT run GC barriers.
    #[inline]
    pub fn get(&self) -> VMGcRef {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe { ptr::read(ptr.as_ptr()) },
            RawGcRoot::Stack(ptr) => unsafe {
                let raw: u32 = ptr::read(ptr.as_ptr());
                VMGcRef::from_raw_u32(raw).expect("non-null")
            },
        }
    }

    /// Set this GC root.
    ///
    /// Does NOT run GC barriers.
    ///
    /// Collector implementations should use this method to update GC root
    /// pointers after the collector moves the GC object that the root is
    /// referencing.
    pub fn set(&mut self, new_ref: VMGcRef) {
        match self.raw {
            RawGcRoot::NonStack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref);
            },
            RawGcRoot::Stack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref.as_raw_u32());
            },
        }
    }
}

/// A garbage collection process.
///
/// Implementations define the `collect_increment` method, and then consumers
/// can either use
///
/// * `GarbageCollection::collect` for synchronous code, or
///
/// * `collect_async(Box<dyn GarbageCollection>)` for async code.
///
/// When using fuel and/or epochs, consumers can also use `collect_increment`
/// directly and choose to abandon further execution in this GC's heap's whole
/// store if the GC is taking too long to complete.
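///
/// A hedged sketch of driving a collection with a bounded number of increments
/// (the `max_increments` bound and surrounding variables are assumptions for
/// illustration):
///
/// ```ignore
/// let mut collection = heap.gc(roots, &mut host_data_table);
/// for _ in 0..max_increments {
///     match collection.collect_increment() {
///         GcProgress::Continue => continue,
///         GcProgress::Complete => break,
///     }
/// }
/// ```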
pub trait GarbageCollection<'a>: Send + Sync {
    /// Perform an incremental slice of this garbage collection process.
    ///
    /// Upon completion of the slice, a `GcProgress` is returned which informs
    /// the caller whether to continue driving this GC process forward and
    /// executing more slices (`GcProgress::Continue`) or whether the GC process
    /// has finished (`GcProgress::Complete`).
    ///
    /// The mutator does *not* run in between increments. This method exists
    /// solely to allow cooperative yielding back to the caller in between
    /// increments of GC work.
    fn collect_increment(&mut self) -> GcProgress;

    /// Run this GC process to completion.
    ///
    /// Keeps calling `collect_increment` in a loop until the GC process is
    /// complete.
    fn collect(&mut self) {
        loop {
            match self.collect_increment() {
                GcProgress::Continue => continue,
                GcProgress::Complete => return,
            }
        }
    }
}

/// The result of doing an incremental amount of GC.
pub enum GcProgress {
    /// There is still more work to do.
    Continue,
    /// The GC is complete.
    Complete,
}

/// Asynchronously run the given garbage collection process to completion,
/// cooperatively yielding back to the event loop after each increment of work.
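///
/// A hedged usage sketch from async host code (the surrounding variables are
/// assumptions for illustration):
///
/// ```ignore
/// let collection = heap.gc(roots, &mut host_data_table);
/// collect_async(collection).await;
/// ```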
#[cfg(feature = "async")]
pub async fn collect_async<'a>(mut collection: Box<dyn GarbageCollection<'a> + 'a>) {
    loop {
        match collection.collect_increment() {
            GcProgress::Continue => crate::runtime::vm::Yield::new().await,
            GcProgress::Complete => return,
        }
    }
}

#[cfg(all(test, feature = "async"))]
mod collect_async_tests {
    use super::*;

    #[test]
    fn is_send_and_sync() {
        fn _assert_send_sync<T: Send + Sync>(_: T) {}

        fn _foo<'a>(collection: Box<dyn GarbageCollection<'a>>) {
            _assert_send_sync(collect_async(collection));
        }
    }
765}