wasmtime/runtime/vm/gc/gc_runtime.rs
//! Traits for abstracting over our different garbage collectors.

use crate::prelude::*;
use crate::runtime::vm::{
    ExternRefHostDataId, ExternRefHostDataTable, GcHeapObject, SendSyncPtr, TypedGcRef, VMArrayRef,
    VMExternRef, VMGcHeader, VMGcObjectData, VMGcRef, ValRaw,
};
use crate::store::Asyncness;
use crate::vm::VMMemoryDefinition;
use core::ptr::NonNull;
use core::slice;
use core::{alloc::Layout, any::Any, marker, mem, ops::Range, ptr};
use wasmtime_environ::{GcArrayLayout, GcStructLayout, GcTypeLayouts, VMSharedTypeIndex};

/// Trait for integrating a garbage collector with the runtime.
///
/// This trait is responsible for:
///
/// * GC barriers used by runtime code (as opposed to compiled Wasm code)
///
/// * Creating and managing GC heaps for individual stores
///
/// * Running garbage collection
///
/// # Safety
///
/// The collector, its GC heaps, and GC barriers, when taken together as a
/// whole, must be safe. Additionally, they must work with the GC barriers
/// emitted into compiled Wasm code via the collector's corresponding
/// `GcCompiler` implementation. That is, if callers only call safe methods on
/// this trait (while pairing it with its associated `GcCompiler`, `GcHeap`,
/// etc.) and uphold all the documented safety invariants of this trait's
/// unsafe methods, then it must be impossible for callers to violate memory
/// safety. Implementations of this trait may not add new safety invariants,
/// beyond those already documented in this trait's interface, that callers
/// need to uphold.
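///
/// # Example
///
/// A hedged sketch of how engine-level glue might drive this trait; the
/// `heap_for_new_store` helper is illustrative, not part of Wasmtime's actual
/// API:
///
/// ```ignore
/// fn heap_for_new_store(
///     collector: &dyn GcRuntime,
///     engine: &crate::Engine,
/// ) -> Result<Box<dyn GcHeap>> {
///     // The collector's layouts describe how its GC types are laid out.
///     let _layouts = collector.layouts();
///     // Each store gets its own `GcHeap` from the collector.
///     collector.new_gc_heap(engine)
/// }
/// ```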
pub unsafe trait GcRuntime: 'static + Send + Sync {
    /// Get this collector's GC type layouts.
    fn layouts(&self) -> &dyn GcTypeLayouts;

    /// Construct a new GC heap.
    #[cfg(feature = "gc")]
    fn new_gc_heap(&self, engine: &crate::Engine) -> Result<Box<dyn GcHeap>>;
}

/// A heap that manages garbage-collected objects.
///
/// Each `wasmtime::Store` is associated with a single `GcHeap`, and a `GcHeap`
/// is only ever used with one store at a time, but `GcHeap`s may be reused
/// with new stores after their original stores are dropped. The `detach`
/// method will be called in between each such reuse. (This reuse allows for
/// better integration with the pooling allocator.)
///
/// If a `GcHeap` mapped any memory, its `Drop` implementation should unmap
/// that memory.
///
/// # Safety
///
/// The trait methods below are all safe: implementations of this trait must
/// ensure that these methods cannot be misused to create memory unsafety. The
/// expectation is that -- given that `VMGcRef` is a newtype over an index --
/// implementations perform similar tricks as Wasm linear memory
/// implementations. The heap should internally be a contiguous region of
/// memory and `VMGcRef` indices into the heap must be bounds checked
/// (explicitly or implicitly via virtual memory tricks).
///
/// Furthermore, if heap corruption occurs because (for example) a `VMGcRef`
/// from a different heap is used with this heap, then that corruption must be
/// limited to within this heap. Every heap is a mini sandbox. It follows that
/// native pointers should never be written into or read out from the GC heap,
/// since that could spread corruption from inside the GC heap out to the
/// native host heap. The host data for an `externref`, therefore, is stored in
/// a side table (`ExternRefHostDataTable`) and never inside the heap. Only an
/// id referencing a slot in that table should ever be written into the GC
/// heap.
///
/// These constraints give us a great amount of safety compared to working with
/// raw pointers. The worst that could happen is corruption local to the heap
/// and a panic, or perhaps reading stale heap data from a previous Wasm
/// instance. A corrupt `GcHeap` can *never* result in corruption of the native
/// host.
///
/// The downside is that we are introducing `heap_base + index` computations
/// and bounds checking to access GC memory, adding performance overhead. This
/// is deemed to be a worthy trade-off. Furthermore, it isn't even a clear-cut
/// performance degradation, since this allows us to use 32-bit "pointers",
/// giving us more compact data representations and the improved cache
/// utilization that implies.
pub unsafe trait GcHeap: 'static + Send + Sync {
    ////////////////////////////////////////////////////////////////////////////
    // Life Cycle GC Heap Methods

    /// Is this GC heap currently attached to a memory?
    fn is_attached(&self) -> bool;

    /// Attach this GC heap to a memory.
    ///
    /// Once attached, this GC heap can be used with Wasm.
    fn attach(&mut self, memory: crate::vm::Memory);

    /// Reset this heap.
    ///
    /// Calling this method unassociates this heap from the store that it has
    /// been associated with, making it available to be associated with a new
    /// store.
    ///
    /// This should refill free lists, reset bump pointers, etc. as if nothing
    /// were allocated in this heap (because nothing is allocated in this heap
    /// anymore).
    ///
    /// This should retain any allocated memory from the global allocator and
    /// any virtual memory mappings.
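    ///
    /// # Example
    ///
    /// A hedged sketch of the reuse cycle described above (the pooling-style
    /// plumbing around it is illustrative):
    ///
    /// ```ignore
    /// let memory = heap.detach(); // the original store is done with this heap
    /// assert!(!heap.is_attached());
    /// heap.attach(memory); // reuse the same heap with a new store
    /// ```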
    fn detach(&mut self) -> crate::vm::Memory;

    /// Eagerly ensure that tracing information is registered for the given GC
    /// type.
    ///
    /// This is called during module instantiation for every GC type in the
    /// module's type collection, and during `StructRefPre` and `ArrayRefPre`
    /// construction for host-allocated types.
    ///
    /// The default implementation is a no-op, which is appropriate for
    /// collectors that do not need per-type tracing info (e.g. the null
    /// collector).
    fn ensure_trace_info(&mut self, _ty: VMSharedTypeIndex) {}

    ////////////////////////////////////////////////////////////////////////////
    // `Any` methods

    /// Get this heap as an `&Any`.
    fn as_any(&self) -> &dyn Any;

    /// Get this heap as an `&mut Any`.
    fn as_any_mut(&mut self) -> &mut dyn Any;

    ////////////////////////////////////////////////////////////////////////////
    // No-GC Scope Methods

    /// Enter a no-GC scope.
    ///
    /// Calling the `gc` method when we are inside a no-GC scope should panic.
    ///
    /// We can enter multiple, nested no-GC scopes and this method should
    /// account for that.
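    ///
    /// # Example
    ///
    /// A sketch of the intended pairing; callers are responsible for balancing
    /// enters and exits:
    ///
    /// ```ignore
    /// heap.enter_no_gc_scope();
    /// // ... code that must not trigger a collection ...
    /// heap.exit_no_gc_scope();
    /// ```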
    fn enter_no_gc_scope(&mut self);

    /// Exit a no-GC scope.
    ///
    /// Dual to `enter_no_gc_scope`.
    fn exit_no_gc_scope(&mut self);

    ////////////////////////////////////////////////////////////////////////////
    // GC Barriers

    /// Read barrier called every time the runtime clones a GC reference.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef;

    /// Write barrier called whenever the runtime is nulling out a GC reference.
    ///
    /// Default implemented in terms of the `write_gc_ref` barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    ///
    /// The given `gc_ref` should not be used again.
    fn drop_gc_ref(&mut self, host_data_table: &mut ExternRefHostDataTable, gc_ref: VMGcRef) {
        let mut dest = Some(gc_ref);
        self.write_gc_ref(host_data_table, &mut dest, None);
    }

    /// Write barrier called every time the runtime overwrites a GC reference.
    ///
    /// The `source` is a borrowed GC reference, and should not have been
    /// cloned already for this write operation. This allows implementations to
    /// fuse the `source`'s read barrier into this write barrier.
    ///
    /// If an `externref` is reclaimed, then its associated entry in the
    /// `host_data_table` should be removed.
    ///
    /// Callers should pass valid `VMGcRef`s that belong to the given heap for
    /// both the `source` and `destination`. Failure to do so is memory safe,
    /// but may result in general failures such as panics or incorrect results.
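    ///
    /// # Example
    ///
    /// A hedged sketch of overwriting a slot through this barrier (`slot` and
    /// `incoming` are illustrative locals):
    ///
    /// ```ignore
    /// // Store `incoming` into `slot`, letting the collector fuse
    /// // `incoming`'s read barrier into the write rather than calling
    /// // `clone_gc_ref` separately.
    /// heap.write_gc_ref(host_data_table, &mut slot, incoming.as_ref());
    /// ```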
    fn write_gc_ref(
        &mut self,
        host_data_table: &mut ExternRefHostDataTable,
        destination: &mut Option<VMGcRef>,
        source: Option<&VMGcRef>,
    );

    /// Read barrier called whenever a GC reference is passed from the runtime
    /// to Wasm: an argument to a host-to-Wasm call, or a return from a
    /// Wasm-to-host call.
    ///
    /// Callers should pass a valid `VMGcRef` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef);

    ////////////////////////////////////////////////////////////////////////////
    // `externref` Methods

    /// Allocate a `VMExternRef` that is associated with the given host data
    /// id.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
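    ///
    /// # Example
    ///
    /// A hedged sketch of the grow-and-retry loop these return values imply
    /// (`grow_heap_by` is a hypothetical helper, not part of this trait):
    ///
    /// ```ignore
    /// let externref = match heap.alloc_externref(host_data_id)? {
    ///     Ok(x) => x,
    ///     Err(bytes_needed) => {
    ///         // Not enough space: grow the heap (or run a GC) and retry.
    ///         grow_heap_by(&mut heap, bytes_needed)?;
    ///         heap.alloc_externref(host_data_id)?
    ///             .map_err(|n| anyhow::anyhow!("still need {n} bytes"))?
    ///     }
    /// };
    /// ```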
    fn alloc_externref(
        &mut self,
        host_data: ExternRefHostDataId,
    ) -> Result<Result<VMExternRef, u64>>;

    /// Get the host data ID associated with the given `externref`.
    ///
    /// Callers should pass a valid `externref` that belongs to the given
    /// heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId;

    ////////////////////////////////////////////////////////////////////////////
    // Struct, array, and general GC object methods

    /// Get the header of the object that `gc_ref` points to.
    fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader;

    /// Get a mutable borrow of the header of the object that `gc_ref` points
    /// to.
    fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader;

    /// Get the size (in bytes) of the object referenced by `gc_ref`.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_size(&self, gc_ref: &VMGcRef) -> usize;

    /// Allocate a raw, uninitialized GC-managed object with the given header
    /// and layout.
    ///
    /// The object's fields and elements are left uninitialized. It is the
    /// caller's responsibility to initialize them before exposing the object
    /// to Wasm or triggering a GC.
    ///
    /// The header's described type and layout must match *for this
    /// collector*. That is, if this collector adds an extra header word to all
    /// objects, the given layout must already include space for that header
    /// word. Therefore, this method is effectively only usable with layouts
    /// derived from a `Gc{Struct,Array}Layout` returned by this collector.
    ///
    /// Failure to uphold any of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   alignment is larger than this collector's implementation limit.
    fn alloc_raw(&mut self, header: VMGcHeader, layout: Layout) -> Result<Result<VMGcRef, u64>>;

    /// Allocate a GC-managed struct or exception object of the given type and
    /// layout.
    ///
    /// The object's fields are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the object to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_struct_or_exn(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Result<VMGcRef, u64>>;

    /// Deallocate an uninitialized, GC-managed struct or exception object.
    ///
    /// This is useful if initialization of the object's fields fails, so that
    /// the object's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized fields as
    /// valid GC references.
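    ///
    /// # Example
    ///
    /// A hedged sketch of the intended pairing with
    /// `alloc_uninit_struct_or_exn` (`init_fields` is a hypothetical
    /// initializer):
    ///
    /// ```ignore
    /// if let Ok(structref) = heap.alloc_uninit_struct_or_exn(ty, &layout)? {
    ///     if let Err(e) = init_fields(&mut heap, &structref) {
    ///         // Initialization failed: eagerly reclaim the allocation so the
    ///         // collector never traces uninitialized fields.
    ///         heap.dealloc_uninit_struct_or_exn(structref);
    ///         return Err(e);
    ///     }
    /// }
    /// ```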
    fn dealloc_uninit_struct_or_exn(&mut self, structref: VMGcRef);

    /// Allocate a GC-managed array of the given type, length, and layout.
    ///
    /// The array's elements are left uninitialized. It is the caller's
    /// responsibility to initialize them before exposing the array to Wasm or
    /// triggering a GC.
    ///
    /// The `ty` and `layout` must match.
    ///
    /// Failure to do either of the above is memory safe, but may result in
    /// general failures such as panics or incorrect results.
    ///
    /// Return values:
    ///
    /// * `Ok(Ok(_))`: The allocation was successful.
    ///
    /// * `Ok(Err(n))`: There is currently not enough available space for this
    ///   allocation of size `n`. The caller should either grow the heap or run
    ///   a collection to reclaim space, and then try allocating again.
    ///
    /// * `Err(_)`: The collector cannot satisfy this allocation request, and
    ///   would not be able to even after the caller were to trigger a
    ///   collection. This could be because, for example, the requested
    ///   allocation is larger than this collector's implementation limit for
    ///   object size.
    fn alloc_uninit_array(
        &mut self,
        ty: VMSharedTypeIndex,
        len: u32,
        layout: &GcArrayLayout,
    ) -> Result<Result<VMArrayRef, u64>>;

    /// Deallocate an uninitialized, GC-managed array.
    ///
    /// This is useful if initialization of the array's elements fails, so that
    /// the array's allocation can be eagerly reclaimed, and so that the
    /// collector doesn't attempt to treat any of the uninitialized elements as
    /// valid GC references.
    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef);

    /// Get the length of the given array.
    ///
    /// Panics on out-of-bounds accesses.
    ///
    /// The given `arrayref` should be a valid array reference that belongs to
    /// this heap. Failure to do so is memory safe, but may result in general
    /// failures such as panics or incorrect results.
    fn array_len(&self, arrayref: &VMArrayRef) -> u32;

    ////////////////////////////////////////////////////////////////////////////
    // Garbage Collection Methods

    /// Get the total number of bytes currently allocated (live or
    /// dead-but-not-collected) in this heap.
    ///
    /// This is distinct from the heap capacity.
    fn allocated_bytes(&self) -> usize;

    /// Whether a GC should be performed before the next heap growth.
    ///
    /// Some collectors (e.g. the copying collector) need to perform a GC
    /// before growing the heap in certain states, to ensure that the
    /// semi-spaces remain properly balanced.
    ///
    /// Defaults to `false`.
    fn needs_gc_before_next_growth(&self) -> bool {
        false
    }

    /// Start a new garbage collection process.
    ///
    /// The given `roots` are GC roots and should not be collected (nor should
    /// anything transitively reachable from them).
    ///
    /// Upon reclaiming an `externref`, its associated entry in the
    /// `host_data_table` is removed.
    ///
    /// Callers should pass valid GC roots that belong to this heap, and the
    /// host data table associated with this heap's `externref`s. Failure to do
    /// so is memory safe, but may result in general failures such as panics or
    /// incorrect results.
    ///
    /// This method should panic if we are in a no-GC scope.
    fn gc<'a>(
        &'a mut self,
        roots: GcRootsIter<'a>,
        host_data_table: &'a mut ExternRefHostDataTable,
    ) -> Box<dyn GarbageCollection<'a> + 'a>;

    ////////////////////////////////////////////////////////////////////////////
    // JIT-Code Interaction Methods

    /// Get the pointer that will be stored in the `VMContext::gc_heap_data`
    /// field and be accessible from JIT code via collaboration with the
    /// corresponding `GcCompiler` trait.
    ///
    /// # Safety
    ///
    /// The returned pointer, if any, must remain valid as long as `self` is
    /// not dropped.
    unsafe fn vmctx_gc_heap_data(&self) -> NonNull<u8>;

    ////////////////////////////////////////////////////////////////////////////
    // Accessors for the raw bytes of the GC heap

    /// Take the underlying memory storage out of this GC heap.
    ///
    /// # Panics
    ///
    /// If this GC heap is used while the memory is taken then a panic will
    /// occur. This will also panic if the memory is already taken.
    fn take_memory(&mut self) -> crate::vm::Memory;

    /// Replace this GC heap's underlying memory storage.
    ///
    /// # Safety
    ///
    /// The `memory` must have been taken via `take_memory` and the GC heap
    /// must not have been used at all since the memory was taken. The memory
    /// must be the same size or larger than it was.
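    ///
    /// # Example
    ///
    /// A hedged sketch of the take/grow/replace protocol implied above (the
    /// `grow_memory` helper and the byte-size accessor are illustrative):
    ///
    /// ```ignore
    /// let memory = heap.take_memory();
    /// let old_len = memory.byte_size();
    /// let memory = grow_memory(memory)?;
    /// let delta = u64::try_from(memory.byte_size() - old_len).unwrap();
    /// // Safety: `memory` came from `take_memory`, the heap was untouched in
    /// // between, and the memory only grew.
    /// unsafe { heap.replace_memory(memory, delta) };
    /// ```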
    unsafe fn replace_memory(&mut self, memory: crate::vm::Memory, delta_bytes_grown: u64);

    /// Get a raw `VMMemoryDefinition` for this heap's underlying memory
    /// storage.
    ///
    /// If/when exposing this `VMMemoryDefinition` to Wasm, it is your
    /// responsibility to ensure that you do not do so in a way that violates
    /// Rust's borrowing rules (e.g. make sure there is no active
    /// `heap_slice_mut()` call at the same time) and that, if this GC heap is
    /// resized (and its base potentially moves), Wasm gets a new, updated
    /// `VMMemoryDefinition` record.
    fn vmmemory(&self) -> VMMemoryDefinition;

    /// Get a slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice(&self) -> &[u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr().cast_const();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts(ptr, len) }
    }

    /// Get a mutable slice of the raw bytes of the GC heap.
    #[inline]
    fn heap_slice_mut(&mut self) -> &mut [u8] {
        let vmmemory = self.vmmemory();
        let ptr = vmmemory.base.as_ptr();
        let len = vmmemory.current_length();
        unsafe { slice::from_raw_parts_mut(ptr, len) }
    }

    ////////////////////////////////////////////////////////////////////////////
    // Provided helper methods.

    /// Index into this heap and get a shared reference to the `T` that
    /// `gc_ref` points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index<T>(&self, gc_ref: &TypedGcRef<T>) -> &T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &self.heap_slice()[start..][..len];
        unsafe { &*(slice.as_ptr().cast::<T>()) }
    }

    /// Index into this heap and get an exclusive reference to the `T` that
    /// `gc_ref` points to.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    #[inline]
    fn index_mut<T>(&mut self, gc_ref: &TypedGcRef<T>) -> &mut T
    where
        Self: Sized,
        T: GcHeapObject,
    {
        assert!(!mem::needs_drop::<T>());
        let gc_ref = gc_ref.as_untyped();
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let len = mem::size_of::<T>();
        let slice = &mut self.heap_slice_mut()[start..][..len];
        unsafe { &mut *(slice.as_mut_ptr().cast::<T>()) }
    }

    /// Get the range of bytes that the given object occupies in the heap.
    ///
    /// # Panics
    ///
    /// Panics on out of bounds or if the `gc_ref` is an `i31ref`.
    fn object_range(&self, gc_ref: &VMGcRef) -> Range<usize> {
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let size = self.object_size(gc_ref);
        let end = start.checked_add(size).unwrap();
        start..end
    }

    /// Get a shared borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data(&self, gc_ref: &VMGcRef) -> &VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &self.heap_slice()[range];
        data.into()
    }

    /// Get a mutable borrow of the given object's data.
    ///
    /// # Panics
    ///
    /// Panics on out-of-bounds accesses or if the `gc_ref` is an `i31ref`.
    fn gc_object_data_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData {
        let range = self.object_range(gc_ref);
        let data = &mut self.heap_slice_mut()[range];
        data.into()
    }

    /// Get a pair of mutable borrows of the given objects' data.
    ///
    /// # Panics
    ///
    /// Panics if `a == b`, on out-of-bounds accesses, or if either GC ref is
    /// an `i31ref`.
    fn gc_object_data_pair(
        &mut self,
        a: &VMGcRef,
        b: &VMGcRef,
    ) -> (&mut VMGcObjectData, &mut VMGcObjectData) {
        assert_ne!(a, b);

        let a_range = self.object_range(a);
        let b_range = self.object_range(b);

        // Assert that the two objects do not overlap.
        assert!(a_range.start <= a_range.end);
        assert!(b_range.start <= b_range.end);
        assert!(a_range.end <= b_range.start || b_range.end <= a_range.start);

        let (a_data, b_data) = if a_range.start < b_range.start {
            let (a_half, b_half) = self.heap_slice_mut().split_at_mut(b_range.start);
            let b_len = b_range.end - b_range.start;
            (&mut a_half[a_range], &mut b_half[..b_len])
        } else {
            let (b_half, a_half) = self.heap_slice_mut().split_at_mut(a_range.start);
            let a_len = a_range.end - a_range.start;
            (&mut a_half[..a_len], &mut b_half[b_range])
        };

        (a_data.into(), b_data.into())
    }
}

/// A list of GC roots.
///
/// This is effectively a builder for a `GcRootsIter` that will be given to a
/// GC heap when it is time to perform garbage collection.
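///
/// # Example
///
/// A hedged sketch of building a roots list and kicking off a collection
/// (pointer validity is the caller's obligation, per the `unsafe` methods
/// below; `ptr_to_root` is illustrative):
///
/// ```ignore
/// let mut roots = GcRootsList::default();
/// unsafe {
///     roots.add_vmgcref_root(ptr_to_root, "example root");
///     let mut gc = heap.gc(roots.iter(), &mut host_data_table);
///     gc.collect();
/// }
/// roots.clear();
/// ```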
#[derive(Default)]
pub struct GcRootsList(Vec<RawGcRoot>);

// Ideally these `*mut`s would be `&mut`s and we wouldn't need as much of this
// machinery around `GcRootsList`, `RawGcRoot`, `GcRoot`, and `GcRootIter` but
// if we try that then we run into two different kinds of lifetime issues:
//
// 1. When collecting the various roots from a `&mut StoreOpaque`, we borrow
//    from `self` to push new GC roots onto the roots list. But then we want to
//    call helper methods like `self.for_each_global(...)`, but we can't
//    because there are active borrows of `self` preventing it.
//
// 2. We want to reuse the roots list and its backing storage across GCs,
//    rather than reallocate on every GC. But the only place for the roots list
//    to live such that it is easily reusable across GCs is in the store
//    itself. But the contents of the roots list (when it is non-empty, during
//    GCs) borrow from the store, which creates self-references.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
    not(feature = "gc"),
    expect(
        dead_code,
        reason = "not worth it at this time to #[cfg] away these variants",
    )
)]
enum RawGcRoot {
    Stack(SendSyncPtr<u32>),
    VMGcRef(SendSyncPtr<VMGcRef>),
    ValRaw(SendSyncPtr<ValRaw>),
}

#[cfg(feature = "gc")]
impl GcRootsList {
    /// Add a GC root that is inside a Wasm stack frame to this list.
    ///
    /// # Safety
    ///
    /// The pointer must be to a valid stack-map slot on the Wasm stack and
    /// must remain valid while registered within this `GcRootsList`.
    #[inline]
    pub unsafe fn add_wasm_stack_root(&mut self, ptr_to_root: SendSyncPtr<u32>) {
        unsafe {
            log::trace!(
                "Adding Wasm stack root: {:#p} -> {:#p}",
                ptr_to_root,
                VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).unwrap()
            );
            debug_assert!(VMGcRef::from_raw_u32(*ptr_to_root.as_ref()).is_some());
        }
        self.0.push(RawGcRoot::Stack(ptr_to_root));
    }

    /// Add a GC root to this list.
    ///
    /// # Safety
    ///
    /// The pointer must be to a valid `VMGcRef` and must remain valid while
    /// registered within this `GcRootsList`.
    #[inline]
    pub unsafe fn add_vmgcref_root(&mut self, ptr_to_root: SendSyncPtr<VMGcRef>, why: &str) {
        unsafe {
            log::trace!(
                "Adding VMGcRef root: {why}: {:#p}",
                ptr_to_root.as_ref().unchecked_copy()
            );
        }
        self.0.push(RawGcRoot::VMGcRef(ptr_to_root))
    }

    /// Add a GC root to this list.
    ///
    /// # Safety
    ///
    /// The pointer must be to a valid `ValRaw` that is a GC reference and must
    /// remain valid while registered within this `GcRootsList`.
    #[inline]
    pub unsafe fn add_val_raw_root(&mut self, ptr_to_root: SendSyncPtr<ValRaw>, why: &str) {
        unsafe {
            log::trace!(
                "Adding ValRaw root: {why}: {:#x}",
                ptr_to_root.as_ref().get_anyref()
            );
        }
        self.0.push(RawGcRoot::ValRaw(ptr_to_root))
    }

    /// Get an iterator over all roots in this list.
    ///
    /// # Safety
    ///
    /// Callers must ensure that all the pointers to GC roots that have been
    /// added to this list are valid for the duration of the `'a` lifetime.
    #[inline]
    pub unsafe fn iter<'a>(&'a mut self) -> GcRootsIter<'a> {
        GcRootsIter {
            list: self,
            index: 0,
        }
    }

    /// Is this list empty?
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }

    /// Clear this GC roots list.
    #[inline]
    pub fn clear(&mut self) {
        self.0.clear();
    }
}

/// An iterator over all the roots in a `GcRootsList`.
pub struct GcRootsIter<'a> {
    list: &'a mut GcRootsList,
    index: usize,
}

impl<'a> Iterator for GcRootsIter<'a> {
    type Item = GcRoot<'a>;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        let root = GcRoot {
            raw: self.list.0.get(self.index).copied()?,
            _phantom: marker::PhantomData,
        };
        self.index += 1;
        Some(root)
    }
}

/// A GC root.
///
/// This is, effectively, a mutable reference to a `VMGcRef`.
///
/// Collector implementations should update the `VMGcRef` if they move the
/// `VMGcRef`'s referent during the course of a GC.
#[derive(Debug)]
pub struct GcRoot<'a> {
    raw: RawGcRoot,
    _phantom: marker::PhantomData<&'a mut VMGcRef>,
}

impl GcRoot<'_> {
    /// Is this root from inside a Wasm stack frame?
    #[inline]
    pub fn is_on_wasm_stack(&self) -> bool {
        matches!(self.raw, RawGcRoot::Stack(_))
    }

    /// Get this GC root.
    ///
    /// Does NOT run GC barriers.
    #[inline]
    pub fn get(&self) -> VMGcRef {
        match self.raw {
            RawGcRoot::VMGcRef(ptr) => unsafe { ptr::read(ptr.as_ptr()) },
            RawGcRoot::Stack(ptr) => unsafe {
                let raw: u32 = ptr::read(ptr.as_ptr());
                VMGcRef::from_raw_u32(raw).expect("non-null")
            },
            RawGcRoot::ValRaw(ptr) => unsafe {
                let val: ValRaw = ptr::read(ptr.as_ptr());
                val.get_vmgcref().expect("non-null")
            },
        }
    }

    /// Set this GC root.
    ///
    /// Does NOT run GC barriers.
    ///
    /// Collector implementations should use this method to update GC root
    /// pointers after the collector moves the GC object that the root is
    /// referencing.
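    ///
    /// # Example
    ///
    /// A hedged sketch of a moving collector's root-fixup loop
    /// (`new_location` is a hypothetical forwarding-table lookup):
    ///
    /// ```ignore
    /// for mut root in roots_iter {
    ///     let old = root.get();
    ///     if let Some(new) = new_location(&old) {
    ///         // The referent moved; point the root at its new address.
    ///         root.set(new);
    ///     }
    /// }
    /// ```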
    pub fn set(&mut self, new_ref: VMGcRef) {
        match self.raw {
            RawGcRoot::VMGcRef(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref);
            },
            RawGcRoot::Stack(ptr) => unsafe {
                ptr::write(ptr.as_ptr(), new_ref.as_raw_u32());
            },
            RawGcRoot::ValRaw(ptr) => unsafe {
                let val = ValRaw::vmgcref(Some(new_ref));
                ptr::write(ptr.as_ptr(), val);
            },
        }
    }
}

/// A garbage collection process.
///
/// Implementations define the `collect_increment` method, and then consumers
/// can either use
///
/// * `GarbageCollection::collect` for synchronous code, or
///
/// * `collect_async(Box<dyn GarbageCollection>)` for async code.
///
/// When using fuel and/or epochs, consumers can also use `collect_increment`
/// directly and choose to abandon further execution in this GC's heap's whole
/// store if the GC is taking too long to complete.
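///
/// # Example
///
/// A hedged sketch of driving a GC incrementally under an external budget, as
/// described above (`budget_exhausted` is a hypothetical check):
///
/// ```ignore
/// fn drive(gc: &mut dyn GarbageCollection<'_>) -> bool {
///     loop {
///         match gc.collect_increment() {
///             GcProgress::Complete => return true,
///             // Give up on this store if the GC is taking too long.
///             GcProgress::Continue if budget_exhausted() => return false,
///             GcProgress::Continue => continue,
///         }
///     }
/// }
/// ```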
pub trait GarbageCollection<'a>: Send + Sync {
    /// Perform an incremental slice of this garbage collection process.
    ///
    /// Upon completion of the slice, a `GcProgress` is returned which informs
    /// the caller whether to continue driving this GC process forward and
    /// executing more slices (`GcProgress::Continue`) or whether the GC
    /// process has finished (`GcProgress::Complete`).
    ///
    /// The mutator does *not* run in between increments. This method exists
    /// solely to allow for cooperative yielding.
    fn collect_increment(&mut self) -> GcProgress;

    /// Run this GC process to completion.
    ///
    /// Keeps calling `collect_increment` in a loop until the GC process is
    /// complete.
    fn collect(&mut self) {
        loop {
            match self.collect_increment() {
                GcProgress::Continue => continue,
                GcProgress::Complete => return,
            }
        }
    }
}

/// The result of doing an incremental amount of GC.
pub enum GcProgress {
    /// There is still more work to do.
    Continue,
    /// The GC is complete.
    Complete,
}

/// Asynchronously run the given garbage collection process to completion,
/// cooperatively yielding back to the event loop after each increment of work.
pub async fn collect_async<'a>(
    mut collection: Box<dyn GarbageCollection<'a> + 'a>,
    asyncness: Asyncness,
    yield_fn: impl AsyncFn(),
) {
    #[cfg(not(feature = "async"))]
    {
        _ = yield_fn;
    }

    loop {
        match collection.collect_increment() {
            GcProgress::Continue => {
                if asyncness != Asyncness::No {
                    #[cfg(feature = "async")]
                    yield_fn().await
                }
            }
            GcProgress::Complete => return,
        }
    }
}

#[cfg(all(test, feature = "async"))]
mod collect_async_tests {
    use super::*;

    #[test]
    fn is_send_and_sync() {
        fn _assert_send_sync<T: Send + Sync>(_: T) {}

        fn _foo<'a>(collection: Box<dyn GarbageCollection<'a>>) {
            _assert_send_sync(collect_async(collection, Asyncness::Yes, async || ()));
        }
    }
}
849}