Skip to main content

wasmtime/runtime/vm/
gc.rs

1#[cfg(feature = "gc")]
2mod enabled;
3#[cfg(feature = "gc")]
4pub use enabled::*;
5
6#[cfg(not(feature = "gc"))]
7mod disabled;
8#[cfg(not(feature = "gc"))]
9pub use disabled::*;
10
11mod func_ref;
12mod gc_ref;
13mod gc_runtime;
14mod host_data;
15mod i31;
16
17pub use func_ref::*;
18pub use gc_ref::*;
19pub use gc_runtime::*;
20pub use host_data::*;
21pub use i31::*;
22
23use crate::prelude::*;
24use crate::runtime::vm::{GcHeapAllocationIndex, VMMemoryDefinition};
25use crate::store::Asyncness;
26use core::any::Any;
27use core::mem::MaybeUninit;
28use core::{alloc::Layout, num::NonZeroU32};
29use wasmtime_environ::{GcArrayLayout, GcStructLayout, VMGcKind, VMSharedTypeIndex};
30
/// GC-related data that is one-to-one with a `wasmtime::Store`.
///
/// Contains everything we need to do collections, invoke barriers, etc...
///
/// In general, exposes a very similar interface to `GcHeap`, but fills in some
/// of the context arguments for callers (such as the `ExternRefHostDataTable`)
/// since they are all stored together inside `GcStore`.
pub struct GcStore {
    /// This GC heap's allocation index (primarily used for integrating with the
    /// pooling allocator).
    pub allocation_index: GcHeapAllocationIndex,

    /// The actual GC heap, behind a trait object so that different collector
    /// implementations can back the same store-level interface.
    pub gc_heap: Box<dyn GcHeap>,

    /// The `externref` host data table for this GC heap.
    ///
    /// Maps the host-data ids stored inside heap `externref` objects to their
    /// boxed `dyn Any` host values (see `alloc_externref` / `externref_host_data`).
    pub host_data_table: ExternRefHostDataTable,

    /// The function-references table for this GC heap.
    pub func_ref_table: FuncRefTable,

    /// The total allocated bytes recorded after the last GC collection.
    /// `None` if no collection has been performed yet. Used by the
    /// grow-or-collect heuristic.
    pub last_post_gc_allocated_bytes: Option<usize>,

    /// An allocation counter that triggers GC when it reaches zero.
    ///
    /// Decremented on every allocation and when it hits zero, a GC is
    /// forced (via a synthetic out-of-memory result from `alloc_raw`) and the
    /// counter is reset from `gc_zeal_alloc_counter_init`.
    #[cfg(gc_zeal)]
    gc_zeal_alloc_counter: Option<NonZeroU32>,

    /// The initial value to reset the counter to after it triggers.
    #[cfg(gc_zeal)]
    gc_zeal_alloc_counter_init: Option<NonZeroU32>,
}
68
69impl GcStore {
70    /// Create a new `GcStore`.
71    pub fn new(
72        allocation_index: GcHeapAllocationIndex,
73        gc_heap: Box<dyn GcHeap>,
74        gc_zeal_alloc_counter: Option<NonZeroU32>,
75    ) -> Self {
76        let host_data_table = ExternRefHostDataTable::default();
77        let func_ref_table = FuncRefTable::default();
78
79        let _ = &gc_zeal_alloc_counter;
80
81        Self {
82            allocation_index,
83            gc_heap,
84            host_data_table,
85            func_ref_table,
86            last_post_gc_allocated_bytes: None,
87            #[cfg(gc_zeal)]
88            gc_zeal_alloc_counter,
89            #[cfg(gc_zeal)]
90            gc_zeal_alloc_counter_init: gc_zeal_alloc_counter,
91        }
92    }
93
94    /// Get the `VMMemoryDefinition` for this GC heap.
95    pub fn vmmemory_definition(&self) -> VMMemoryDefinition {
96        self.gc_heap.vmmemory()
97    }
98
99    /// Get the current capacity (in bytes) of this GC heap.
100    pub fn gc_heap_capacity(&self) -> usize {
101        self.gc_heap.heap_slice().len()
102    }
103
104    /// Asynchronously perform garbage collection within this heap.
105    pub async fn gc(
106        &mut self,
107        asyncness: Asyncness,
108        roots: GcRootsIter<'_>,
109        yield_fn: impl AsyncFn(),
110    ) {
111        let collection = self.gc_heap.gc(roots, &mut self.host_data_table);
112        collect_async(collection, asyncness, yield_fn).await;
113        self.last_post_gc_allocated_bytes = Some({
114            let size = self.gc_heap.allocated_bytes();
115            log::trace!("After collection, GC heap size = {size} bytes");
116            size
117        });
118    }
119
120    /// Get the kind of the given GC reference.
121    pub fn kind(&self, gc_ref: &VMGcRef) -> VMGcKind {
122        debug_assert!(!gc_ref.is_i31());
123        self.header(gc_ref).kind()
124    }
125
126    /// Get the header of the given GC reference.
127    pub fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader {
128        debug_assert!(!gc_ref.is_i31());
129        self.gc_heap.header(gc_ref)
130    }
131
132    /// Clone a GC reference, calling GC write barriers as necessary.
133    pub fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
134        if gc_ref.is_i31() {
135            gc_ref.copy_i31()
136        } else {
137            self.gc_heap.clone_gc_ref(gc_ref)
138        }
139    }
140
141    /// Write the `source` GC reference into the uninitialized `destination`
142    /// slot, performing write barriers as necessary.
143    pub fn init_gc_ref(
144        &mut self,
145        destination: &mut MaybeUninit<Option<VMGcRef>>,
146        source: Option<&VMGcRef>,
147    ) {
148        // Initialize the destination to `None`, at which point the regular GC
149        // write barrier is safe to reuse.
150        let destination = destination.write(None);
151        self.write_gc_ref(destination, source);
152    }
153
154    /// Dynamically tests whether a `init_gc_ref` is needed to write `gc_ref`
155    /// into an uninitialized destination.
156    pub(crate) fn needs_init_barrier(gc_ref: Option<&VMGcRef>) -> bool {
157        assert!(cfg!(feature = "gc") || gc_ref.is_none());
158        gc_ref.is_some_and(|r| !r.is_i31())
159    }
160
161    /// Dynamically tests whether a `write_gc_ref` is needed to write `gc_ref`
162    /// into `dest`.
163    pub(crate) fn needs_write_barrier(
164        dest: &mut Option<VMGcRef>,
165        gc_ref: Option<&VMGcRef>,
166    ) -> bool {
167        assert!(cfg!(feature = "gc") || gc_ref.is_none());
168        assert!(cfg!(feature = "gc") || dest.is_none());
169        dest.as_ref().is_some_and(|r| !r.is_i31()) || gc_ref.is_some_and(|r| !r.is_i31())
170    }
171
172    /// Same as [`Self::write_gc_ref`] but doesn't require a `store` when
173    /// possible.
174    ///
175    /// # Panics
176    ///
177    /// Panics if `store` is `None` and one of `dest` or `gc_ref` requires a
178    /// write barrier.
179    pub(crate) fn write_gc_ref_optional_store(
180        store: Option<&mut Self>,
181        dest: &mut Option<VMGcRef>,
182        gc_ref: Option<&VMGcRef>,
183    ) {
184        if Self::needs_write_barrier(dest, gc_ref) {
185            store.unwrap().write_gc_ref(dest, gc_ref)
186        } else {
187            *dest = gc_ref.map(|r| r.copy_i31());
188        }
189    }
190
191    /// Write the `source` GC reference into the `destination` slot, performing
192    /// write barriers as necessary.
193    pub fn write_gc_ref(&mut self, destination: &mut Option<VMGcRef>, source: Option<&VMGcRef>) {
194        // If neither the source nor destination actually point to a GC object
195        // (that is, they are both either null or `i31ref`s) then we can skip
196        // the GC barrier.
197        if Self::needs_write_barrier(destination, source) {
198            self.gc_heap
199                .write_gc_ref(&mut self.host_data_table, destination, source);
200        } else {
201            *destination = source.map(|s| s.copy_i31());
202        }
203    }
204
205    /// Drop the given GC reference, performing drop barriers as necessary.
206    pub fn drop_gc_ref(&mut self, gc_ref: VMGcRef) {
207        if !gc_ref.is_i31() {
208            self.gc_heap.drop_gc_ref(&mut self.host_data_table, gc_ref);
209        }
210    }
211
212    /// Hook to call whenever a GC reference is about to be exposed to Wasm.
213    ///
214    /// Returns the raw representation of this GC ref, ready to be passed to
215    /// Wasm.
216    #[must_use]
217    pub fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) -> NonZeroU32 {
218        let raw = gc_ref.as_raw_non_zero_u32();
219        if !gc_ref.is_i31() {
220            log::trace!("exposing GC ref to Wasm: {gc_ref:p}");
221            self.gc_heap.expose_gc_ref_to_wasm(gc_ref);
222        }
223        raw
224    }
225
226    /// Allocate a new `externref`.
227    ///
228    /// Returns:
229    ///
230    /// * `Ok(Ok(_))`: Successfully allocated the `externref`.
231    ///
232    /// * `Ok(Err((value, n)))`: Failed to allocate the `externref`, but doing a GC
233    ///   and then trying again may succeed. Returns the given `value` as the
234    ///   error payload, along with the size of the failed allocation.
235    ///
236    /// * `Err(_)`: Unrecoverable allocation failure.
237    pub fn alloc_externref(
238        &mut self,
239        value: Box<dyn Any + Send + Sync>,
240    ) -> Result<Result<VMExternRef, (Box<dyn Any + Send + Sync>, u64)>> {
241        let host_data_id = self.host_data_table.alloc(value);
242        match self.gc_heap.alloc_externref(host_data_id)? {
243            Ok(x) => Ok(Ok(x)),
244            Err(n) => Ok(Err((self.host_data_table.dealloc(host_data_id), n))),
245        }
246    }
247
248    /// Get a shared borrow of the given `externref`'s host data.
249    ///
250    /// Passing invalid `VMExternRef`s (eg garbage values or `externref`s
251    /// associated with a different heap is memory safe but will lead to general
252    /// incorrectness such as panics and wrong results.
253    pub fn externref_host_data(&self, externref: &VMExternRef) -> &(dyn Any + Send + Sync) {
254        let host_data_id = self.gc_heap.externref_host_data(externref);
255        self.host_data_table.get(host_data_id)
256    }
257
258    /// Get a mutable borrow of the given `externref`'s host data.
259    ///
260    /// Passing invalid `VMExternRef`s (eg garbage values or `externref`s
261    /// associated with a different heap is memory safe but will lead to general
262    /// incorrectness such as panics and wrong results.
263    pub fn externref_host_data_mut(
264        &mut self,
265        externref: &VMExternRef,
266    ) -> &mut (dyn Any + Send + Sync) {
267        let host_data_id = self.gc_heap.externref_host_data(externref);
268        self.host_data_table.get_mut(host_data_id)
269    }
270
271    /// Allocate a raw object with the given header and layout.
272    pub fn alloc_raw(
273        &mut self,
274        header: VMGcHeader,
275        layout: Layout,
276    ) -> Result<Result<VMGcRef, u64>> {
277        // When gc_zeal is enabled with an allocation counter, decrement it and
278        // force a GC cycle when it reaches zero by returning a fake OOM.
279        #[cfg(gc_zeal)]
280        if let Some(counter) = self.gc_zeal_alloc_counter.take() {
281            match NonZeroU32::new(counter.get() - 1) {
282                Some(c) => self.gc_zeal_alloc_counter = Some(c),
283                None => {
284                    log::trace!("gc_zeal: allocation counter reached zero, forcing GC");
285                    self.gc_zeal_alloc_counter = self.gc_zeal_alloc_counter_init;
286                    return Ok(Err(0));
287                }
288            }
289        }
290
291        self.gc_heap.alloc_raw(header, layout)
292    }
293
294    /// Allocate an uninitialized struct with the given type index and layout.
295    ///
296    /// This does NOT check that the index is currently allocated in the types
297    /// registry or that the layout matches the index's type. Failure to uphold
298    /// those invariants is memory safe, but will lead to general incorrectness
299    /// such as panics and wrong results.
300    pub fn alloc_uninit_struct(
301        &mut self,
302        ty: VMSharedTypeIndex,
303        layout: &GcStructLayout,
304    ) -> Result<Result<VMStructRef, u64>> {
305        self.gc_heap
306            .alloc_uninit_struct_or_exn(ty, layout)
307            .map(|r| r.map(|r| r.into_structref_unchecked()))
308    }
309
310    /// Deallocate an uninitialized struct.
311    pub fn dealloc_uninit_struct(&mut self, structref: VMStructRef) {
312        self.gc_heap.dealloc_uninit_struct_or_exn(structref.into())
313    }
314
315    /// Get the data for the given object reference.
316    ///
317    /// Panics when the structref and its size is out of the GC heap bounds.
318    pub fn gc_object_data(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData {
319        self.gc_heap.gc_object_data_mut(gc_ref)
320    }
321
322    /// Get the object datas for the given pair of object references.
323    ///
324    /// Panics if `a` and `b` are the same reference or either is out of bounds.
325    pub fn gc_object_data_pair(
326        &mut self,
327        a: &VMGcRef,
328        b: &VMGcRef,
329    ) -> (&mut VMGcObjectData, &mut VMGcObjectData) {
330        assert_ne!(a, b);
331        self.gc_heap.gc_object_data_pair(a, b)
332    }
333
334    /// Allocate an uninitialized array with the given type index.
335    ///
336    /// This does NOT check that the index is currently allocated in the types
337    /// registry or that the layout matches the index's type. Failure to uphold
338    /// those invariants is memory safe, but will lead to general incorrectness
339    /// such as panics and wrong results.
340    pub fn alloc_uninit_array(
341        &mut self,
342        ty: VMSharedTypeIndex,
343        len: u32,
344        layout: &GcArrayLayout,
345    ) -> Result<Result<VMArrayRef, u64>> {
346        self.gc_heap.alloc_uninit_array(ty, len, layout)
347    }
348
349    /// Deallocate an uninitialized array.
350    pub fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) {
351        self.gc_heap.dealloc_uninit_array(arrayref);
352    }
353
354    /// Get the length of the given array.
355    pub fn array_len(&self, arrayref: &VMArrayRef) -> u32 {
356        self.gc_heap.array_len(arrayref)
357    }
358
359    /// Allocate an uninitialized exception object with the given type
360    /// index.
361    ///
362    /// This does NOT check that the index is currently allocated in the types
363    /// registry or that the layout matches the index's type. Failure to uphold
364    /// those invariants is memory safe, but will lead to general incorrectness
365    /// such as panics and wrong results.
366    pub fn alloc_uninit_exn(
367        &mut self,
368        ty: VMSharedTypeIndex,
369        layout: &GcStructLayout,
370    ) -> Result<Result<VMExnRef, u64>> {
371        self.gc_heap
372            .alloc_uninit_struct_or_exn(ty, layout)
373            .map(|r| r.map(|r| r.into_exnref_unchecked()))
374    }
375
376    /// Deallocate an uninitialized exception object.
377    pub fn dealloc_uninit_exn(&mut self, exnref: VMExnRef) {
378        self.gc_heap.dealloc_uninit_struct_or_exn(exnref.into());
379    }
380
381    #[cfg(feature = "gc")]
382    pub(crate) fn reset_gc_zeal_alloc_counter(&mut self) {
383        #[cfg(gc_zeal)]
384        {
385            self.gc_zeal_alloc_counter = self.gc_zeal_alloc_counter_init;
386        }
387    }
388}