#[cfg(feature = "gc")]
mod enabled;
#[cfg(feature = "gc")]
pub use enabled::*;

#[cfg(not(feature = "gc"))]
mod disabled;
#[cfg(not(feature = "gc"))]
pub use disabled::*;

mod func_ref;
mod gc_ref;
mod gc_runtime;
mod host_data;
mod i31;

pub use func_ref::*;
pub use gc_ref::*;
pub use gc_runtime::*;
pub use host_data::*;
pub use i31::*;

use crate::prelude::*;
use crate::runtime::vm::{GcHeapAllocationIndex, VMMemoryDefinition};
use crate::store::Asyncness;
use core::any::Any;
use core::mem::MaybeUninit;
use core::{alloc::Layout, num::NonZeroU32};
use wasmtime_environ::{GcArrayLayout, GcStructLayout, VMGcKind, VMSharedTypeIndex};

/// The GC-related state for a single store: its GC heap plus the side tables
/// for host data and function references.
pub struct GcStore {
    /// This GC heap's allocation index within the store's GC heap allocator.
    pub allocation_index: GcHeapAllocationIndex,

    /// The actual GC heap.
    pub gc_heap: Box<dyn GcHeap>,

    /// The `externref` host data table for this GC heap.
    pub host_data_table: ExternRefHostDataTable,

    /// The function-references table for this GC heap.
    pub func_ref_table: FuncRefTable,

    /// The number of bytes allocated in the GC heap as measured at the end of
    /// the last collection, or `None` if no collection has happened yet.
    pub last_post_gc_allocated_bytes: Option<usize>,

    /// When GC zeal is enabled: the number of allocations remaining before
    /// the next forced collection.
    #[cfg(gc_zeal)]
    gc_zeal_alloc_counter: Option<NonZeroU32>,

    /// The initial value that `gc_zeal_alloc_counter` is reset to after each
    /// forced collection.
    #[cfg(gc_zeal)]
    gc_zeal_alloc_counter_init: Option<NonZeroU32>,
}

impl GcStore {
    /// Create a new `GcStore`.
    pub fn new(
        allocation_index: GcHeapAllocationIndex,
        gc_heap: Box<dyn GcHeap>,
        gc_zeal_alloc_counter: Option<NonZeroU32>,
    ) -> Self {
        let host_data_table = ExternRefHostDataTable::default();
        let func_ref_table = FuncRefTable::default();

        // Avoid an unused-variable warning when `gc_zeal` is disabled.
        let _ = &gc_zeal_alloc_counter;

        Self {
            allocation_index,
            gc_heap,
            host_data_table,
            func_ref_table,
            last_post_gc_allocated_bytes: None,
            #[cfg(gc_zeal)]
            gc_zeal_alloc_counter,
            #[cfg(gc_zeal)]
            gc_zeal_alloc_counter_init: gc_zeal_alloc_counter,
        }
    }

    /// Get the `VMMemoryDefinition` for this GC heap's underlying memory.
    pub fn vmmemory_definition(&self) -> VMMemoryDefinition {
        self.gc_heap.vmmemory()
    }

    /// Get the current capacity of this GC heap, in bytes.
    pub fn gc_heap_capacity(&self) -> usize {
        self.gc_heap.heap_slice().len()
    }

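    /// Perform garbage collection over this heap, given an iterator over the
    /// live roots, and record the post-collection heap size.
    ///
    /// Depending on `asyncness`, the collection may cooperatively yield back
    /// to the caller via `yield_fn` between increments of collection work.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming you already have a `store: &mut GcStore`,
    /// a `roots: GcRootsIter<'_>` over the live roots, and an `asyncness`
    /// value (all hypothetical setup, not shown here):
    ///
    /// ```ignore
    /// store
    ///     .gc(asyncness, roots, async || {
    ///         // Cooperatively yield to the async executor here.
    ///     })
    ///     .await;
    /// ```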
    pub async fn gc(
        &mut self,
        asyncness: Asyncness,
        roots: GcRootsIter<'_>,
        yield_fn: impl AsyncFn(),
    ) {
        let collection = self.gc_heap.gc(roots, &mut self.host_data_table);
        collect_async(collection, asyncness, yield_fn).await;
        self.last_post_gc_allocated_bytes = Some({
            let size = self.gc_heap.allocated_bytes();
            log::trace!("After collection, GC heap size = {size} bytes");
            size
        });
    }

    /// Get the kind of the given GC reference.
    ///
    /// Does not support `i31ref`s, which have no heap header.
    pub fn kind(&self, gc_ref: &VMGcRef) -> VMGcKind {
        debug_assert!(!gc_ref.is_i31());
        self.header(gc_ref).kind()
    }

    /// Get the header of the given GC reference.
    ///
    /// Does not support `i31ref`s, which have no heap header.
    pub fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader {
        debug_assert!(!gc_ref.is_i31());
        self.gc_heap.header(gc_ref)
    }

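    /// Clone a GC reference, running any GC barriers as necessary.
    ///
    /// `i31ref`s are unboxed integers rather than heap allocations, so they
    /// are simply copied; everything else goes through the heap's clone
    /// barrier.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore` and a `gc_ref`
    /// allocated from that store's heap (hypothetical setup):
    ///
    /// ```ignore
    /// // The clone is an independent reference that must eventually be
    /// // dropped (or handed off) on its own.
    /// let cloned = store.clone_gc_ref(&gc_ref);
    /// // ... use `cloned` ...
    /// store.drop_gc_ref(cloned);
    /// ```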
    pub fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
        if gc_ref.is_i31() {
            gc_ref.copy_i31()
        } else {
            self.gc_heap.clone_gc_ref(gc_ref)
        }
    }

    /// Initialize the given uninitialized destination slot with the given GC
    /// reference, running any GC barriers as necessary.
    pub fn init_gc_ref(
        &mut self,
        destination: &mut MaybeUninit<Option<VMGcRef>>,
        source: Option<&VMGcRef>,
    ) {
        // Initialize the destination to `None` so that it is valid to pass
        // to the regular write barrier below.
        let destination = destination.write(None);
        self.write_gc_ref(destination, source);
    }

    /// Does initializing a slot with the given GC reference require a GC
    /// barrier? Only non-`i31` references do.
    pub(crate) fn needs_init_barrier(gc_ref: Option<&VMGcRef>) -> bool {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());
        gc_ref.is_some_and(|r| !r.is_i31())
    }

    /// Does overwriting `dest` with the given GC reference require a GC
    /// barrier? A barrier is needed when either the old or the new value is
    /// a non-`i31` reference.
    pub(crate) fn needs_write_barrier(
        dest: &mut Option<VMGcRef>,
        gc_ref: Option<&VMGcRef>,
    ) -> bool {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());
        assert!(cfg!(feature = "gc") || dest.is_none());
        dest.as_ref().is_some_and(|r| !r.is_i31()) || gc_ref.is_some_and(|r| !r.is_i31())
    }

    /// Like `write_gc_ref`, but with an optional store.
    ///
    /// The store may only be `None` when a write barrier is unnecessary,
    /// i.e. both the old and new values are `None` or `i31`s.
    ///
    /// # Panics
    ///
    /// Panics if a write barrier is required but `store` is `None`.
    pub(crate) fn write_gc_ref_optional_store(
        store: Option<&mut Self>,
        dest: &mut Option<VMGcRef>,
        gc_ref: Option<&VMGcRef>,
    ) {
        if Self::needs_write_barrier(dest, gc_ref) {
            store.unwrap().write_gc_ref(dest, gc_ref)
        } else {
            *dest = gc_ref.map(|r| r.copy_i31());
        }
    }

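    /// Write the `source` GC reference into the `destination` slot, running
    /// any GC barriers as necessary.
    ///
    /// This releases the reference previously held in `destination` (if any)
    /// and retains a new reference to `source` (if any); `None` and `i31`
    /// values are copied directly without barriers.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore` and a `some_ref`
    /// allocated from that store's heap (hypothetical setup):
    ///
    /// ```ignore
    /// // A slot in, say, a table of GC references.
    /// let mut slot: Option<VMGcRef> = None;
    ///
    /// // Fill the slot; the heap's write barrier retains `some_ref`.
    /// store.write_gc_ref(&mut slot, Some(&some_ref));
    ///
    /// // Clear the slot; the write barrier releases the old reference.
    /// store.write_gc_ref(&mut slot, None);
    /// ```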
    pub fn write_gc_ref(&mut self, destination: &mut Option<VMGcRef>, source: Option<&VMGcRef>) {
        if Self::needs_write_barrier(destination, source) {
            self.gc_heap
                .write_gc_ref(&mut self.host_data_table, destination, source);
        } else {
            *destination = source.map(|s| s.copy_i31());
        }
    }

    /// Drop the given GC reference, running any GC barriers as necessary.
    ///
    /// `i31`s are unboxed and require no barrier.
    pub fn drop_gc_ref(&mut self, gc_ref: VMGcRef) {
        if !gc_ref.is_i31() {
            self.gc_heap.drop_gc_ref(&mut self.host_data_table, gc_ref);
        }
    }

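    /// Hand the given GC reference over to Wasm, returning its raw `u32`
    /// representation and running the heap's expose-to-Wasm barrier for
    /// non-`i31` references.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore` and a `gc_ref`
    /// allocated from that store's heap (hypothetical setup):
    ///
    /// ```ignore
    /// // `raw` is the value that gets written into, e.g., a Wasm value
    /// // slot; the `#[must_use]` return ensures it isn't silently lost.
    /// let raw: NonZeroU32 = store.expose_gc_ref_to_wasm(gc_ref);
    /// ```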
    #[must_use]
    pub fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) -> NonZeroU32 {
        let raw = gc_ref.as_raw_non_zero_u32();
        if !gc_ref.is_i31() {
            log::trace!("exposing GC ref to Wasm: {gc_ref:p}");
            self.gc_heap.expose_gc_ref_to_wasm(gc_ref);
        }
        raw
    }

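    /// Allocate a new `externref` wrapping the given host value.
    ///
    /// Returns:
    ///
    /// * `Ok(Ok(_))`: the allocation succeeded.
    ///
    /// * `Ok(Err((value, n)))`: the heap is out of memory. The host value is
    ///   given back, and `n` is the number of bytes the heap would need to
    ///   satisfy the request; the caller can collect garbage and/or grow the
    ///   heap and then retry.
    ///
    /// * `Err(_)`: an unrecoverable allocation error.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore` and a hypothetical
    /// `grow_heap_and_retry` recovery path (not part of this API):
    ///
    /// ```ignore
    /// match store.alloc_externref(Box::new(String::from("hello")))? {
    ///     Ok(externref) => { /* use `externref` */ }
    ///     Err((value, bytes_needed)) => {
    ///         // GC and/or grow the heap by `bytes_needed` bytes, then
    ///         // retry the allocation with the returned `value`.
    ///         grow_heap_and_retry(store, value, bytes_needed)?;
    ///     }
    /// }
    /// ```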
    pub fn alloc_externref(
        &mut self,
        value: Box<dyn Any + Send + Sync>,
    ) -> Result<Result<VMExternRef, (Box<dyn Any + Send + Sync>, u64)>> {
        let host_data_id = self.host_data_table.alloc(value);
        match self.gc_heap.alloc_externref(host_data_id)? {
            Ok(x) => Ok(Ok(x)),
            Err(n) => Ok(Err((self.host_data_table.dealloc(host_data_id), n))),
        }
    }

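    /// Get a shared borrow of the given `externref`'s associated host data.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &GcStore` and an `externref`
    /// that was allocated via `alloc_externref` with a `String` payload
    /// (hypothetical setup):
    ///
    /// ```ignore
    /// let data = store.externref_host_data(&externref);
    /// if let Some(s) = data.downcast_ref::<String>() {
    ///     println!("externref wraps: {s}");
    /// }
    /// ```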
    pub fn externref_host_data(&self, externref: &VMExternRef) -> &(dyn Any + Send + Sync) {
        let host_data_id = self.gc_heap.externref_host_data(externref);
        self.host_data_table.get(host_data_id)
    }

    /// Get a mutable borrow of the given `externref`'s associated host data.
    pub fn externref_host_data_mut(
        &mut self,
        externref: &VMExternRef,
    ) -> &mut (dyn Any + Send + Sync) {
        let host_data_id = self.gc_heap.externref_host_data(externref);
        self.host_data_table.get_mut(host_data_id)
    }

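    /// Allocate a raw GC object with the given header and layout.
    ///
    /// Returns `Ok(Err(n))` when the heap is out of memory, where `n` is the
    /// number of additional bytes needed to satisfy the request (or zero
    /// when GC zeal forces a collection), so that the caller can collect
    /// and/or grow the heap and retry.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore` and a `header`
    /// whose kind and type index describe the object being allocated
    /// (hypothetical setup):
    ///
    /// ```ignore
    /// // A 16-byte object aligned to 8 bytes; real callers derive this
    /// // layout from the object's type.
    /// let layout = Layout::from_size_align(16, 8).unwrap();
    /// match store.alloc_raw(header, layout)? {
    ///     Ok(gc_ref) => { /* initialize the object's fields */ }
    ///     Err(bytes_needed) => { /* GC and/or grow the heap, then retry */ }
    /// }
    /// ```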
    pub fn alloc_raw(
        &mut self,
        header: VMGcHeader,
        layout: Layout,
    ) -> Result<Result<VMGcRef, u64>> {
        // When GC zeal is enabled, count the allocation counter down towards
        // zero and force a collection when it gets there, resetting the
        // counter afterwards.
        #[cfg(gc_zeal)]
        if let Some(counter) = self.gc_zeal_alloc_counter.take() {
            match NonZeroU32::new(counter.get() - 1) {
                Some(c) => self.gc_zeal_alloc_counter = Some(c),
                None => {
                    log::trace!("gc_zeal: allocation counter reached zero, forcing GC");
                    self.gc_zeal_alloc_counter = self.gc_zeal_alloc_counter_init;
                    // Report an out-of-memory condition that requests zero
                    // additional capacity, so the caller performs a
                    // collection and then retries the allocation.
                    return Ok(Err(0));
                }
            }
        }

        self.gc_heap.alloc_raw(header, layout)
    }

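    /// Allocate an uninitialized struct with the given type index and
    /// layout.
    ///
    /// This does not initialize the struct's fields, so callers must do so
    /// themselves (or deallocate the struct if initialization fails or
    /// traps).
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore`, a `ty`, a
    /// `layout`, and a hypothetical fallible `init_fields` helper (none of
    /// which are shown here):
    ///
    /// ```ignore
    /// if let Ok(structref) = store.alloc_uninit_struct(ty, &layout)? {
    ///     if let Err(e) = init_fields(store, &structref) {
    ///         // Never leave an uninitialized object reachable: deallocate
    ///         // it before propagating the error.
    ///         store.dealloc_uninit_struct(structref);
    ///         return Err(e);
    ///     }
    /// }
    /// ```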
    pub fn alloc_uninit_struct(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Result<VMStructRef, u64>> {
        // Structs and exceptions share an allocation path; allocating with a
        // struct type always yields a struct reference.
        self.gc_heap
            .alloc_uninit_struct_or_exn(ty, layout)
            .map(|r| r.map(|r| r.into_structref_unchecked()))
    }

    /// Deallocate an uninitialized struct.
    pub fn dealloc_uninit_struct(&mut self, structref: VMStructRef) {
        self.gc_heap.dealloc_uninit_struct_or_exn(structref.into())
    }

    /// Get mutable access to the given object's data.
    pub fn gc_object_data(&mut self, gc_ref: &VMGcRef) -> &mut VMGcObjectData {
        self.gc_heap.gc_object_data_mut(gc_ref)
    }

    /// Get mutable access to two objects' data simultaneously.
    ///
    /// # Panics
    ///
    /// Panics if `a` and `b` are the same reference.
    pub fn gc_object_data_pair(
        &mut self,
        a: &VMGcRef,
        b: &VMGcRef,
    ) -> (&mut VMGcObjectData, &mut VMGcObjectData) {
        assert_ne!(a, b);
        self.gc_heap.gc_object_data_pair(a, b)
    }

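    /// Allocate an uninitialized array with the given type index, length,
    /// and layout.
    ///
    /// This does not initialize the array's elements, so callers must do so
    /// themselves (or deallocate the array if initialization fails or
    /// traps).
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore`, a `ty`, and a
    /// `layout` for the array type (hypothetical setup):
    ///
    /// ```ignore
    /// match store.alloc_uninit_array(ty, 8, &layout)? {
    ///     Ok(arrayref) => {
    ///         // ... initialize all 8 elements, then use the array ...
    ///     }
    ///     Err(bytes_needed) => { /* GC and/or grow the heap, then retry */ }
    /// }
    /// ```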
    pub fn alloc_uninit_array(
        &mut self,
        ty: VMSharedTypeIndex,
        len: u32,
        layout: &GcArrayLayout,
    ) -> Result<Result<VMArrayRef, u64>> {
        self.gc_heap.alloc_uninit_array(ty, len, layout)
    }

    /// Deallocate an uninitialized array.
    pub fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) {
        self.gc_heap.dealloc_uninit_array(arrayref);
    }

    /// Get the length of the given array.
    pub fn array_len(&self, arrayref: &VMArrayRef) -> u32 {
        self.gc_heap.array_len(arrayref)
    }

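    /// Allocate an uninitialized exception object with the given type index
    /// and layout.
    ///
    /// Exceptions share `GcStructLayout` and the struct allocation path; the
    /// result is converted into an exception reference.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a `store: &mut GcStore`, a `ty`, and a
    /// `layout` for the exception's payload fields (hypothetical setup):
    ///
    /// ```ignore
    /// match store.alloc_uninit_exn(ty, &layout)? {
    ///     Ok(exnref) => { /* initialize the exception's fields */ }
    ///     Err(bytes_needed) => { /* GC and/or grow the heap, then retry */ }
    /// }
    /// ```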
    pub fn alloc_uninit_exn(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Result<VMExnRef, u64>> {
        self.gc_heap
            .alloc_uninit_struct_or_exn(ty, layout)
            .map(|r| r.map(|r| r.into_exnref_unchecked()))
    }

    /// Deallocate an uninitialized exception object.
    pub fn dealloc_uninit_exn(&mut self, exnref: VMExnRef) {
        self.gc_heap.dealloc_uninit_struct_or_exn(exnref.into());
    }

    /// Reset the GC zeal allocation counter to its initial value.
    #[cfg(feature = "gc")]
    pub(crate) fn reset_gc_zeal_alloc_counter(&mut self) {
        #[cfg(gc_zeal)]
        {
            self.gc_zeal_alloc_counter = self.gc_zeal_alloc_counter_init;
        }
    }
}