use super::free_list::FreeList;
use super::{VMArrayRef, VMGcObjectDataMut, VMStructRef};
use crate::hash_set::HashSet;
use crate::prelude::*;
use crate::runtime::vm::{
    mmap::AlignedLength, ExternRefHostDataId, ExternRefHostDataTable, GarbageCollection, GcHeap,
    GcHeapObject, GcProgress, GcRootsIter, GcRuntime, Mmap, TypedGcRef, VMExternRef, VMGcHeader,
    VMGcRef,
};
use core::ops::{Deref, DerefMut, Range};
use core::{
    alloc::Layout,
    any::Any,
    cell::UnsafeCell,
    mem,
    num::NonZeroUsize,
    ptr::{self, NonNull},
};
use wasmtime_environ::drc::DrcTypeLayouts;
use wasmtime_environ::{GcArrayLayout, GcStructLayout, GcTypeLayouts, VMGcKind, VMSharedTypeIndex};

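/// The deferred reference-counting (DRC) collector.
///
/// Each heap object carries a reference count in its header. Counts are
/// incremented and decremented as references are cloned, written, and
/// dropped, and a two-phase collection (trace, then sweep) prunes the
/// activations table down to the references actually rooted on the Wasm
/// stack.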
#[derive(Default)]
pub struct DrcCollector {
    layouts: DrcTypeLayouts,
}

unsafe impl GcRuntime for DrcCollector {
    fn layouts(&self) -> &dyn GcTypeLayouts {
        &self.layouts
    }

    fn new_gc_heap(&self) -> Result<Box<dyn GcHeap>> {
        let heap = DrcHeap::new()?;
        Ok(Box::new(heap) as _)
    }
}

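/// A single DRC GC heap: the mmap'd storage for objects, a free list that
/// hands out allocations within it, and the activations table that
/// over-approximates the GC references held on the Wasm stack.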
struct DrcHeap {
    no_gc_count: u64,
    activations_table: Box<VMGcRefActivationsTable>,
    heap: Mmap<AlignedLength>,
    free_list: FreeList,
}

impl DrcHeap {
    fn new() -> Result<Self> {
        Self::with_capacity(super::DEFAULT_GC_HEAP_CAPACITY)
    }

    fn with_capacity(capacity: usize) -> Result<Self> {
        let heap = Mmap::with_at_least(capacity)?;
        let free_list = FreeList::new(heap.len());
        Ok(Self {
            no_gc_count: 0,
            activations_table: Box::new(VMGcRefActivationsTable::default()),
            heap,
            free_list,
        })
    }

    fn dealloc(&mut self, gc_ref: VMGcRef) {
        let drc_ref = drc_ref(&gc_ref);
        let size = self.index(drc_ref).object_size();
        let layout = FreeList::layout(size);
        self.free_list
            .dealloc(gc_ref.as_heap_index().unwrap(), layout);
    }

    fn object_range(&self, gc_ref: &VMGcRef) -> Range<usize> {
        let start = gc_ref.as_heap_index().unwrap().get();
        let start = usize::try_from(start).unwrap();
        let size = self
            .index::<VMDrcHeader>(gc_ref.as_typed_unchecked())
            .object_size();
        let end = start.checked_add(size).unwrap();
        start..end
    }

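    /// Increment the ref count of the object referenced by `gc_ref`. `i31`
    /// references are not heap-allocated and are ignored.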
    fn inc_ref(&mut self, gc_ref: &VMGcRef) {
        if gc_ref.is_i31() {
            return;
        }

        let drc_ref = drc_ref(gc_ref);
        let header = self.index_mut(&drc_ref);
        debug_assert_ne!(
            *header.ref_count.get_mut(),
            0,
            "{:#p} is supposedly live; should have nonzero ref count",
            *gc_ref
        );
        *header.ref_count.get_mut() += 1;
        log::trace!(
            "increment {:#p} ref count -> {}",
            *gc_ref,
            header.ref_count.get_mut()
        );
    }

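    /// Decrement the ref count of the object referenced by `gc_ref`, returning
    /// `true` if the count reached zero. Always returns `false` for `i31`
    /// references.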
    fn dec_ref(&mut self, gc_ref: &VMGcRef) -> bool {
        if gc_ref.is_i31() {
            return false;
        }

        let drc_ref = drc_ref(gc_ref);
        let header = self.index_mut(drc_ref);
        debug_assert_ne!(
            *header.ref_count.get_mut(),
            0,
            "{:#p} is supposedly live; should have nonzero ref count",
            *gc_ref
        );
        *header.ref_count.get_mut() -= 1;
        log::trace!(
            "decrement {:#p} ref count -> {}",
            *gc_ref,
            header.ref_count.get_mut()
        );
        *header.ref_count.get_mut() == 0
    }

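    /// Decrement the ref count and, if it reaches zero, deallocate the object,
    /// first freeing any associated `externref` host data.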
    fn dec_ref_and_maybe_dealloc(
        &mut self,
        host_data_table: &mut ExternRefHostDataTable,
        gc_ref: &VMGcRef,
    ) {
        if self.dec_ref(gc_ref) {
            if let Some(externref) = gc_ref.as_typed::<VMDrcExternRef>(self) {
                let host_data_id = self.index(externref).host_data;
                host_data_table.dealloc(host_data_id);
            }

            self.dealloc(gc_ref.unchecked_copy());
        }
    }

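    /// Trace the given roots: record every non-`i31` GC reference found on the
    /// Wasm stack in the precise-stack-roots set, incrementing its ref count
    /// the first time it is seen.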
    fn trace(&mut self, roots: &mut GcRootsIter<'_>) {
        debug_assert!({
            self.activations_table.precise_stack_roots.is_empty()
        });

        let mut activations_table_set: DebugOnly<HashSet<_>> = Default::default();
        if cfg!(debug_assertions) {
            self.activations_table.elements(|elem| {
                activations_table_set.insert(elem.unchecked_copy());
            });
        }

        for root in roots {
            if !root.is_on_wasm_stack() {
                continue;
            }

            let gc_ref = root.get();
            debug_assert!(
                gc_ref.is_i31() || activations_table_set.contains(&gc_ref),
                "every on-stack gc_ref inside a Wasm frame should \
                 have an entry in the VMGcRefActivationsTable; \
                 {gc_ref:#p} is not in the table",
            );
            if gc_ref.is_i31() {
                continue;
            }

            debug_assert_ne!(
                *self.index_mut(drc_ref(&gc_ref)).ref_count.get_mut(),
                0,
                "{gc_ref:#p} is on the Wasm stack and therefore should be held \
                 by the activations table; should have nonzero ref count",
            );

            log::trace!("Found GC reference on the stack: {:#p}", gc_ref);
            let is_new = self
                .activations_table
                .precise_stack_roots
                .insert(gc_ref.unchecked_copy());
            if is_new {
                self.inc_ref(&gc_ref);
            }
        }
    }

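    /// Iterate over the GC references stored in the filled portion of the bump
    /// chunk.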
    fn iter_bump_chunk(&mut self) -> impl Iterator<Item = VMGcRef> + '_ {
        let num_filled = self.activations_table.num_filled_in_bump_chunk();
        self.activations_table
            .alloc
            .chunk
            .iter_mut()
            .take(num_filled)
            .map(|slot| {
                let raw = *slot.get_mut();
                VMGcRef::from_raw_u32(raw).expect("non-null")
            })
    }

    #[inline(never)]
    #[cold]
    fn log_gc_ref_set(prefix: &str, items: impl Iterator<Item = VMGcRef>) {
        assert!(log::log_enabled!(log::Level::Trace));
        let mut set = "{".to_string();
        let mut any = false;
        for gc_ref in items {
            any = true;
            set += &format!("\n {gc_ref:#p},");
        }
        if any {
            set.push('\n');
        }
        set.push('}');
        log::trace!("{prefix}: {set}");
    }

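    /// Drain the filled portion of the bump chunk, passing each GC reference
    /// to `f` and zeroing its slot.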
    fn drain_bump_chunk(&mut self, mut f: impl FnMut(&mut Self, VMGcRef)) {
        let num_filled = self.activations_table.num_filled_in_bump_chunk();

        let mut alloc = mem::take(&mut self.activations_table.alloc);
        for slot in alloc.chunk.iter_mut().take(num_filled) {
            let raw = mem::take(slot.get_mut());
            let gc_ref = VMGcRef::from_raw_u32(raw).expect("non-null");
            f(self, gc_ref);
            *slot.get_mut() = 0;
        }

        debug_assert!(
            alloc.chunk.iter_mut().all(|slot| *slot.get_mut() == 0),
            "after sweeping the bump chunk, all slots should be empty",
        );

        debug_assert!(self.activations_table.alloc.chunk.is_empty());
        self.activations_table.alloc = alloc;
    }

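    /// The sweep phase: drop the references held by the bump chunk and by the
    /// old over-approximated stack-root set, keeping only the precise stack
    /// roots discovered during tracing as the new over-approximation.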
    fn sweep(&mut self, host_data_table: &mut ExternRefHostDataTable) {
        if log::log_enabled!(log::Level::Trace) {
            Self::log_gc_ref_set("bump chunk before sweeping", self.iter_bump_chunk());
        }

        log::trace!("Begin sweeping bump chunk");
        self.drain_bump_chunk(|heap, gc_ref| {
            heap.dec_ref_and_maybe_dealloc(host_data_table, &gc_ref);
        });
        log::trace!("Done sweeping bump chunk");

        if self.activations_table.alloc.chunk.is_empty() {
            self.activations_table.alloc.force_allocation();
        } else {
            self.activations_table.alloc.reset();
        }

        if log::log_enabled!(log::Level::Trace) {
            Self::log_gc_ref_set(
                "hash set before sweeping",
                self.activations_table
                    .over_approximated_stack_roots
                    .iter()
                    .map(|r| r.unchecked_copy()),
            );
        }

        mem::swap(
            &mut self.activations_table.precise_stack_roots,
            &mut self.activations_table.over_approximated_stack_roots,
        );

        log::trace!("Begin sweeping hash set");
        let mut precise_stack_roots = mem::take(&mut self.activations_table.precise_stack_roots);
        for gc_ref in precise_stack_roots.drain() {
            self.dec_ref_and_maybe_dealloc(host_data_table, &gc_ref);
        }
        log::trace!("Done sweeping hash set");

        self.activations_table.precise_stack_roots = precise_stack_roots;

        if log::log_enabled!(log::Level::Trace) {
            Self::log_gc_ref_set(
                "hash set after sweeping",
                self.activations_table
                    .over_approximated_stack_roots
                    .iter()
                    .map(|r| r.unchecked_copy()),
            );
        }
    }
}

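/// Convert a `VMGcRef` into a typed reference to its DRC header.
///
/// The given reference must not be an `i31`.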
fn drc_ref(gc_ref: &VMGcRef) -> &TypedGcRef<VMDrcHeader> {
    debug_assert!(!gc_ref.is_i31());
    gc_ref.as_typed_unchecked()
}

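/// Convert a `VMExternRef` into a typed reference to its DRC `externref`
/// representation.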
fn externref_to_drc(externref: &VMExternRef) -> &TypedGcRef<VMDrcExternRef> {
    let gc_ref = externref.as_gc_ref();
    debug_assert!(!gc_ref.is_i31());
    gc_ref.as_typed_unchecked()
}

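/// The common header for all objects in a DRC heap: the generic `VMGcHeader`
/// followed by this object's reference count. The object's total size is
/// stored in the header's reserved 27-bit field.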
#[repr(C)]
struct VMDrcHeader {
    header: VMGcHeader,
    ref_count: UnsafeCell<u64>,
}

unsafe impl Send for VMDrcHeader {}
unsafe impl Sync for VMDrcHeader {}

unsafe impl GcHeapObject for VMDrcHeader {
    #[inline]
    fn is(_header: &VMGcHeader) -> bool {
        true
    }
}

impl VMDrcHeader {
    fn object_size(&self) -> usize {
        let size = self.header.reserved_u27();
        usize::try_from(size).unwrap()
    }
}

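/// The header for array objects in a DRC heap: the common DRC header followed
/// by the array's length.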
#[repr(C)]
struct VMDrcArrayHeader {
    header: VMDrcHeader,
    length: u32,
}

unsafe impl GcHeapObject for VMDrcArrayHeader {
    #[inline]
    fn is(header: &VMGcHeader) -> bool {
        header.kind() == VMGcKind::ArrayRef
    }
}

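/// The representation of an `externref` in a DRC heap: the common DRC header
/// followed by the ID of its entry in the host-data table.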
#[repr(C)]
struct VMDrcExternRef {
    header: VMDrcHeader,
    host_data: ExternRefHostDataId,
}

unsafe impl GcHeapObject for VMDrcExternRef {
    #[inline]
    fn is(header: &VMGcHeader) -> bool {
        header.kind() == VMGcKind::ExternRef
    }
}

unsafe impl GcHeap for DrcHeap {
    fn as_any(&self) -> &dyn Any {
        self as _
    }

    fn as_any_mut(&mut self) -> &mut dyn Any {
        self as _
    }

    fn enter_no_gc_scope(&mut self) {
        self.no_gc_count += 1;
    }

    fn exit_no_gc_scope(&mut self) {
        self.no_gc_count -= 1;
    }

    fn clone_gc_ref(&mut self, gc_ref: &VMGcRef) -> VMGcRef {
        self.inc_ref(gc_ref);
        gc_ref.unchecked_copy()
    }

    fn write_gc_ref(
        &mut self,
        host_data_table: &mut ExternRefHostDataTable,
        destination: &mut Option<VMGcRef>,
        source: Option<&VMGcRef>,
    ) {
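        // Increment the new reference's count before decrementing the old
        // one, so that writing a reference over itself does not drop the
        // count to zero and deallocate the object prematurely.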
        if let Some(src) = source {
            self.inc_ref(src);
        }

        if let Some(dest) = destination {
            self.dec_ref_and_maybe_dealloc(host_data_table, dest);
        }

        *destination = source.map(|s| s.unchecked_copy());
    }

    fn expose_gc_ref_to_wasm(&mut self, gc_ref: VMGcRef) {
        self.activations_table.insert_without_gc(gc_ref);
    }

    fn need_gc_before_entering_wasm(&self, num_gc_refs: NonZeroUsize) -> bool {
        num_gc_refs.get() > self.activations_table.bump_capacity_remaining()
    }

    fn alloc_externref(&mut self, host_data: ExternRefHostDataId) -> Result<Option<VMExternRef>> {
        let gc_ref =
            match self.alloc_raw(VMGcHeader::externref(), Layout::new::<VMDrcExternRef>())? {
                None => return Ok(None),
                Some(gc_ref) => gc_ref,
            };
        self.index_mut::<VMDrcExternRef>(gc_ref.as_typed_unchecked())
            .host_data = host_data;
        Ok(Some(gc_ref.into_externref_unchecked()))
    }

    fn externref_host_data(&self, externref: &VMExternRef) -> ExternRefHostDataId {
        let typed_ref = externref_to_drc(externref);
        self.index(typed_ref).host_data
    }

    fn header(&self, gc_ref: &VMGcRef) -> &VMGcHeader {
        self.index(gc_ref.as_typed_unchecked())
    }

    fn header_mut(&mut self, gc_ref: &VMGcRef) -> &mut VMGcHeader {
        self.index_mut(gc_ref.as_typed_unchecked())
    }

    fn object_size(&self, gc_ref: &VMGcRef) -> usize {
        let size = self.header(gc_ref).reserved_u27();
        usize::try_from(size).unwrap()
    }

    fn alloc_raw(&mut self, mut header: VMGcHeader, layout: Layout) -> Result<Option<VMGcRef>> {
        debug_assert!(layout.size() >= core::mem::size_of::<VMDrcHeader>());
        debug_assert!(layout.align() >= core::mem::align_of::<VMDrcHeader>());

        let size = u32::try_from(layout.size()).unwrap();
        if !VMGcKind::value_fits_in_unused_bits(size) {
            return Err(crate::Trap::AllocationTooLarge.into());
        }

        let gc_ref = match self.free_list.alloc(layout)? {
            None => return Ok(None),
            Some(index) => VMGcRef::from_heap_index(index).unwrap(),
        };

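        // Stash the object's total size in the header's reserved bits so that
        // `object_size` can recover it later.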
        debug_assert_eq!(header.reserved_u27(), 0);
        header.set_reserved_u27(size);

        *self.index_mut(drc_ref(&gc_ref)) = VMDrcHeader {
            header,
            ref_count: UnsafeCell::new(1),
        };
        log::trace!("increment {gc_ref:#p} ref count -> 1");
        Ok(Some(gc_ref))
    }

    fn alloc_uninit_struct(
        &mut self,
        ty: VMSharedTypeIndex,
        layout: &GcStructLayout,
    ) -> Result<Option<VMStructRef>> {
        let gc_ref = match self.alloc_raw(
            VMGcHeader::from_kind_and_index(VMGcKind::StructRef, ty),
            layout.layout(),
        )? {
            None => return Ok(None),
            Some(gc_ref) => gc_ref,
        };
        Ok(Some(gc_ref.into_structref_unchecked()))
    }

    fn dealloc_uninit_struct(&mut self, structref: VMStructRef) {
        self.dealloc(structref.into());
    }

    fn gc_object_data(&mut self, gc_ref: &VMGcRef) -> VMGcObjectDataMut<'_> {
        let range = self.object_range(gc_ref);
        let data = &mut self.heap_slice_mut()[range];
        VMGcObjectDataMut::new(data)
    }

    fn gc_object_data_pair(
        &mut self,
        a: &VMGcRef,
        b: &VMGcRef,
    ) -> (VMGcObjectDataMut<'_>, VMGcObjectDataMut<'_>) {
        assert_ne!(a, b);

        let a_range = self.object_range(a);
        let b_range = self.object_range(b);

        assert!(a_range.start <= a_range.end);
        assert!(b_range.start <= b_range.end);
        assert!(a_range.end <= b_range.start || b_range.end <= a_range.start);

        let (a_data, b_data) = if a_range.start < b_range.start {
            let (a_half, b_half) = self.heap_slice_mut().split_at_mut(b_range.start);
            let b_len = b_range.end - b_range.start;
            (&mut a_half[a_range], &mut b_half[..b_len])
        } else {
            let (b_half, a_half) = self.heap_slice_mut().split_at_mut(a_range.start);
            let a_len = a_range.end - a_range.start;
            (&mut a_half[..a_len], &mut b_half[b_range])
        };

        (
            VMGcObjectDataMut::new(a_data),
            VMGcObjectDataMut::new(b_data),
        )
    }

    fn alloc_uninit_array(
        &mut self,
        ty: VMSharedTypeIndex,
        length: u32,
        layout: &GcArrayLayout,
    ) -> Result<Option<VMArrayRef>> {
        let gc_ref = match self.alloc_raw(
            VMGcHeader::from_kind_and_index(VMGcKind::ArrayRef, ty),
            layout.layout(length),
        )? {
            None => return Ok(None),
            Some(gc_ref) => gc_ref,
        };
        self.index_mut::<VMDrcArrayHeader>(gc_ref.as_typed_unchecked())
            .length = length;
        Ok(Some(gc_ref.into_arrayref_unchecked()))
    }

    fn dealloc_uninit_array(&mut self, arrayref: VMArrayRef) {
        self.dealloc(arrayref.into())
    }

    fn array_len(&self, arrayref: &VMArrayRef) -> u32 {
        debug_assert!(arrayref.as_gc_ref().is_typed::<VMDrcArrayHeader>(self));
        self.index::<VMDrcArrayHeader>(arrayref.as_gc_ref().as_typed_unchecked())
            .length
    }

    fn gc<'a>(
        &'a mut self,
        roots: GcRootsIter<'a>,
        host_data_table: &'a mut ExternRefHostDataTable,
    ) -> Box<dyn GarbageCollection<'a> + 'a> {
        assert_eq!(self.no_gc_count, 0, "Cannot GC inside a no-GC scope!");
        Box::new(DrcCollection {
            roots,
            host_data_table,
            heap: self,
            phase: DrcCollectionPhase::Trace,
        })
    }

    unsafe fn vmctx_gc_heap_data(&self) -> NonNull<u8> {
        let ptr: NonNull<VMGcRefActivationsTable> = NonNull::from(&*self.activations_table);
        ptr.cast()
    }

    #[cfg(feature = "pooling-allocator")]
    fn reset(&mut self) {
        let DrcHeap {
            no_gc_count,
            activations_table,
            free_list,
            heap: _,
        } = self;

        *no_gc_count = 0;
        free_list.reset();
        activations_table.reset();
    }

    fn heap_slice(&self) -> &[UnsafeCell<u8>] {
        let ptr = self.heap.as_ptr().cast();
        let len = self.heap.len();
        unsafe { core::slice::from_raw_parts(ptr, len) }
    }

    fn heap_slice_mut(&mut self) -> &mut [u8] {
        let ptr = self.heap.as_mut_ptr();
        let len = self.heap.len();
        unsafe { core::slice::from_raw_parts_mut(ptr, len) }
    }
}

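/// The state of an in-progress, incremental DRC collection: the trace phase
/// followed by the sweep phase.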
struct DrcCollection<'a> {
    roots: GcRootsIter<'a>,
    host_data_table: &'a mut ExternRefHostDataTable,
    heap: &'a mut DrcHeap,
    phase: DrcCollectionPhase,
}

enum DrcCollectionPhase {
    Trace,
    Sweep,
    Done,
}

impl<'a> GarbageCollection<'a> for DrcCollection<'a> {
    fn collect_increment(&mut self) -> GcProgress {
        match self.phase {
            DrcCollectionPhase::Trace => {
                log::trace!("Begin DRC trace");
                self.heap.trace(&mut self.roots);
                log::trace!("End DRC trace");
                self.phase = DrcCollectionPhase::Sweep;
                GcProgress::Continue
            }
            DrcCollectionPhase::Sweep => {
                log::trace!("Begin DRC sweep");
                self.heap.sweep(self.host_data_table);
                log::trace!("End DRC sweep");
                self.phase = DrcCollectionPhase::Done;
                GcProgress::Complete
            }
            DrcCollectionPhase::Done => GcProgress::Complete,
        }
    }
}

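/// A single slot in the activations table's bump chunk: the raw `u32`
/// representation of a `VMGcRef`, or zero when empty.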
type TableElem = UnsafeCell<u32>;

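/// A table that over-approximates the set of GC references rooted on the Wasm
/// stack.
///
/// References newly exposed to Wasm are bump-allocated into an inline chunk
/// while there is capacity, and spill into a hash set otherwise; each GC's
/// sweep phase prunes the table back down to the precise set of stack roots
/// found during tracing.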
#[repr(C)]
struct VMGcRefActivationsTable {
    alloc: VMGcRefTableAlloc,

    over_approximated_stack_roots: HashSet<VMGcRef>,

    precise_stack_roots: HashSet<VMGcRef>,
}

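/// The bump-allocation state of a `VMGcRefActivationsTable`.
///
/// This is `#[repr(C)]` so that the `next` and `end` fields sit at the fixed
/// offsets checked by the tests at the bottom of this file.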
#[repr(C)]
struct VMGcRefTableAlloc {
    next: UnsafeCell<NonNull<TableElem>>,

    end: NonNull<TableElem>,

    chunk: Box<[TableElem]>,
}

impl Default for VMGcRefTableAlloc {
    fn default() -> Self {
        let mut chunk: Box<[TableElem]> = Box::new([]);
        let next = chunk.as_mut_ptr();
        let end = unsafe { next.add(chunk.len()) };
        VMGcRefTableAlloc {
            next: UnsafeCell::new(NonNull::new(next).unwrap()),
            end: NonNull::new(end).unwrap(),
            chunk,
        }
    }
}

impl VMGcRefTableAlloc {
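    /// The number of elements in the bump chunk: one 4096-byte page's worth of
    /// `TableElem`s.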
    const CHUNK_SIZE: usize = 4096 / mem::size_of::<TableElem>();

    fn force_allocation(&mut self) {
        assert!(self.chunk.is_empty());
        self.chunk = (0..Self::CHUNK_SIZE).map(|_| UnsafeCell::new(0)).collect();
        self.reset();
    }

    fn reset(&mut self) {
        self.next = UnsafeCell::new(NonNull::new(self.chunk.as_mut_ptr()).unwrap());
        self.end = NonNull::new(unsafe { self.chunk.as_mut_ptr().add(self.chunk.len()) }).unwrap();
    }
}

unsafe impl Send for VMGcRefTableAlloc {}
unsafe impl Sync for VMGcRefTableAlloc {}

fn _assert_send_sync() {
    fn _assert<T: Send + Sync>() {}
    _assert::<VMGcRefActivationsTable>();
}

impl Default for VMGcRefActivationsTable {
    fn default() -> Self {
        Self::new()
    }
}

impl VMGcRefActivationsTable {
    fn new() -> Self {
        VMGcRefActivationsTable {
            alloc: VMGcRefTableAlloc::default(),
            over_approximated_stack_roots: HashSet::new(),
            precise_stack_roots: HashSet::new(),
        }
    }

    #[cfg(feature = "pooling-allocator")]
    fn reset(&mut self) {
        let VMGcRefActivationsTable {
            alloc,
            over_approximated_stack_roots,
            precise_stack_roots,
        } = self;

        alloc.reset();
        over_approximated_stack_roots.clear();
        precise_stack_roots.clear();
    }

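    /// The bump-allocation capacity remaining in the chunk, measured as the
    /// number of bytes between the `next` finger and `end`.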
    #[inline]
    fn bump_capacity_remaining(&self) -> usize {
        let end = self.alloc.end.as_ptr() as usize;
        let next = unsafe { *self.alloc.next.get() };
        end - next.as_ptr() as usize
    }

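    /// Try to bump-allocate a slot for `gc_ref`, handing the reference back to
    /// the caller if the chunk is full.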
    #[inline]
    fn try_insert(&mut self, gc_ref: VMGcRef) -> Result<(), VMGcRef> {
        unsafe {
            let next = *self.alloc.next.get();
            if next == self.alloc.end {
                return Err(gc_ref);
            }

            debug_assert_eq!(
                (*next.as_ref().get()),
                0,
                "slots >= the `next` bump finger are always `None`"
            );
            ptr::write(next.as_ptr(), UnsafeCell::new(gc_ref.as_raw_u32()));

            let next = NonNull::new_unchecked(next.as_ptr().add(1));
            debug_assert!(next <= self.alloc.end);
            *self.alloc.next.get() = next;

            Ok(())
        }
    }

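    /// Insert a GC reference into the table without triggering a GC: try the
    /// bump chunk first, falling back to the over-approximated hash set.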
    #[inline]
    fn insert_without_gc(&mut self, gc_ref: VMGcRef) {
        if let Err(gc_ref) = self.try_insert(gc_ref) {
            self.insert_slow_without_gc(gc_ref);
        }
    }

    #[inline(never)]
    fn insert_slow_without_gc(&mut self, gc_ref: VMGcRef) {
        self.over_approximated_stack_roots.insert(gc_ref);
    }

    fn num_filled_in_bump_chunk(&self) -> usize {
        let next = unsafe { *self.alloc.next.get() };
        let bytes_unused = (self.alloc.end.as_ptr() as usize) - (next.as_ptr() as usize);
        let slots_unused = bytes_unused / mem::size_of::<TableElem>();
        self.alloc.chunk.len().saturating_sub(slots_unused)
    }

    fn elements(&self, mut f: impl FnMut(&VMGcRef)) {
        for elem in self.over_approximated_stack_roots.iter() {
            f(elem);
        }

        let num_filled = self.num_filled_in_bump_chunk();
        for slot in self.alloc.chunk.iter().take(num_filled) {
            if let Some(elem) = VMGcRef::from_raw_u32(unsafe { *slot.get() }) {
                f(&elem);
            }
        }
    }
}

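/// A wrapper whose contents may only be dereferenced when `debug_assertions`
/// are enabled; dereferencing in a release build panics.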
#[derive(Debug, Default)]
struct DebugOnly<T> {
    inner: T,
}

impl<T> Deref for DebugOnly<T> {
    type Target = T;

    fn deref(&self) -> &T {
        if cfg!(debug_assertions) {
            &self.inner
        } else {
            panic!(
                "only deref `DebugOnly` when `cfg(debug_assertions)` or \
                 inside a `debug_assert!(..)`"
            )
        }
    }
}

impl<T> DerefMut for DebugOnly<T> {
    fn deref_mut(&mut self) -> &mut T {
        if cfg!(debug_assertions) {
            &mut self.inner
        } else {
            panic!(
                "only deref `DebugOnly` when `cfg(debug_assertions)` or \
                 inside a `debug_assert!(..)`"
            )
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use wasmtime_environ::HostPtr;

    #[test]
    fn vm_drc_header_size_align() {
        assert_eq!(
            (wasmtime_environ::drc::HEADER_SIZE as usize),
            core::mem::size_of::<VMDrcHeader>()
        );
        assert_eq!(
            (wasmtime_environ::drc::HEADER_ALIGN as usize),
            core::mem::align_of::<VMDrcHeader>()
        );
    }

    #[test]
    fn vm_drc_array_header_length_offset() {
        assert_eq!(
            wasmtime_environ::drc::ARRAY_LENGTH_OFFSET,
            u32::try_from(core::mem::offset_of!(VMDrcArrayHeader, length)).unwrap(),
        );
    }

    #[test]
    fn ref_count_is_at_correct_offset() {
        let extern_data = VMDrcHeader {
            header: VMGcHeader::externref(),
            ref_count: UnsafeCell::new(0),
        };

        let extern_data_ptr = &extern_data as *const _;
        let ref_count_ptr = &extern_data.ref_count as *const _;

        let actual_offset = (ref_count_ptr as usize) - (extern_data_ptr as usize);

        let offsets = wasmtime_environ::VMOffsets::from(wasmtime_environ::VMOffsetsFields {
            ptr: HostPtr,
            num_imported_functions: 0,
            num_imported_tables: 0,
            num_imported_memories: 0,
            num_imported_globals: 0,
            num_imported_tags: 0,
            num_defined_tables: 0,
            num_defined_memories: 0,
            num_owned_memories: 0,
            num_defined_globals: 0,
            num_defined_tags: 0,
            num_escaped_funcs: 0,
        });

        assert_eq!(
            offsets.vm_drc_header_ref_count(),
            u32::try_from(actual_offset).unwrap(),
        );
    }

    #[test]
    fn table_next_is_at_correct_offset() {
        let table = VMGcRefActivationsTable::new();

        let table_ptr = &table as *const _;
        let next_ptr = &table.alloc.next as *const _;

        let actual_offset = (next_ptr as usize) - (table_ptr as usize);

        let offsets = wasmtime_environ::VMOffsets::from(wasmtime_environ::VMOffsetsFields {
            ptr: HostPtr,
            num_imported_functions: 0,
            num_imported_tables: 0,
            num_imported_memories: 0,
            num_imported_globals: 0,
            num_imported_tags: 0,
            num_defined_tables: 0,
            num_defined_memories: 0,
            num_owned_memories: 0,
            num_defined_globals: 0,
            num_defined_tags: 0,
            num_escaped_funcs: 0,
        });
        assert_eq!(
            offsets.vm_gc_ref_activation_table_next() as usize,
            actual_offset
        );
    }

    #[test]
    fn table_end_is_at_correct_offset() {
        let table = VMGcRefActivationsTable::new();

        let table_ptr = &table as *const _;
        let end_ptr = &table.alloc.end as *const _;

        let actual_offset = (end_ptr as usize) - (table_ptr as usize);

        let offsets = wasmtime_environ::VMOffsets::from(wasmtime_environ::VMOffsetsFields {
            ptr: HostPtr,
            num_imported_functions: 0,
            num_imported_tables: 0,
            num_imported_memories: 0,
            num_imported_globals: 0,
            num_imported_tags: 0,
            num_defined_tables: 0,
            num_defined_memories: 0,
            num_owned_memories: 0,
            num_defined_globals: 0,
            num_defined_tags: 0,
            num_escaped_funcs: 0,
        });
        assert_eq!(
            offsets.vm_gc_ref_activation_table_end() as usize,
            actual_offset
        );
    }
}