wasmtime/runtime/vm/instance/allocator/pooling/gc_heap_pool.rs

use super::index_allocator::{SimpleIndexAllocator, SlotId};
use super::GcHeapAllocationIndex;
use crate::prelude::*;
use crate::runtime::vm::{GcHeap, GcRuntime, PoolingInstanceAllocatorConfig, Result};
use std::sync::Mutex;

/// A pool of reusable GC heaps.
pub struct GcHeapPool {
    max_gc_heaps: usize,
    index_allocator: SimpleIndexAllocator,
    heaps: Mutex<Vec<Option<Box<dyn GcHeap>>>>,
}

impl std::fmt::Debug for GcHeapPool {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("GcHeapPool")
            .field("max_gc_heaps", &self.max_gc_heaps)
            .field("index_allocator", &self.index_allocator)
            .field("heaps", &"..")
            .finish()
    }
}

impl GcHeapPool {
    /// Create a new `GcHeapPool` with the given configuration.
    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
        let index_allocator = SimpleIndexAllocator::new(config.limits.total_gc_heaps);
        let max_gc_heaps = usize::try_from(config.limits.total_gc_heaps).unwrap();

        // Each individual GC heap in the pool is lazily allocated. See the
        // `allocate` method.
        let heaps = Mutex::new((0..max_gc_heaps).map(|_| None).collect());

        Ok(Self {
            max_gc_heaps,
            index_allocator,
            heaps,
        })
    }
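
    // A minimal construction sketch (illustrative; it assumes
    // `PoolingInstanceAllocatorConfig` provides a `Default` impl):
    //
    //     let mut config = PoolingInstanceAllocatorConfig::default();
    //     config.limits.total_gc_heaps = 10;
    //     let pool = GcHeapPool::new(&config)?;
    //
    // `config.limits.total_gc_heaps` bounds how many GC heaps may be live in
    // the pool at once; `allocate` fails once that limit is reached.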

    /// Are there zero slots in use right now?
    #[allow(unused)] // some cfgs don't use this
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }

    /// Allocate a single GC heap from the pool, using the given GC runtime to
    /// create it if this slot's heap hasn't been created yet.
    pub fn allocate(
        &self,
        gc_runtime: &dyn GcRuntime,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
        let allocation_index = self
            .index_allocator
            .alloc()
            .map(|slot| GcHeapAllocationIndex(slot.0))
            .ok_or_else(|| {
                anyhow!(
                    "maximum concurrent GC heap limit of {} reached",
                    self.max_gc_heaps
                )
            })?;
        debug_assert_ne!(allocation_index, GcHeapAllocationIndex::default());

        let heap = match {
            let mut heaps = self.heaps.lock().unwrap();
            heaps[allocation_index.index()].take()
        } {
            // If we already have a heap at this slot, reuse it.
            Some(heap) => heap,
            // Otherwise, we haven't forced this slot's lazily allocated heap
            // yet. So do that now.
            None => gc_runtime.new_gc_heap()?,
        };

        Ok((allocation_index, heap))
    }

    /// Deallocate a previously-allocated GC heap.
    pub fn deallocate(&self, allocation_index: GcHeapAllocationIndex, mut heap: Box<dyn GcHeap>) {
        debug_assert_ne!(allocation_index, GcHeapAllocationIndex::default());
        heap.reset();

        // NB: Replace the heap before freeing the index. If we did it in the
        // opposite order, a concurrent allocation request could reallocate the
        // index before we have replaced the heap.

        {
            let mut heaps = self.heaps.lock().unwrap();
            let old_entry = std::mem::replace(&mut heaps[allocation_index.index()], Some(heap));
            debug_assert!(old_entry.is_none());
        }

        self.index_allocator.free(SlotId(allocation_index.0));
    }
}
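
// A minimal usage sketch (illustrative; `pool` and `gc_runtime` stand in for a
// `GcHeapPool` and whatever `&dyn GcRuntime` the embedder configured). The
// `(index, heap)` pair returned by `allocate` is expected to be handed back to
// `deallocate` once the heap is no longer in use:
//
//     let (index, heap) = pool.allocate(gc_runtime)?;
//     // ... hand `heap` out for GC allocations ...
//     pool.deallocate(index, heap);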