wasmtime/runtime/vm/instance/allocator/pooling.rs

//! Implements the pooling instance allocator.
//!
//! The pooling instance allocator maps memory in advance and allocates
//! instances, memories, tables, and stacks from a pool of available resources.
//! Using the pooling instance allocator can speed up module instantiation when
//! modules can be constrained based on configurable limits
//! ([`InstanceLimits`]). Each new instance is stored in a "slot"; as instances
//! are allocated and freed, these slots are either filled or emptied:
//!
//! ```text
//! ┌──────┬──────┬──────┬──────┬──────┐
//! │Slot 0│Slot 1│Slot 2│Slot 3│......│
//! └──────┴──────┴──────┴──────┴──────┘
//! ```
//!
//! Each slot has a "slot ID"--an index into the pool. Slot IDs are handed out
//! by the [`index_allocator`] module. Note that each kind of pool-allocated
//! item is stored in its own separate pool: [`memory_pool`], [`table_pool`],
//! [`stack_pool`]. See those modules for more details.
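//!
//! Externally, this allocator is enabled through the public
//! `wasmtime::PoolingAllocationConfig` type. A rough sketch of that flow is
//! below; the method names come from the public crate API and may differ
//! across versions, and the values shown are illustrative only:
//!
//! ```ignore
//! use wasmtime::{Config, Engine, InstanceAllocationStrategy, PoolingAllocationConfig};
//!
//! // Configure a small pool and hand it to the engine.
//! let mut pool = PoolingAllocationConfig::default();
//! pool.total_core_instances(100);
//! pool.total_memories(100);
//! let mut config = Config::new();
//! config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));
//! let engine = Engine::new(&config).unwrap();
//! ```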

mod decommit_queue;
mod index_allocator;
mod memory_pool;
mod table_pool;

#[cfg(feature = "gc")]
mod gc_heap_pool;

#[cfg(all(feature = "async"))]
mod generic_stack_pool;
#[cfg(all(feature = "async", unix, not(miri)))]
mod unix_stack_pool;

#[cfg(all(feature = "async"))]
cfg_if::cfg_if! {
    if #[cfg(all(unix, not(miri), not(asan)))] {
        use unix_stack_pool as stack_pool;
    } else {
        use generic_stack_pool as stack_pool;
    }
}

use self::decommit_queue::DecommitQueue;
use self::memory_pool::MemoryPool;
use self::table_pool::TablePool;
use super::{
    InstanceAllocationRequest, InstanceAllocatorImpl, MemoryAllocationIndex, TableAllocationIndex,
};
use crate::prelude::*;
use crate::runtime::vm::{
    instance::Instance,
    mpk::{self, ProtectionKey, ProtectionMask},
    CompiledModuleId, Memory, Table,
};
use crate::MpkEnabled;
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::{Mutex, MutexGuard};
use std::{
    mem,
    sync::atomic::{AtomicU64, Ordering},
};
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, Tunables, VMOffsets,
};

#[cfg(feature = "gc")]
use super::GcHeapAllocationIndex;
#[cfg(feature = "gc")]
use crate::runtime::vm::{GcHeap, GcRuntime};
#[cfg(feature = "gc")]
use gc_heap_pool::GcHeapPool;

#[cfg(feature = "async")]
use stack_pool::StackPool;

#[cfg(feature = "component-model")]
use wasmtime_environ::{
    component::{Component, VMComponentOffsets},
    StaticModuleIndex,
};

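/// Rounds `n` up to the next multiple of `to`, which must be a nonzero power
/// of two (e.g. `round_up_to_pow2(13, 8)` and `round_up_to_pow2(16, 8)` both
/// return `16`).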
fn round_up_to_pow2(n: usize, to: usize) -> usize {
    debug_assert!(to > 0);
    debug_assert!(to.is_power_of_two());
    (n + to - 1) & !(to - 1)
}

/// Instance-related limit configuration for pooling.
///
/// More docs on this can be found at `wasmtime::PoolingAllocationConfig`.
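///
/// Limits are plain data with a `Default` impl, so a configuration is
/// typically built by overriding a few fields on top of the defaults, as the
/// tests at the bottom of this file do (values below are illustrative only):
///
/// ```ignore
/// let limits = InstanceLimits {
///     total_memories: 10,
///     max_memory_size: 1 << 30, // 1 GiB per linear memory
///     ..Default::default()
/// };
/// ```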
#[derive(Debug, Copy, Clone)]
pub struct InstanceLimits {
    /// The maximum number of component instances that may be allocated
    /// concurrently.
    pub total_component_instances: u32,

    /// The maximum size of a component's `VMComponentContext`, not including
    /// any of its inner core modules' `VMContext` sizes.
    pub component_instance_size: usize,

    /// The maximum number of core module instances that may be allocated
    /// concurrently.
    pub total_core_instances: u32,

    /// The maximum number of core module instances that a single component may
    /// transitively contain.
    pub max_core_instances_per_component: u32,

    /// The maximum number of Wasm linear memories that a component may
    /// transitively contain.
    pub max_memories_per_component: u32,

    /// The maximum number of tables that a component may transitively contain.
    pub max_tables_per_component: u32,

    /// The total number of linear memories in the pool, across all instances.
    pub total_memories: u32,

    /// The total number of tables in the pool, across all instances.
    pub total_tables: u32,

    /// The total number of async stacks in the pool, across all instances.
    #[cfg(feature = "async")]
    pub total_stacks: u32,

    /// Maximum size of a core instance's `VMContext`.
    pub core_instance_size: usize,

    /// Maximum number of tables per instance.
    pub max_tables_per_module: u32,

    /// Maximum number of table elements per table.
    pub table_elements: usize,

    /// Maximum number of linear memories per instance.
    pub max_memories_per_module: u32,

    /// Maximum byte size of a linear memory; this must be smaller than
    /// `memory_reservation` in `Tunables`.
    pub max_memory_size: usize,

    /// The total number of GC heaps in the pool, across all instances.
    #[cfg(feature = "gc")]
    pub total_gc_heaps: u32,
}

impl Default for InstanceLimits {
    fn default() -> Self {
        let total = if cfg!(target_pointer_width = "32") {
            100
        } else {
            1000
        };
        // See doc comments for `wasmtime::PoolingAllocationConfig` for these
        // default values
        Self {
            total_component_instances: total,
            component_instance_size: 1 << 20, // 1 MiB
            total_core_instances: total,
            max_core_instances_per_component: u32::MAX,
            max_memories_per_component: u32::MAX,
            max_tables_per_component: u32::MAX,
            total_memories: total,
            total_tables: total,
            #[cfg(feature = "async")]
            total_stacks: total,
            core_instance_size: 1 << 20, // 1 MiB
            max_tables_per_module: 1,
            // NB: in #8504 it was seen that a C# module in debug mode can
            // have 10k+ elements.
            table_elements: 20_000,
            max_memories_per_module: 1,
            #[cfg(target_pointer_width = "64")]
            max_memory_size: 1 << 32, // 4 GiB
            #[cfg(target_pointer_width = "32")]
            max_memory_size: 10 << 20, // 10 MiB
            #[cfg(feature = "gc")]
            total_gc_heaps: total,
        }
    }
}

/// Configuration options for the pooling instance allocator supplied at
/// construction.
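///
/// As with `InstanceLimits`, this is plain data with a `Default` impl, so a
/// configuration is usually built by overriding individual fields on top of
/// the defaults (a sketch with illustrative values, mirroring the tests at
/// the bottom of this file):
///
/// ```ignore
/// let config = PoolingInstanceAllocatorConfig {
///     max_unused_warm_slots: 10,
///     limits: InstanceLimits {
///         total_memories: 10,
///         ..Default::default()
///     },
///     ..PoolingInstanceAllocatorConfig::default()
/// };
/// ```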
#[derive(Copy, Clone, Debug)]
pub struct PoolingInstanceAllocatorConfig {
    /// See `PoolingAllocationConfig::max_unused_warm_slots` in `wasmtime`
    pub max_unused_warm_slots: u32,
    /// The target number of decommits to do per batch. This is not precise, as
    /// we can queue up decommits at times when we aren't prepared to
    /// immediately flush them, and so we may go over this target size
    /// occasionally.
    pub decommit_batch_size: usize,
    /// The size, in bytes, of async stacks to allocate (not including the guard
    /// page).
    pub stack_size: usize,
    /// The limits to apply to instances allocated within this allocator.
    pub limits: InstanceLimits,
    /// Whether or not async stacks are zeroed after use.
    pub async_stack_zeroing: bool,
    /// If async stack zeroing is enabled and the host platform is Linux, this
    /// is how much memory to zero out with `memset`.
    ///
    /// The rest of the stack's memory will be zeroed out with `madvise`.
    #[cfg(feature = "async")]
    pub async_stack_keep_resident: usize,
    /// How much linear memory, in bytes, to keep resident after resetting for
    /// use with the next instance. This much memory will be `memset` to zero
    /// when a linear memory is deallocated.
    ///
    /// Memory exceeding this amount in the wasm linear memory will be released
    /// with `madvise` back to the kernel.
    ///
    /// Only applicable on Linux.
    pub linear_memory_keep_resident: usize,
    /// Same as `linear_memory_keep_resident` but for tables.
    pub table_keep_resident: usize,
    /// Whether to enable memory protection keys.
    pub memory_protection_keys: MpkEnabled,
    /// How many memory protection keys to allocate.
    pub max_memory_protection_keys: usize,
}

impl Default for PoolingInstanceAllocatorConfig {
    fn default() -> PoolingInstanceAllocatorConfig {
        PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 100,
            decommit_batch_size: 1,
            stack_size: 2 << 20,
            limits: InstanceLimits::default(),
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            async_stack_keep_resident: 0,
            linear_memory_keep_resident: 0,
            table_keep_resident: 0,
            memory_protection_keys: MpkEnabled::Disable,
            max_memory_protection_keys: 16,
        }
    }
}

/// An error returned when the pooling allocator cannot allocate a table,
/// memory, etc. because the maximum number of concurrent allocations for that
/// entity has been reached.
#[derive(Debug)]
pub struct PoolConcurrencyLimitError {
    limit: usize,
    kind: Cow<'static, str>,
}

impl std::error::Error for PoolConcurrencyLimitError {}

impl Display for PoolConcurrencyLimitError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let limit = self.limit;
        let kind = &self.kind;
        write!(f, "maximum concurrent limit of {limit} for {kind} reached")
    }
}

impl PoolConcurrencyLimitError {
    fn new(limit: usize, kind: impl Into<Cow<'static, str>>) -> Self {
        Self {
            limit,
            kind: kind.into(),
        }
    }
}

/// Implements the pooling instance allocator.
///
/// This allocator internally maintains pools of instances, memories, tables,
/// and stacks.
///
/// Note: the resource pools are manually dropped so that the fault handler
/// terminates correctly.
#[derive(Debug)]
pub struct PoolingInstanceAllocator {
    decommit_batch_size: usize,
    limits: InstanceLimits,

    // The number of live core module and component instances at any given
    // time. Note that this can temporarily go over the configured limit. That
    // doesn't mean we have actually overshot; it means we attempted to
    // allocate a new instance, incremented the counter, saw (or are about to
    // see) that the counter is beyond the configured threshold, and will
    // decrement the counter and return an error but haven't done so yet. See
    // the increment trait methods for more details.
    live_core_instances: AtomicU64,
    live_component_instances: AtomicU64,

    decommit_queue: Mutex<DecommitQueue>,
    memories: MemoryPool,
    tables: TablePool,

    #[cfg(feature = "gc")]
    gc_heaps: GcHeapPool,

    #[cfg(feature = "async")]
    stacks: StackPool,
}

#[cfg(debug_assertions)]
impl Drop for PoolingInstanceAllocator {
    fn drop(&mut self) {
        // NB: when cfg(not(debug_assertions)) it is okay that we don't flush
        // the queue, as the sub-pools will unmap those ranges anyway, so
        // there's no point in decommitting them. But we do need to flush the
        // queue when debug assertions are enabled to make sure that all
        // entities get returned to their associated sub-pools and we can
        // differentiate between a leaking slot and an enqueued-for-decommit
        // slot.
        let queue = self.decommit_queue.lock().unwrap();
        self.flush_decommit_queue(queue);

        debug_assert_eq!(self.live_component_instances.load(Ordering::Acquire), 0);
        debug_assert_eq!(self.live_core_instances.load(Ordering::Acquire), 0);

        debug_assert!(self.memories.is_empty());
        debug_assert!(self.tables.is_empty());

        #[cfg(feature = "gc")]
        debug_assert!(self.gc_heaps.is_empty());

        #[cfg(feature = "async")]
        debug_assert!(self.stacks.is_empty());
    }
}

impl PoolingInstanceAllocator {
    /// Creates a new pooling instance allocator with the given configuration and tunables.
    pub fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result<Self> {
        Ok(Self {
            decommit_batch_size: config.decommit_batch_size,
            limits: config.limits,
            live_component_instances: AtomicU64::new(0),
            live_core_instances: AtomicU64::new(0),
            decommit_queue: Mutex::new(DecommitQueue::default()),
            memories: MemoryPool::new(config, tunables)?,
            tables: TablePool::new(config)?,
            #[cfg(feature = "gc")]
            gc_heaps: GcHeapPool::new(config)?,
            #[cfg(feature = "async")]
            stacks: StackPool::new(config)?,
        })
    }

    fn core_instance_size(&self) -> usize {
        round_up_to_pow2(self.limits.core_instance_size, mem::align_of::<Instance>())
    }

    fn validate_table_plans(&self, module: &Module) -> Result<()> {
        self.tables.validate(module)
    }

    fn validate_memory_plans(&self, module: &Module) -> Result<()> {
        self.memories.validate(module)
    }

    fn validate_core_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        let layout = Instance::alloc_layout(offsets);
        if layout.size() <= self.core_instance_size() {
            return Ok(());
        }

        // If this `module` exceeds the allocation size allotted to it then an
        // error will be reported here. The error of "required N bytes but
        // cannot allocate that" is pretty opaque, however, because it's not
        // clear what the breakdown of the N bytes is or what to optimize
        // next. To help provide a better error message, the fancy-ish logic
        // below reports the breakdown of the byte request into its largest
        // portions and where they come from.
        let mut message = format!(
            "instance allocation for this module \
             requires {} bytes which exceeds the configured maximum \
             of {} bytes; breakdown of allocation requirement:\n\n",
            layout.size(),
            self.core_instance_size(),
        );

        let mut remaining = layout.size();
        let mut push = |name: &str, bytes: usize| {
            assert!(remaining >= bytes);
            remaining -= bytes;

            // If the `name` region is more than 5% of the allocation request
            // then report it here; otherwise ignore it. We have fewer than 20
            // fields, so we're guaranteed that something will be reported, and
            // otherwise it's not particularly interesting to learn about 5
            // different fields that are all 8 or 0 bytes. Only try to report
            // the "major" sources of bytes here.
            if bytes > layout.size() / 20 {
                message.push_str(&format!(
                    " * {:.02}% - {} bytes - {}\n",
                    ((bytes as f32) / (layout.size() as f32)) * 100.0,
                    bytes,
                    name,
                ));
            }
        };

        // The `Instance` itself requires some size allocated to it.
        push("instance state management", mem::size_of::<Instance>());

        // Afterwards the `VMContext`'s regions are why we're requesting bytes,
        // so ask it for descriptions on each region's byte size.
        for (desc, size) in offsets.region_sizes() {
            push(desc, size as usize);
        }

        // double-check we accounted for all the bytes
        assert_eq!(remaining, 0);

        bail!("{}", message)
    }

    #[cfg(feature = "component-model")]
    fn validate_component_instance_size(
        &self,
        offsets: &VMComponentOffsets<HostPtr>,
    ) -> Result<()> {
        if usize::try_from(offsets.size_of_vmctx()).unwrap() <= self.limits.component_instance_size
        {
            return Ok(());
        }

        // TODO: Add context with detailed accounting of what makes up all the
        // `VMComponentContext`'s space like we do for module instances.
        bail!(
            "instance allocation for this component requires {} bytes of `VMComponentContext` \
             space which exceeds the configured maximum of {} bytes",
            offsets.size_of_vmctx(),
            self.limits.component_instance_size
        )
    }

    fn flush_decommit_queue(&self, mut locked_queue: MutexGuard<'_, DecommitQueue>) -> bool {
        // Take the queue out of the mutex and drop the lock, to minimize
        // contention.
        let queue = mem::take(&mut *locked_queue);
        drop(locked_queue);
        queue.flush(self)
    }

    /// Execute `f` and if it returns `Err(PoolConcurrencyLimitError)`, then try
    /// flushing the decommit queue. If flushing the queue freed up slots, then
    /// try running `f` again.
    fn with_flush_and_retry<T>(&self, mut f: impl FnMut() -> Result<T>) -> Result<T> {
        f().or_else(|e| {
            if e.is::<PoolConcurrencyLimitError>() {
                let queue = self.decommit_queue.lock().unwrap();
                if self.flush_decommit_queue(queue) {
                    return f();
                }
            }

            Err(e)
        })
    }

    fn merge_or_flush(&self, mut local_queue: DecommitQueue) {
        match local_queue.raw_len() {
            // If we didn't enqueue any regions for decommit, then we must have
            // either memset the whole entity or eagerly remapped it to zero
            // because we don't have Linux's `madvise(DONTNEED)` semantics. In
            // either case, the entity slot is ready for reuse immediately.
            0 => {
                local_queue.flush(self);
            }

            // We enqueued at least our batch size of regions for decommit, so
            // flush the local queue immediately. Don't bother inspecting (or
            // locking!) the shared queue.
            n if n >= self.decommit_batch_size => {
                local_queue.flush(self);
            }

            // If we enqueued some regions for decommit but did not reach our
            // batch size, then we don't want to flush yet; instead, merge the
            // local queue into the shared queue.
            n => {
                debug_assert!(n < self.decommit_batch_size);
                let mut shared_queue = self.decommit_queue.lock().unwrap();
                shared_queue.append(&mut local_queue);
                // And if the shared queue now has at least as many regions
                // enqueued for decommit as our batch size, then we can flush
                // it.
                if shared_queue.raw_len() >= self.decommit_batch_size {
                    self.flush_decommit_queue(shared_queue);
                }
            }
        }
    }
}

unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator {
    #[cfg(feature = "component-model")]
    fn validate_component_impl<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()> {
        self.validate_component_instance_size(offsets)
            .context("component instance size does not fit in pooling allocator requirements")?;

        let mut num_core_instances = 0;
        let mut num_memories = 0;
        let mut num_tables = 0;
        for init in &component.initializers {
            use wasmtime_environ::component::GlobalInitializer::*;
            use wasmtime_environ::component::InstantiateModule;
            match init {
                InstantiateModule(InstantiateModule::Import(_, _)) => {
                    num_core_instances += 1;
                    // Can't statically account for the total vmctx size, number
                    // of memories, and number of tables in this component.
                }
                InstantiateModule(InstantiateModule::Static(static_module_index, _)) => {
                    let module = get_module(*static_module_index);
                    let offsets = VMOffsets::new(HostPtr, &module);
                    self.validate_module_impl(module, &offsets)?;
                    num_core_instances += 1;
                    num_memories += module.num_defined_memories();
                    num_tables += module.num_defined_tables();
                }
                LowerImport { .. }
                | ExtractMemory(_)
                | ExtractTable(_)
                | ExtractRealloc(_)
                | ExtractCallback(_)
                | ExtractPostReturn(_)
                | Resource(_) => {}
            }
        }

        if num_core_instances
            > usize::try_from(self.limits.max_core_instances_per_component).unwrap()
        {
            bail!(
                "The component transitively contains {num_core_instances} core module instances, \
                 which exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_core_instances_per_component
            );
        }

        if num_memories > usize::try_from(self.limits.max_memories_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_memories} Wasm linear memories, which \
                 exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_memories_per_component
            );
        }

        if num_tables > usize::try_from(self.limits.max_tables_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_tables} tables, which exceeds the \
                 configured maximum of {} in the pooling allocator",
                self.limits.max_tables_per_component
            );
        }

        Ok(())
    }

    fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        self.validate_memory_plans(module)
            .context("module memory does not fit in pooling allocator requirements")?;
        self.validate_table_plans(module)
            .context("module table does not fit in pooling allocator requirements")?;
        self.validate_core_instance_size(offsets)
            .context("module instance size does not fit in pooling allocator requirements")?;
        Ok(())
    }

    fn increment_component_instance_count(&self) -> Result<()> {
        let old_count = self.live_component_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_component_instances) {
            self.decrement_component_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_component_instances).unwrap(),
                "component instances",
            )
            .into());
        }
        Ok(())
    }

    fn decrement_component_instance_count(&self) {
        self.live_component_instances.fetch_sub(1, Ordering::AcqRel);
    }

    fn increment_core_instance_count(&self) -> Result<()> {
        let old_count = self.live_core_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_core_instances) {
            self.decrement_core_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_core_instances).unwrap(),
                "core instances",
            )
            .into());
        }
        Ok(())
    }

    fn decrement_core_instance_count(&self) {
        self.live_core_instances.fetch_sub(1, Ordering::AcqRel);
    }

    unsafe fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        memory_index: DefinedMemoryIndex,
    ) -> Result<(MemoryAllocationIndex, Memory)> {
        self.with_flush_and_retry(|| self.memories.allocate(request, ty, tunables, memory_index))
    }

    unsafe fn deallocate_memory(
        &self,
        _memory_index: DefinedMemoryIndex,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    ) {
        // Reset the image slot. If there is any error clearing the
        // image, just drop it here, and let the drop handler for the
        // slot unmap in a way that retains the address space
        // reservation.
        let mut image = memory.unwrap_static_image();
        let mut queue = DecommitQueue::default();
        image
            .clear_and_remain_ready(self.memories.keep_resident, |ptr, len| {
                queue.push_raw(ptr, len);
            })
            .expect("failed to reset memory image");
        queue.push_memory(allocation_index, image);
        self.merge_or_flush(queue);
    }

    unsafe fn allocate_table(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Table,
        tunables: &Tunables,
        _table_index: DefinedTableIndex,
    ) -> Result<(super::TableAllocationIndex, Table)> {
        self.with_flush_and_retry(|| self.tables.allocate(request, ty, tunables))
    }

    unsafe fn deallocate_table(
        &self,
        _table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        mut table: Table,
    ) {
        let mut queue = DecommitQueue::default();
        self.tables
            .reset_table_pages_to_zero(allocation_index, &mut table, |ptr, len| {
                queue.push_raw(ptr, len);
            });
        queue.push_table(allocation_index, table);
        self.merge_or_flush(queue);
    }

    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
        self.with_flush_and_retry(|| self.stacks.allocate())
    }

    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, mut stack: wasmtime_fiber::FiberStack) {
        let mut queue = DecommitQueue::default();
        self.stacks
            .zero_stack(&mut stack, |ptr, len| queue.push_raw(ptr, len));
        queue.push_stack(stack);
        self.merge_or_flush(queue);
    }

    fn purge_module(&self, module: CompiledModuleId) {
        self.memories.purge_module(module);
    }

    fn next_available_pkey(&self) -> Option<ProtectionKey> {
        self.memories.next_available_pkey()
    }

    fn restrict_to_pkey(&self, pkey: ProtectionKey) {
        mpk::allow(ProtectionMask::zero().or(pkey));
    }

    fn allow_all_pkeys(&self) {
        mpk::allow(ProtectionMask::all());
    }

    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        engine: &crate::Engine,
        gc_runtime: &dyn GcRuntime,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
        self.gc_heaps.allocate(engine, gc_runtime)
    }

    #[cfg(feature = "gc")]
    fn deallocate_gc_heap(
        &self,
        allocation_index: GcHeapAllocationIndex,
        gc_heap: Box<dyn GcHeap>,
    ) {
        self.gc_heaps.deallocate(allocation_index, gc_heap);
    }
}

#[cfg(test)]
#[cfg(target_pointer_width = "64")]
mod test {
    use super::*;

    #[test]
    fn test_pooling_allocator_with_memory_pages_exceeded() {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_memories: 1,
                max_memory_size: 0x100010000,
                ..Default::default()
            },
            ..PoolingInstanceAllocatorConfig::default()
        };
        assert_eq!(
            PoolingInstanceAllocator::new(
                &config,
                &Tunables {
                    memory_reservation: 0x10000,
                    ..Tunables::default_host()
                },
            )
            .map_err(|e| e.to_string())
            .expect_err("expected a failure constructing instance allocator"),
            "maximum memory size of 0x100010000 bytes exceeds the configured \
             memory reservation of 0x10000 bytes"
        );
    }

    #[cfg(all(unix, target_pointer_width = "64", feature = "async", not(miri)))]
    #[test]
    fn test_stack_zeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for _ in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // The stack pointer is at the top, so decrement it first
                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, 0);
                *addr = 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }

    #[cfg(all(unix, target_pointer_width = "64", feature = "async", not(miri)))]
    #[test]
    fn test_stack_unzeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: false,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for i in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // The stack pointer is at the top, so decrement it first
                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, i);
                *addr = i + 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }
}