wasmtime/runtime/vm/instance/allocator/pooling.rs

//! Implements the pooling instance allocator.
//!
//! The pooling instance allocator maps memory in advance and allocates
//! instances, memories, tables, and stacks from a pool of available resources.
//! Using the pooling instance allocator can speed up module instantiation when
//! modules can be constrained based on configurable limits
//! ([`InstanceLimits`]). Each new instance is stored in a "slot"; as instances
//! are allocated and freed, these slots are either filled or emptied:
//!
//! ```text
//! ┌──────┬──────┬──────┬──────┬──────┐
//! │Slot 0│Slot 1│Slot 2│Slot 3│......│
//! └──────┴──────┴──────┴──────┴──────┘
//! ```
//!
//! Each slot has a "slot ID"--an index into the pool. Slot IDs are handed out
//! by the [`index_allocator`] module. Note that each kind of pool-allocated
//! item is stored in its own separate pool: [`memory_pool`], [`table_pool`],
//! [`stack_pool`]. See those modules for more details.
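//!
//! As a rough, hedged sketch (the authoritative documentation lives on
//! `wasmtime::PoolingAllocationConfig`), an embedder opts into this allocator
//! through the public API along these lines:
//!
//! ```ignore
//! use wasmtime::{Config, Engine, InstanceAllocationStrategy, PoolingAllocationConfig};
//!
//! let mut pool = PoolingAllocationConfig::default();
//! pool.total_memories(100); // at most 100 concurrently-live linear memories
//! pool.max_memory_size(1 << 26); // each linear memory capped at 64 MiB
//!
//! let mut config = Config::new();
//! config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));
//! let engine = Engine::new(&config)?;
//! ```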

mod decommit_queue;
mod index_allocator;
mod memory_pool;
mod table_pool;

#[cfg(feature = "gc")]
mod gc_heap_pool;

#[cfg(feature = "async")]
mod generic_stack_pool;
#[cfg(all(feature = "async", unix, not(miri), not(asan)))]
mod unix_stack_pool;

#[cfg(feature = "async")]
cfg_if::cfg_if! {
    if #[cfg(all(unix, not(miri), not(asan)))] {
        use unix_stack_pool as stack_pool;
    } else {
        use generic_stack_pool as stack_pool;
    }
}

use self::decommit_queue::DecommitQueue;
use self::memory_pool::MemoryPool;
use self::table_pool::TablePool;
use super::{
    InstanceAllocationRequest, InstanceAllocatorImpl, MemoryAllocationIndex, TableAllocationIndex,
};
use crate::MpkEnabled;
use crate::prelude::*;
use crate::runtime::vm::{
    CompiledModuleId, Memory, Table,
    instance::Instance,
    mpk::{self, ProtectionKey, ProtectionMask},
};
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::{Mutex, MutexGuard};
use std::{
    mem,
    sync::atomic::{AtomicU64, Ordering},
};
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, Tunables, VMOffsets,
};

#[cfg(feature = "gc")]
use super::GcHeapAllocationIndex;
#[cfg(feature = "gc")]
use crate::runtime::vm::{GcHeap, GcRuntime};
#[cfg(feature = "gc")]
use gc_heap_pool::GcHeapPool;

#[cfg(feature = "async")]
use stack_pool::StackPool;

#[cfg(feature = "component-model")]
use wasmtime_environ::{
    StaticModuleIndex,
    component::{Component, VMComponentOffsets},
};

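/// Rounds `n` up to the next multiple of `to`, which must be a nonzero power
/// of two; e.g. `round_up_to_pow2(13, 8) == 16` and
/// `round_up_to_pow2(16, 8) == 16`.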
fn round_up_to_pow2(n: usize, to: usize) -> usize {
    debug_assert!(to > 0);
    debug_assert!(to.is_power_of_two());
    (n + to - 1) & !(to - 1)
}

/// Instance-related limit configuration for pooling.
///
/// More docs on this can be found at `wasmtime::PoolingAllocationConfig`.
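///
/// As a minimal sketch, internal callers can override individual limits with
/// struct update syntax (all field names are defined below):
///
/// ```ignore
/// let limits = InstanceLimits {
///     total_memories: 50,
///     total_tables: 50,
///     ..InstanceLimits::default()
/// };
/// ```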
#[derive(Debug, Copy, Clone)]
pub struct InstanceLimits {
    /// The maximum number of component instances that may be allocated
    /// concurrently.
    pub total_component_instances: u32,

    /// The maximum size of a component's `VMComponentContext`, not including
    /// any of its inner core modules' `VMContext` sizes.
    pub component_instance_size: usize,

    /// The maximum number of core module instances that may be allocated
    /// concurrently.
    pub total_core_instances: u32,

    /// The maximum number of core module instances that a single component may
    /// transitively contain.
    pub max_core_instances_per_component: u32,

    /// The maximum number of Wasm linear memories that a component may
    /// transitively contain.
    pub max_memories_per_component: u32,

    /// The maximum number of tables that a component may transitively contain.
    pub max_tables_per_component: u32,

    /// The total number of linear memories in the pool, across all instances.
    pub total_memories: u32,

    /// The total number of tables in the pool, across all instances.
    pub total_tables: u32,

    /// The total number of async stacks in the pool, across all instances.
    #[cfg(feature = "async")]
    pub total_stacks: u32,

    /// Maximum size of a core instance's `VMContext`.
    pub core_instance_size: usize,

    /// Maximum number of tables per instance.
    pub max_tables_per_module: u32,

    /// Maximum number of word-size elements per table.
    ///
    /// Note that tables for element types such as continuations
    /// that use more than one word of storage may store fewer
    /// elements.
    pub table_elements: usize,

    /// Maximum number of linear memories per instance.
    pub max_memories_per_module: u32,

    /// Maximum byte size of a linear memory, which must be smaller than
    /// `memory_reservation` in `Tunables`.
    pub max_memory_size: usize,

    /// The total number of GC heaps in the pool, across all instances.
    #[cfg(feature = "gc")]
    pub total_gc_heaps: u32,
}

impl Default for InstanceLimits {
    fn default() -> Self {
        let total = if cfg!(target_pointer_width = "32") {
            100
        } else {
            1000
        };
        // See doc comments for `wasmtime::PoolingAllocationConfig` for these
        // default values
        Self {
            total_component_instances: total,
            component_instance_size: 1 << 20, // 1 MiB
            total_core_instances: total,
            max_core_instances_per_component: u32::MAX,
            max_memories_per_component: u32::MAX,
            max_tables_per_component: u32::MAX,
            total_memories: total,
            total_tables: total,
            #[cfg(feature = "async")]
            total_stacks: total,
            core_instance_size: 1 << 20, // 1 MiB
            max_tables_per_module: 1,
            // NB: in #8504 it was seen that a C# module in debug mode can
            // have 10k+ elements.
            table_elements: 20_000,
            max_memories_per_module: 1,
            #[cfg(target_pointer_width = "64")]
            max_memory_size: 1 << 32, // 4 GiB
            #[cfg(target_pointer_width = "32")]
            max_memory_size: 10 << 20, // 10 MiB
            #[cfg(feature = "gc")]
            total_gc_heaps: total,
        }
    }
}

/// Configuration options for the pooling instance allocator supplied at
/// construction.
#[derive(Copy, Clone, Debug)]
pub struct PoolingInstanceAllocatorConfig {
    /// See `PoolingAllocatorConfig::max_unused_warm_slots` in `wasmtime`
    pub max_unused_warm_slots: u32,
    /// The target number of decommits to do per batch. This is not precise, as
    /// we can queue up decommits at times when we aren't prepared to
    /// immediately flush them, and so we may go over this target size
    /// occasionally.
    pub decommit_batch_size: usize,
    /// The size, in bytes, of async stacks to allocate (not including the guard
    /// page).
    pub stack_size: usize,
    /// The limits to apply to instances allocated within this allocator.
    pub limits: InstanceLimits,
    /// Whether or not async stacks are zeroed after use.
    pub async_stack_zeroing: bool,
    /// If async stack zeroing is enabled and the host platform is Linux, this
    /// is how much memory to zero out with `memset`.
    ///
    /// The rest of memory will be zeroed out with `madvise`.
    #[cfg(feature = "async")]
    pub async_stack_keep_resident: usize,
    /// How much linear memory, in bytes, to keep resident after resetting for
    /// use with the next instance. This much memory will be `memset` to zero
    /// when a linear memory is deallocated.
    ///
    /// Memory exceeding this amount in the wasm linear memory will be released
    /// with `madvise` back to the kernel.
    ///
    /// Only applicable on Linux.
    pub linear_memory_keep_resident: usize,
    /// Same as `linear_memory_keep_resident` but for tables.
    pub table_keep_resident: usize,
    /// Whether to enable memory protection keys.
    pub memory_protection_keys: MpkEnabled,
    /// How many memory protection keys to allocate.
    pub max_memory_protection_keys: usize,
}

impl Default for PoolingInstanceAllocatorConfig {
    fn default() -> PoolingInstanceAllocatorConfig {
        PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 100,
            decommit_batch_size: 1,
            stack_size: 2 << 20,
            limits: InstanceLimits::default(),
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            async_stack_keep_resident: 0,
            linear_memory_keep_resident: 0,
            table_keep_resident: 0,
            memory_protection_keys: MpkEnabled::Disable,
            max_memory_protection_keys: 16,
        }
    }
}

/// An error returned when the pooling allocator cannot allocate a table,
/// memory, etc... because the maximum number of concurrent allocations for that
/// entity has been reached.
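///
/// As rendered by the `Display` impl below, the message reads, for example:
/// `maximum concurrent limit of 1000 for core instances reached`.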
#[derive(Debug)]
pub struct PoolConcurrencyLimitError {
    limit: usize,
    kind: Cow<'static, str>,
}

impl std::error::Error for PoolConcurrencyLimitError {}

impl Display for PoolConcurrencyLimitError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let limit = self.limit;
        let kind = &self.kind;
        write!(f, "maximum concurrent limit of {limit} for {kind} reached")
    }
}

impl PoolConcurrencyLimitError {
    fn new(limit: usize, kind: impl Into<Cow<'static, str>>) -> Self {
        Self {
            limit,
            kind: kind.into(),
        }
    }
}

/// Implements the pooling instance allocator.
///
/// This allocator internally maintains pools of instances, memories, tables,
/// and stacks.
///
/// Note: the resource pools are manually dropped so that the fault handler
/// terminates correctly.
#[derive(Debug)]
pub struct PoolingInstanceAllocator {
    decommit_batch_size: usize,
    limits: InstanceLimits,

    // The number of live core module and component instances at any given
    // time. Note that this can temporarily go over the configured limit. This
    // doesn't mean we have actually overshot; rather, we attempted to allocate
    // a new instance and incremented the counter, we've seen (or are about to
    // see) that the counter is beyond the configured threshold, and we are
    // going to decrement the counter and return an error but haven't done so
    // yet. See the increment trait methods for more details.
    live_core_instances: AtomicU64,
    live_component_instances: AtomicU64,

    decommit_queue: Mutex<DecommitQueue>,
    memories: MemoryPool,
    tables: TablePool,

    #[cfg(feature = "gc")]
    gc_heaps: GcHeapPool,

    #[cfg(feature = "async")]
    stacks: StackPool,
}

#[cfg(debug_assertions)]
impl Drop for PoolingInstanceAllocator {
    fn drop(&mut self) {
        // NB: when cfg(not(debug_assertions)) it is okay that we don't flush
        // the queue, as the sub-pools will unmap those ranges anyways, so
        // there's no point in decommitting them. But we do need to flush the
        // queue when debug assertions are enabled to make sure that all
        // entities get returned to their associated sub-pools and we can
        // differentiate between a leaking slot and an enqueued-for-decommit
        // slot.
        let queue = self.decommit_queue.lock().unwrap();
        self.flush_decommit_queue(queue);

        debug_assert_eq!(self.live_component_instances.load(Ordering::Acquire), 0);
        debug_assert_eq!(self.live_core_instances.load(Ordering::Acquire), 0);

        debug_assert!(self.memories.is_empty());
        debug_assert!(self.tables.is_empty());

        #[cfg(feature = "gc")]
        debug_assert!(self.gc_heaps.is_empty());

        #[cfg(feature = "async")]
        debug_assert!(self.stacks.is_empty());
    }
}

impl PoolingInstanceAllocator {
    /// Creates a new pooling instance allocator with the given strategy and limits.
    pub fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result<Self> {
        Ok(Self {
            decommit_batch_size: config.decommit_batch_size,
            limits: config.limits,
            live_component_instances: AtomicU64::new(0),
            live_core_instances: AtomicU64::new(0),
            decommit_queue: Mutex::new(DecommitQueue::default()),
            memories: MemoryPool::new(config, tunables)?,
            tables: TablePool::new(config)?,
            #[cfg(feature = "gc")]
            gc_heaps: GcHeapPool::new(config)?,
            #[cfg(feature = "async")]
            stacks: StackPool::new(config)?,
        })
    }

    fn core_instance_size(&self) -> usize {
        round_up_to_pow2(self.limits.core_instance_size, mem::align_of::<Instance>())
    }

    fn validate_table_plans(&self, module: &Module) -> Result<()> {
        self.tables.validate(module)
    }

    fn validate_memory_plans(&self, module: &Module) -> Result<()> {
        self.memories.validate_memories(module)
    }

    fn validate_core_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        let layout = Instance::alloc_layout(offsets);
        if layout.size() <= self.core_instance_size() {
            return Ok(());
        }

        // If this `module` exceeds the allocation size allotted to it then an
        // error will be reported here. The error of "required N bytes but
        // cannot allocate that" is pretty opaque, however, because it's not
        // clear what the breakdown of the N bytes are and what to optimize
        // next. To help provide a better error message, some fancy-ish logic
        // is done here to report the breakdown of the byte request into the
        // largest portions and where they come from.
        let mut message = format!(
            "instance allocation for this module \
             requires {} bytes which exceeds the configured maximum \
             of {} bytes; breakdown of allocation requirement:\n\n",
            layout.size(),
            self.core_instance_size(),
        );

        let mut remaining = layout.size();
        let mut push = |name: &str, bytes: usize| {
            assert!(remaining >= bytes);
            remaining -= bytes;

            // If the `name` region is more than 5% of the allocation request
            // then report it here, otherwise ignore it. We have fewer than 20
            // fields so we're guaranteed that something should be reported, and
            // otherwise it's not particularly interesting to learn about 5
            // different fields that are all 8 or 0 bytes. Only try to report
            // the "major" sources of bytes here.
            if bytes > layout.size() / 20 {
                message.push_str(&format!(
                    " * {:.02}% - {} bytes - {}\n",
                    ((bytes as f32) / (layout.size() as f32)) * 100.0,
                    bytes,
                    name,
                ));
            }
        };
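        // For illustration (numbers hypothetical), a line reported by `push`
        // renders as:
        //
        //  * 35.07% - 367664 bytes - instance state management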

        // The `Instance` itself requires some size allocated to it.
        push("instance state management", mem::size_of::<Instance>());

        // Afterwards the `VMContext`'s regions are why we're requesting bytes,
        // so ask it for descriptions on each region's byte size.
        for (desc, size) in offsets.region_sizes() {
            push(desc, size as usize);
        }

        // double-check we accounted for all the bytes
        assert_eq!(remaining, 0);

        bail!("{}", message)
    }

    #[cfg(feature = "component-model")]
    fn validate_component_instance_size(
        &self,
        offsets: &VMComponentOffsets<HostPtr>,
    ) -> Result<()> {
        if usize::try_from(offsets.size_of_vmctx()).unwrap() <= self.limits.component_instance_size
        {
            return Ok(());
        }

        // TODO: Add context with detailed accounting of what makes up all the
        // `VMComponentContext`'s space like we do for module instances.
        bail!(
            "instance allocation for this component requires {} bytes of `VMComponentContext` \
             space which exceeds the configured maximum of {} bytes",
            offsets.size_of_vmctx(),
            self.limits.component_instance_size
        )
    }

    fn flush_decommit_queue(&self, mut locked_queue: MutexGuard<'_, DecommitQueue>) -> bool {
        // Take the queue out of the mutex and drop the lock, to minimize
        // contention.
        let queue = mem::take(&mut *locked_queue);
        drop(locked_queue);
        queue.flush(self)
    }

    /// Execute `f` and if it returns `Err(PoolConcurrencyLimitError)`, then try
    /// flushing the decommit queue. If flushing the queue freed up slots, then
    /// try running `f` again.
    fn with_flush_and_retry<T>(&self, mut f: impl FnMut() -> Result<T>) -> Result<T> {
        f().or_else(|e| {
            if e.is::<PoolConcurrencyLimitError>() {
                let queue = self.decommit_queue.lock().unwrap();
                if self.flush_decommit_queue(queue) {
                    return f();
                }
            }

            Err(e)
        })
    }

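    /// Dispose of a deallocation's local decommit queue: flush it immediately
    /// when it is empty (the slot is already reusable) or when it already
    /// meets the batch size; otherwise merge it into the shared queue,
    /// flushing that queue once it reaches the batch size.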
    fn merge_or_flush(&self, mut local_queue: DecommitQueue) {
        match local_queue.raw_len() {
            // If we didn't enqueue any regions for decommit, then we must have
            // either memset the whole entity or eagerly remapped it to zero
            // because we don't have Linux's `madvise(DONTNEED)` semantics. In
            // either case, the entity slot is ready for reuse immediately.
            0 => {
                local_queue.flush(self);
            }

            // We enqueued at least our batch size of regions for decommit, so
            // flush the local queue immediately. Don't bother inspecting (or
            // locking!) the shared queue.
            n if n >= self.decommit_batch_size => {
                local_queue.flush(self);
            }

            // We enqueued some regions for decommit but did not reach our
            // batch size, so we don't want to flush yet; instead, merge the
            // local queue into the shared queue.
            n => {
                debug_assert!(n < self.decommit_batch_size);
                let mut shared_queue = self.decommit_queue.lock().unwrap();
                shared_queue.append(&mut local_queue);
                // And if the shared queue now has at least as many regions
                // enqueued for decommit as our batch size, then we can flush
                // it.
                if shared_queue.raw_len() >= self.decommit_batch_size {
                    self.flush_decommit_queue(shared_queue);
                }
            }
        }
    }
}

unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator {
    #[cfg(feature = "component-model")]
    fn validate_component_impl<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()> {
        self.validate_component_instance_size(offsets)
            .context("component instance size does not fit in pooling allocator requirements")?;

        let mut num_core_instances = 0;
        let mut num_memories = 0;
        let mut num_tables = 0;
        for init in &component.initializers {
            use wasmtime_environ::component::GlobalInitializer::*;
            use wasmtime_environ::component::InstantiateModule;
            match init {
                InstantiateModule(InstantiateModule::Import(_, _)) => {
                    num_core_instances += 1;
                    // Can't statically account for the total vmctx size, number
                    // of memories, and number of tables in this component.
                }
                InstantiateModule(InstantiateModule::Static(static_module_index, _)) => {
                    let module = get_module(*static_module_index);
                    let offsets = VMOffsets::new(HostPtr, &module);
                    self.validate_module_impl(module, &offsets)?;
                    num_core_instances += 1;
                    num_memories += module.num_defined_memories();
                    num_tables += module.num_defined_tables();
                }
                LowerImport { .. }
                | ExtractMemory(_)
                | ExtractTable(_)
                | ExtractRealloc(_)
                | ExtractCallback(_)
                | ExtractPostReturn(_)
                | Resource(_) => {}
            }
        }

        if num_core_instances
            > usize::try_from(self.limits.max_core_instances_per_component).unwrap()
        {
            bail!(
                "The component transitively contains {num_core_instances} core module instances, \
                 which exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_core_instances_per_component
            );
        }

        if num_memories > usize::try_from(self.limits.max_memories_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_memories} Wasm linear memories, which \
                 exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_memories_per_component
            );
        }

        if num_tables > usize::try_from(self.limits.max_tables_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_tables} tables, which exceeds the \
                 configured maximum of {} in the pooling allocator",
                self.limits.max_tables_per_component
            );
        }

        Ok(())
    }

    fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        self.validate_memory_plans(module)
            .context("module memory does not fit in pooling allocator requirements")?;
        self.validate_table_plans(module)
            .context("module table does not fit in pooling allocator requirements")?;
        self.validate_core_instance_size(offsets)
            .context("module instance size does not fit in pooling allocator requirements")?;
        Ok(())
    }

    #[cfg(feature = "gc")]
    fn validate_memory_impl(&self, memory: &wasmtime_environ::Memory) -> Result<()> {
        self.memories.validate_memory(memory)
    }

    #[cfg(feature = "component-model")]
    fn increment_component_instance_count(&self) -> Result<()> {
        let old_count = self.live_component_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_component_instances) {
            self.decrement_component_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_component_instances).unwrap(),
                "component instances",
            )
            .into());
        }
        Ok(())
    }

    #[cfg(feature = "component-model")]
    fn decrement_component_instance_count(&self) {
        self.live_component_instances.fetch_sub(1, Ordering::AcqRel);
    }

    fn increment_core_instance_count(&self) -> Result<()> {
        let old_count = self.live_core_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_core_instances) {
            self.decrement_core_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_core_instances).unwrap(),
                "core instances",
            )
            .into());
        }
        Ok(())
    }

    fn decrement_core_instance_count(&self) {
        self.live_core_instances.fetch_sub(1, Ordering::AcqRel);
    }

    unsafe fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        memory_index: Option<DefinedMemoryIndex>,
    ) -> Result<(MemoryAllocationIndex, Memory)> {
        self.with_flush_and_retry(|| self.memories.allocate(request, ty, tunables, memory_index))
    }

    unsafe fn deallocate_memory(
        &self,
        _memory_index: Option<DefinedMemoryIndex>,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    ) {
        // Reset the image slot. If there is any error clearing the
        // image, just drop it here, and let the drop handler for the
        // slot unmap in a way that retains the address space
        // reservation.
        let mut image = memory.unwrap_static_image();
        let mut queue = DecommitQueue::default();
        image
            .clear_and_remain_ready(self.memories.keep_resident, |ptr, len| {
                queue.push_raw(ptr, len);
            })
            .expect("failed to reset memory image");
        queue.push_memory(allocation_index, image);
        self.merge_or_flush(queue);
    }

    unsafe fn allocate_table(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Table,
        tunables: &Tunables,
        _table_index: DefinedTableIndex,
    ) -> Result<(super::TableAllocationIndex, Table)> {
        self.with_flush_and_retry(|| self.tables.allocate(request, ty, tunables))
    }

    unsafe fn deallocate_table(
        &self,
        _table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        mut table: Table,
    ) {
        let mut queue = DecommitQueue::default();
        self.tables
            .reset_table_pages_to_zero(allocation_index, &mut table, |ptr, len| {
                queue.push_raw(ptr, len);
            });
        queue.push_table(allocation_index, table);
        self.merge_or_flush(queue);
    }

    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
        self.with_flush_and_retry(|| self.stacks.allocate())
    }

    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, mut stack: wasmtime_fiber::FiberStack) {
        let mut queue = DecommitQueue::default();
        self.stacks
            .zero_stack(&mut stack, |ptr, len| queue.push_raw(ptr, len));
        queue.push_stack(stack);
        self.merge_or_flush(queue);
    }

    fn purge_module(&self, module: CompiledModuleId) {
        self.memories.purge_module(module);
    }

    fn next_available_pkey(&self) -> Option<ProtectionKey> {
        self.memories.next_available_pkey()
    }

    fn restrict_to_pkey(&self, pkey: ProtectionKey) {
        mpk::allow(ProtectionMask::zero().or(pkey));
    }

    fn allow_all_pkeys(&self) {
        mpk::allow(ProtectionMask::all());
    }

    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        engine: &crate::Engine,
        gc_runtime: &dyn GcRuntime,
        memory_alloc_index: MemoryAllocationIndex,
        memory: Memory,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
        self.gc_heaps
            .allocate(engine, gc_runtime, memory_alloc_index, memory)
    }

    #[cfg(feature = "gc")]
    fn deallocate_gc_heap(
        &self,
        allocation_index: GcHeapAllocationIndex,
        gc_heap: Box<dyn GcHeap>,
    ) -> (MemoryAllocationIndex, Memory) {
        self.gc_heaps.deallocate(allocation_index, gc_heap)
    }
}

#[cfg(test)]
#[cfg(target_pointer_width = "64")]
mod test {
    use super::*;

    #[test]
    fn test_pooling_allocator_with_memory_pages_exceeded() {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_memories: 1,
                max_memory_size: 0x100010000,
                ..Default::default()
            },
            ..PoolingInstanceAllocatorConfig::default()
        };
        assert_eq!(
            PoolingInstanceAllocator::new(
                &config,
                &Tunables {
                    memory_reservation: 0x10000,
                    ..Tunables::default_host()
                },
            )
            .map_err(|e| e.to_string())
            .expect_err("expected a failure constructing instance allocator"),
            "maximum memory size of 0x100010000 bytes exceeds the configured \
             memory reservation of 0x10000 bytes"
        );
    }

    #[cfg(all(
        unix,
        target_pointer_width = "64",
        feature = "async",
        not(miri),
        not(asan)
    ))]
    #[test]
    fn test_stack_zeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for _ in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // The stack pointer is at the top, so decrement it first
                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, 0);
                *addr = 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }

    #[cfg(all(
        unix,
        target_pointer_width = "64",
        feature = "async",
        not(miri),
        not(asan)
    ))]
    #[test]
    fn test_stack_unzeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: false,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for i in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // The stack pointer is at the top, so decrement it first
                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, i);
                *addr = i + 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }
}