wasmtime/runtime/vm/instance/allocator/pooling.rs

//! Implements the pooling instance allocator.
//!
//! The pooling instance allocator maps memory in advance and allocates
//! instances, memories, tables, and stacks from a pool of available resources.
//! Using the pooling instance allocator can speed up module instantiation when
//! modules can be constrained by configurable limits
//! ([`InstanceLimits`]). Each new instance is stored in a "slot"; as instances
//! are allocated and freed, these slots are either filled or emptied:
//!
//! ```text
//! ┌──────┬──────┬──────┬──────┬──────┐
//! │Slot 0│Slot 1│Slot 2│Slot 3│......│
//! └──────┴──────┴──────┴──────┴──────┘
//! ```
//!
//! Each slot has a "slot ID", an index into the pool. Slot IDs are handed out
//! by the [`index_allocator`] module. Note that each kind of pool-allocated
//! item is stored in its own separate pool: [`memory_pool`], [`table_pool`],
//! [`stack_pool`]. See those modules for more details.
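//!
//! As a rough sketch (not compiled as a doc test), an allocator is built from
//! a [`PoolingInstanceAllocatorConfig`] plus the engine's `Tunables`,
//! mirroring what the tests at the bottom of this file do:
//!
//! ```ignore
//! let config = PoolingInstanceAllocatorConfig {
//!     limits: InstanceLimits {
//!         total_memories: 10,
//!         total_tables: 10,
//!         ..Default::default()
//!     },
//!     ..PoolingInstanceAllocatorConfig::default()
//! };
//! let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;
//! ```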

mod decommit_queue;
mod index_allocator;
mod memory_pool;
mod table_pool;

#[cfg(feature = "gc")]
mod gc_heap_pool;

#[cfg(feature = "async")]
mod generic_stack_pool;
#[cfg(all(feature = "async", unix, not(miri)))]
mod unix_stack_pool;

#[cfg(feature = "async")]
cfg_if::cfg_if! {
    if #[cfg(all(unix, not(miri), not(asan)))] {
        use unix_stack_pool as stack_pool;
    } else {
        use generic_stack_pool as stack_pool;
    }
}

use self::decommit_queue::DecommitQueue;
use self::memory_pool::MemoryPool;
use self::table_pool::TablePool;
use super::{
    InstanceAllocationRequest, InstanceAllocatorImpl, MemoryAllocationIndex, TableAllocationIndex,
};
use crate::MpkEnabled;
use crate::prelude::*;
use crate::runtime::vm::{
    CompiledModuleId, Memory, Table,
    instance::Instance,
    mpk::{self, ProtectionKey, ProtectionMask},
};
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::{Mutex, MutexGuard};
use std::{
    mem,
    sync::atomic::{AtomicU64, Ordering},
};
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, Tunables, VMOffsets,
};

#[cfg(feature = "gc")]
use super::GcHeapAllocationIndex;
#[cfg(feature = "gc")]
use crate::runtime::vm::{GcHeap, GcRuntime};
#[cfg(feature = "gc")]
use gc_heap_pool::GcHeapPool;

#[cfg(feature = "async")]
use stack_pool::StackPool;

#[cfg(feature = "component-model")]
use wasmtime_environ::{
    StaticModuleIndex,
    component::{Component, VMComponentOffsets},
};

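/// Rounds `n` up to the next multiple of `to`, which must be a nonzero power
/// of two.
///
/// For example, `round_up_to_pow2(5, 4)` is `8`: adding `to - 1` (here `3`)
/// bumps `n` past the next boundary and the mask `!(to - 1)` clears the low
/// bits, i.e. `(5 + 3) & !3 == 8`.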
fn round_up_to_pow2(n: usize, to: usize) -> usize {
    debug_assert!(to > 0);
    debug_assert!(to.is_power_of_two());
    (n + to - 1) & !(to - 1)
}

/// Instance-related limit configuration for pooling.
///
/// More docs on this can be found at `wasmtime::PoolingAllocationConfig`.
#[derive(Debug, Copy, Clone)]
pub struct InstanceLimits {
    /// The maximum number of component instances that may be allocated
    /// concurrently.
    pub total_component_instances: u32,

    /// The maximum size of a component's `VMComponentContext`, not including
    /// any of its inner core modules' `VMContext` sizes.
    pub component_instance_size: usize,

    /// The maximum number of core module instances that may be allocated
    /// concurrently.
    pub total_core_instances: u32,

    /// The maximum number of core module instances that a single component may
    /// transitively contain.
    pub max_core_instances_per_component: u32,

    /// The maximum number of Wasm linear memories that a component may
    /// transitively contain.
    pub max_memories_per_component: u32,

    /// The maximum number of tables that a component may transitively contain.
    pub max_tables_per_component: u32,

    /// The total number of linear memories in the pool, across all instances.
    pub total_memories: u32,

    /// The total number of tables in the pool, across all instances.
    pub total_tables: u32,

    /// The total number of async stacks in the pool, across all instances.
    #[cfg(feature = "async")]
    pub total_stacks: u32,

    /// Maximum size of a core instance's `VMContext`.
    pub core_instance_size: usize,

    /// Maximum number of tables per instance.
    pub max_tables_per_module: u32,

    /// Maximum number of word-size elements per table.
    ///
    /// Note that tables whose element type uses more than one word of
    /// storage (continuations, for example) may store fewer elements.
    pub table_elements: usize,

    /// Maximum number of linear memories per instance.
    pub max_memories_per_module: u32,

    /// Maximum byte size of a linear memory; must be smaller than
    /// `memory_reservation` in `Tunables`.
    pub max_memory_size: usize,

    /// The total number of GC heaps in the pool, across all instances.
    #[cfg(feature = "gc")]
    pub total_gc_heaps: u32,
}

impl Default for InstanceLimits {
    fn default() -> Self {
        let total = if cfg!(target_pointer_width = "32") {
            100
        } else {
            1000
        };
        // See doc comments for `wasmtime::PoolingAllocationConfig` for these
        // default values.
        Self {
            total_component_instances: total,
            component_instance_size: 1 << 20, // 1 MiB
            total_core_instances: total,
            max_core_instances_per_component: u32::MAX,
            max_memories_per_component: u32::MAX,
            max_tables_per_component: u32::MAX,
            total_memories: total,
            total_tables: total,
            #[cfg(feature = "async")]
            total_stacks: total,
            core_instance_size: 1 << 20, // 1 MiB
            max_tables_per_module: 1,
            // NB: in #8504 it was seen that a C# module in debug mode can
            // have 10k+ elements.
            table_elements: 20_000,
            max_memories_per_module: 1,
            #[cfg(target_pointer_width = "64")]
            max_memory_size: 1 << 32, // 4 GiB
            #[cfg(target_pointer_width = "32")]
            max_memory_size: 10 << 20, // 10 MiB
            #[cfg(feature = "gc")]
            total_gc_heaps: total,
        }
    }
}

/// Configuration options for the pooling instance allocator supplied at
/// construction.
#[derive(Copy, Clone, Debug)]
pub struct PoolingInstanceAllocatorConfig {
    /// See `PoolingAllocationConfig::max_unused_warm_slots` in `wasmtime`.
    pub max_unused_warm_slots: u32,
    /// The target number of decommits to do per batch. This is not precise, as
    /// we can queue up decommits at times when we aren't prepared to
    /// immediately flush them, and so we may go over this target size
    /// occasionally.
    pub decommit_batch_size: usize,
    /// The size, in bytes, of async stacks to allocate (not including the guard
    /// page).
    pub stack_size: usize,
    /// The limits to apply to instances allocated within this allocator.
    pub limits: InstanceLimits,
    /// Whether or not async stacks are zeroed after use.
    pub async_stack_zeroing: bool,
    /// If async stack zeroing is enabled and the host platform is Linux, this is
    /// how much memory to zero out with `memset`.
    ///
    /// The rest of memory will be zeroed out with `madvise`.
    #[cfg(feature = "async")]
    pub async_stack_keep_resident: usize,
    /// How much linear memory, in bytes, to keep resident after resetting for
    /// use with the next instance. This much memory will be `memset` to zero
    /// when a linear memory is deallocated.
    ///
    /// Memory exceeding this amount in the wasm linear memory will be released
    /// with `madvise` back to the kernel.
    ///
    /// Only applicable on Linux.
    pub linear_memory_keep_resident: usize,
    /// Same as `linear_memory_keep_resident` but for tables.
    pub table_keep_resident: usize,
    /// Whether to enable memory protection keys.
    pub memory_protection_keys: MpkEnabled,
    /// How many memory protection keys to allocate.
    pub max_memory_protection_keys: usize,
}

impl Default for PoolingInstanceAllocatorConfig {
    fn default() -> PoolingInstanceAllocatorConfig {
        PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 100,
            decommit_batch_size: 1,
            stack_size: 2 << 20,
            limits: InstanceLimits::default(),
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            async_stack_keep_resident: 0,
            linear_memory_keep_resident: 0,
            table_keep_resident: 0,
            memory_protection_keys: MpkEnabled::Disable,
            max_memory_protection_keys: 16,
        }
    }
}

/// An error returned when the pooling allocator cannot allocate a table,
/// memory, etc., because the maximum number of concurrent allocations for that
/// entity has been reached.
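///
/// The rendered message looks like
/// `maximum concurrent limit of 1000 for core instances reached`.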
#[derive(Debug)]
pub struct PoolConcurrencyLimitError {
    limit: usize,
    kind: Cow<'static, str>,
}

impl core::error::Error for PoolConcurrencyLimitError {}

impl Display for PoolConcurrencyLimitError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let limit = self.limit;
        let kind = &self.kind;
        write!(f, "maximum concurrent limit of {limit} for {kind} reached")
    }
}

impl PoolConcurrencyLimitError {
    fn new(limit: usize, kind: impl Into<Cow<'static, str>>) -> Self {
        Self {
            limit,
            kind: kind.into(),
        }
    }
}

/// Implements the pooling instance allocator.
///
/// This allocator internally maintains pools of instances, memories, tables,
/// and stacks.
///
/// Note: the resource pools are manually dropped so that the fault handler
/// terminates correctly.
#[derive(Debug)]
pub struct PoolingInstanceAllocator {
    decommit_batch_size: usize,
    limits: InstanceLimits,

    // The number of live core module and component instances at any given
    // time. Note that this can temporarily go over the configured limit: it
    // doesn't mean we have actually overshot, but that we attempted to allocate
    // a new instance and incremented the counter, have seen (or are about to
    // see) that the counter is beyond the configured threshold, and are going
    // to decrement the counter and return an error but haven't done so yet. See
    // the increment trait methods for more details.
    live_core_instances: AtomicU64,
    live_component_instances: AtomicU64,

    decommit_queue: Mutex<DecommitQueue>,
    memories: MemoryPool,
    tables: TablePool,

    #[cfg(feature = "gc")]
    gc_heaps: GcHeapPool,

    #[cfg(feature = "async")]
    stacks: StackPool,
}

impl Drop for PoolingInstanceAllocator {
    fn drop(&mut self) {
        if !cfg!(debug_assertions) {
            return;
        }

        // NB: when cfg(not(debug_assertions)) it is okay that we don't flush
        // the queue, as the sub-pools will unmap those ranges anyway, so
        // there's no point in decommitting them. But we do need to flush the
        // queue when debug assertions are enabled to make sure that all
        // entities get returned to their associated sub-pools and we can
        // differentiate between a leaking slot and an enqueued-for-decommit
        // slot.
        let queue = self.decommit_queue.lock().unwrap();
        self.flush_decommit_queue(queue);

        debug_assert_eq!(self.live_component_instances.load(Ordering::Acquire), 0);
        debug_assert_eq!(self.live_core_instances.load(Ordering::Acquire), 0);

        debug_assert!(self.memories.is_empty());
        debug_assert!(self.tables.is_empty());

        #[cfg(feature = "gc")]
        debug_assert!(self.gc_heaps.is_empty());

        #[cfg(feature = "async")]
        debug_assert!(self.stacks.is_empty());
    }
}

impl PoolingInstanceAllocator {
    /// Creates a new pooling instance allocator with the given configuration and tunables.
    pub fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result<Self> {
        Ok(Self {
            decommit_batch_size: config.decommit_batch_size,
            limits: config.limits,
            live_component_instances: AtomicU64::new(0),
            live_core_instances: AtomicU64::new(0),
            decommit_queue: Mutex::new(DecommitQueue::default()),
            memories: MemoryPool::new(config, tunables)?,
            tables: TablePool::new(config)?,
            #[cfg(feature = "gc")]
            gc_heaps: GcHeapPool::new(config)?,
            #[cfg(feature = "async")]
            stacks: StackPool::new(config)?,
        })
    }

    fn core_instance_size(&self) -> usize {
        round_up_to_pow2(self.limits.core_instance_size, mem::align_of::<Instance>())
    }

    fn validate_table_plans(&self, module: &Module) -> Result<()> {
        self.tables.validate(module)
    }

    fn validate_memory_plans(&self, module: &Module) -> Result<()> {
        self.memories.validate_memories(module)
    }

    fn validate_core_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        let layout = Instance::alloc_layout(offsets);
        if layout.size() <= self.core_instance_size() {
            return Ok(());
        }

        // If this `module` exceeds the allocation size allotted to it then an
        // error will be reported here. The error of "required N bytes but
        // cannot allocate that" is pretty opaque, however, because it's not
        // clear what the breakdown of the N bytes is and what to optimize
        // next. To help provide a better error message, some fancy-ish logic
        // is done here to report the breakdown of the byte request into the
        // largest portions and where they come from.
        let mut message = format!(
            "instance allocation for this module \
             requires {} bytes which exceeds the configured maximum \
             of {} bytes; breakdown of allocation requirement:\n\n",
            layout.size(),
            self.core_instance_size(),
        );

        let mut remaining = layout.size();
        let mut push = |name: &str, bytes: usize| {
            assert!(remaining >= bytes);
            remaining -= bytes;

            // If the `name` region is more than 5% of the allocation request
            // then report it here, otherwise ignore it. We have fewer than 20
            // fields so we're guaranteed that something should be reported, and
            // otherwise it's not particularly interesting to learn about 5
            // different fields that are all 8 or 0 bytes. Only try to report
            // the "major" sources of bytes here.
            if bytes > layout.size() / 20 {
                message.push_str(&format!(
                    " * {:.02}% - {} bytes - {}\n",
                    ((bytes as f32) / (layout.size() as f32)) * 100.0,
                    bytes,
                    name,
                ));
            }
        };

        // The `Instance` itself requires some size allocated to it.
        push("instance state management", mem::size_of::<Instance>());

        // Afterwards the `VMContext`'s regions are why we're requesting bytes,
        // so ask it for descriptions on each region's byte size.
        for (desc, size) in offsets.region_sizes() {
            push(desc, size as usize);
        }

        // Double-check we accounted for all the bytes.
        assert_eq!(remaining, 0);

        bail!("{}", message)
    }

    #[cfg(feature = "component-model")]
    fn validate_component_instance_size(
        &self,
        offsets: &VMComponentOffsets<HostPtr>,
    ) -> Result<()> {
        if usize::try_from(offsets.size_of_vmctx()).unwrap() <= self.limits.component_instance_size
        {
            return Ok(());
        }

        // TODO: Add context with detailed accounting of what makes up all the
        // `VMComponentContext`'s space like we do for module instances.
        bail!(
            "instance allocation for this component requires {} bytes of `VMComponentContext` \
             space which exceeds the configured maximum of {} bytes",
            offsets.size_of_vmctx(),
            self.limits.component_instance_size
        )
    }

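    /// Flushes the given (already locked) decommit queue, returning whether
    /// flushing freed up any slots (see `with_flush_and_retry`).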
    fn flush_decommit_queue(&self, mut locked_queue: MutexGuard<'_, DecommitQueue>) -> bool {
        // Take the queue out of the mutex and drop the lock, to minimize
        // contention.
        let queue = mem::take(&mut *locked_queue);
        drop(locked_queue);
        queue.flush(self)
    }

    /// Execute `f` and if it returns `Err(PoolConcurrencyLimitError)`, then try
    /// flushing the decommit queue. If flushing the queue freed up slots, then
    /// try running `f` again.
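    ///
    /// For example, `allocate_memory` below wraps its pool allocation in this
    /// helper so that a full pool is retried once after any pending decommits
    /// have been flushed.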
    fn with_flush_and_retry<T>(&self, mut f: impl FnMut() -> Result<T>) -> Result<T> {
        f().or_else(|e| {
            if e.is::<PoolConcurrencyLimitError>() {
                let queue = self.decommit_queue.lock().unwrap();
                if self.flush_decommit_queue(queue) {
                    return f();
                }
            }

            Err(e)
        })
    }

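    /// Either flushes `local_queue` immediately or merges it into the shared
    /// decommit queue, depending on how many regions it holds relative to
    /// `decommit_batch_size`; see the comments on the match arms below.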
    fn merge_or_flush(&self, mut local_queue: DecommitQueue) {
        match local_queue.raw_len() {
            // If we didn't enqueue any regions for decommit, then we must have
            // either memset the whole entity or eagerly remapped it to zero
            // because we don't have Linux's `madvise(DONTNEED)` semantics. In
            // either case, the entity slot is ready for reuse immediately.
            0 => {
                local_queue.flush(self);
            }

            // We enqueued at least our batch size of regions for decommit, so
            // flush the local queue immediately. Don't bother inspecting (or
            // locking!) the shared queue.
            n if n >= self.decommit_batch_size => {
                local_queue.flush(self);
            }

            // If we enqueued some regions for decommit but did not reach our
            // batch size, we don't want to flush yet, so merge the local
            // queue into the shared queue.
            n => {
                debug_assert!(n < self.decommit_batch_size);
                let mut shared_queue = self.decommit_queue.lock().unwrap();
                shared_queue.append(&mut local_queue);
                // And if the shared queue now has at least as many regions
                // enqueued for decommit as our batch size, then we can flush
                // it.
                if shared_queue.raw_len() >= self.decommit_batch_size {
                    self.flush_decommit_queue(shared_queue);
                }
            }
        }
    }
}

unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator {
    #[cfg(feature = "component-model")]
    fn validate_component_impl<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()> {
        self.validate_component_instance_size(offsets)
            .context("component instance size does not fit in pooling allocator requirements")?;

        let mut num_core_instances = 0;
        let mut num_memories = 0;
        let mut num_tables = 0;
        for init in &component.initializers {
            use wasmtime_environ::component::GlobalInitializer::*;
            use wasmtime_environ::component::InstantiateModule;
            match init {
                InstantiateModule(InstantiateModule::Import(_, _)) => {
                    num_core_instances += 1;
                    // Can't statically account for the total vmctx size, number
                    // of memories, and number of tables in this component.
                }
                InstantiateModule(InstantiateModule::Static(static_module_index, _)) => {
                    let module = get_module(*static_module_index);
                    let offsets = VMOffsets::new(HostPtr, &module);
                    self.validate_module_impl(module, &offsets)?;
                    num_core_instances += 1;
                    num_memories += module.num_defined_memories();
                    num_tables += module.num_defined_tables();
                }
                LowerImport { .. }
                | ExtractMemory(_)
                | ExtractTable(_)
                | ExtractRealloc(_)
                | ExtractCallback(_)
                | ExtractPostReturn(_)
                | Resource(_) => {}
            }
        }

        if num_core_instances
            > usize::try_from(self.limits.max_core_instances_per_component).unwrap()
        {
            bail!(
                "The component transitively contains {num_core_instances} core module instances, \
                 which exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_core_instances_per_component
            );
        }

        if num_memories > usize::try_from(self.limits.max_memories_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_memories} Wasm linear memories, which \
                 exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_memories_per_component
            );
        }

        if num_tables > usize::try_from(self.limits.max_tables_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_tables} tables, which exceeds the \
                 configured maximum of {} in the pooling allocator",
                self.limits.max_tables_per_component
            );
        }

        Ok(())
    }

    fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        self.validate_memory_plans(module)
            .context("module memory does not fit in pooling allocator requirements")?;
        self.validate_table_plans(module)
            .context("module table does not fit in pooling allocator requirements")?;
        self.validate_core_instance_size(offsets)
            .context("module instance size does not fit in pooling allocator requirements")?;
        Ok(())
    }

    #[cfg(feature = "gc")]
    fn validate_memory_impl(&self, memory: &wasmtime_environ::Memory) -> Result<()> {
        self.memories.validate_memory(memory)
    }

    #[cfg(feature = "component-model")]
    fn increment_component_instance_count(&self) -> Result<()> {
        let old_count = self.live_component_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_component_instances) {
            self.decrement_component_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_component_instances).unwrap(),
                "component instances",
            )
            .into());
        }
        Ok(())
    }

    #[cfg(feature = "component-model")]
    fn decrement_component_instance_count(&self) {
        self.live_component_instances.fetch_sub(1, Ordering::AcqRel);
    }

    fn increment_core_instance_count(&self) -> Result<()> {
        let old_count = self.live_core_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_core_instances) {
            self.decrement_core_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_core_instances).unwrap(),
                "core instances",
            )
            .into());
        }
        Ok(())
    }

    fn decrement_core_instance_count(&self) {
        self.live_core_instances.fetch_sub(1, Ordering::AcqRel);
    }

    fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        memory_index: Option<DefinedMemoryIndex>,
    ) -> Result<(MemoryAllocationIndex, Memory)> {
        self.with_flush_and_retry(|| self.memories.allocate(request, ty, tunables, memory_index))
    }

    unsafe fn deallocate_memory(
        &self,
        _memory_index: Option<DefinedMemoryIndex>,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    ) {
        // Reset the image slot. If there is any error clearing the
        // image, just drop it here, and let the drop handler for the
        // slot unmap in a way that retains the address space
        // reservation.
        let mut image = memory.unwrap_static_image();
        let mut queue = DecommitQueue::default();
        image
            .clear_and_remain_ready(self.memories.keep_resident, |ptr, len| {
                // SAFETY: the memory in `image` won't be used until this
                // decommit queue is flushed, and by definition the memory is
                // not in use when calling this function.
                unsafe {
                    queue.push_raw(ptr, len);
                }
            })
            .expect("failed to reset memory image");

        // SAFETY: this image is not in use and its memory regions were enqueued
        // with `push_raw` above.
        unsafe {
            queue.push_memory(allocation_index, image);
        }
        self.merge_or_flush(queue);
    }

    fn allocate_table(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Table,
        tunables: &Tunables,
        _table_index: DefinedTableIndex,
    ) -> Result<(super::TableAllocationIndex, Table)> {
        self.with_flush_and_retry(|| self.tables.allocate(request, ty, tunables))
    }

    unsafe fn deallocate_table(
        &self,
        _table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        mut table: Table,
    ) {
        let mut queue = DecommitQueue::default();
        // SAFETY: This table is no longer in use by the allocator when this
        // method is called and additionally all image ranges are pushed with
        // the understanding that the memory won't get used until the whole
        // queue is flushed.
        unsafe {
            self.tables
                .reset_table_pages_to_zero(allocation_index, &mut table, |ptr, len| {
                    queue.push_raw(ptr, len);
                });
        }

        // SAFETY: the table has had all its memory regions enqueued above.
        unsafe {
            queue.push_table(allocation_index, table);
        }
        self.merge_or_flush(queue);
    }

    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
        self.with_flush_and_retry(|| self.stacks.allocate())
    }

    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, mut stack: wasmtime_fiber::FiberStack) {
        let mut queue = DecommitQueue::default();
        // SAFETY: the stack is no longer in use by definition when this
        // function is called and memory ranges pushed here are otherwise no
        // longer in use.
        unsafe {
            self.stacks
                .zero_stack(&mut stack, |ptr, len| queue.push_raw(ptr, len));
        }
        // SAFETY: this stack's memory regions were enqueued above.
        unsafe {
            queue.push_stack(stack);
        }
        self.merge_or_flush(queue);
    }

    fn purge_module(&self, module: CompiledModuleId) {
        self.memories.purge_module(module);
    }

    fn next_available_pkey(&self) -> Option<ProtectionKey> {
        self.memories.next_available_pkey()
    }

    fn restrict_to_pkey(&self, pkey: ProtectionKey) {
        mpk::allow(ProtectionMask::zero().or(pkey));
    }

    fn allow_all_pkeys(&self) {
        mpk::allow(ProtectionMask::all());
    }

    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        engine: &crate::Engine,
        gc_runtime: &dyn GcRuntime,
        memory_alloc_index: MemoryAllocationIndex,
        memory: Memory,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
        self.gc_heaps
            .allocate(engine, gc_runtime, memory_alloc_index, memory)
    }

    #[cfg(feature = "gc")]
    fn deallocate_gc_heap(
        &self,
        allocation_index: GcHeapAllocationIndex,
        gc_heap: Box<dyn GcHeap>,
    ) -> (MemoryAllocationIndex, Memory) {
        self.gc_heaps.deallocate(allocation_index, gc_heap)
    }
}

#[cfg(test)]
#[cfg(target_pointer_width = "64")]
mod test {
    use super::*;

    #[test]
    fn test_pooling_allocator_with_memory_pages_exceeded() {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_memories: 1,
                max_memory_size: 0x100010000,
                ..Default::default()
            },
            ..PoolingInstanceAllocatorConfig::default()
        };
        assert_eq!(
            PoolingInstanceAllocator::new(
                &config,
                &Tunables {
                    memory_reservation: 0x10000,
                    ..Tunables::default_host()
                },
            )
            .map_err(|e| e.to_string())
            .expect_err("expected a failure constructing instance allocator"),
            "maximum memory size of 0x100010000 bytes exceeds the configured \
             memory reservation of 0x10000 bytes"
        );
    }

    #[cfg(all(
        unix,
        target_pointer_width = "64",
        feature = "async",
        not(miri),
        not(asan)
    ))]
    #[test]
    fn test_stack_zeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for _ in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // The stack pointer is at the top, so decrement it first
                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, 0);
                *addr = 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }

    #[cfg(all(
        unix,
        target_pointer_width = "64",
        feature = "async",
        not(miri),
        not(asan)
    ))]
    #[test]
    fn test_stack_unzeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: false,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for i in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // The stack pointer is at the top, so decrement it first
                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, i);
                *addr = i + 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }
}