1mod decommit_queue;
22mod index_allocator;
23mod memory_pool;
24mod table_pool;
25
26#[cfg(feature = "gc")]
27mod gc_heap_pool;
28
29#[cfg(all(feature = "async"))]
30mod generic_stack_pool;
31#[cfg(all(feature = "async", unix, not(miri)))]
32mod unix_stack_pool;
33
34#[cfg(all(feature = "async"))]
35cfg_if::cfg_if! {
36 if #[cfg(all(unix, not(miri), not(asan)))] {
37 use unix_stack_pool as stack_pool;
38 } else {
39 use generic_stack_pool as stack_pool;
40 }
41}
42
43use self::decommit_queue::DecommitQueue;
44use self::memory_pool::MemoryPool;
45use self::table_pool::TablePool;
46use super::{
47 InstanceAllocationRequest, InstanceAllocatorImpl, MemoryAllocationIndex, TableAllocationIndex,
48};
49use crate::MpkEnabled;
50use crate::prelude::*;
51use crate::runtime::vm::{
52 CompiledModuleId, Memory, Table,
53 instance::Instance,
54 mpk::{self, ProtectionKey, ProtectionMask},
55};
56use std::borrow::Cow;
57use std::fmt::Display;
58use std::sync::{Mutex, MutexGuard};
59use std::{
60 mem,
61 sync::atomic::{AtomicU64, Ordering},
62};
63use wasmtime_environ::{
64 DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, Tunables, VMOffsets,
65};
66
67#[cfg(feature = "gc")]
68use super::GcHeapAllocationIndex;
69#[cfg(feature = "gc")]
70use crate::runtime::vm::{GcHeap, GcRuntime};
71#[cfg(feature = "gc")]
72use gc_heap_pool::GcHeapPool;
73
74#[cfg(feature = "async")]
75use stack_pool::StackPool;
76
77#[cfg(feature = "component-model")]
78use wasmtime_environ::{
79 StaticModuleIndex,
80 component::{Component, VMComponentOffsets},
81};
82
/// Rounds `n` up to the next multiple of `to`.
///
/// `to` must be a nonzero power of two (checked in debug builds only).
/// Uses the standard mask trick: adding `to - 1` and clearing the low bits
/// rounds up without any division.
fn round_up_to_pow2(n: usize, to: usize) -> usize {
    debug_assert!(to > 0);
    debug_assert!(to.is_power_of_two());
    let mask = to - 1;
    (n + mask) & !mask
}
88
/// Limits for the resource pools managed by the pooling instance allocator.
///
/// `total_*` fields cap how many of a resource may be live at once across
/// the whole allocator; `max_*_per_*` fields cap what a single module or
/// component may declare. Limits on pool sizes (`total_memories`,
/// `total_tables`, etc.) are enforced inside the respective pool
/// implementations (not visible in this file) — TODO confirm.
#[derive(Debug, Copy, Clone)]
pub struct InstanceLimits {
    /// Maximum number of concurrently live component instances (checked by
    /// `increment_component_instance_count`).
    pub total_component_instances: u32,

    /// Maximum size, in bytes, of a component's `VMComponentContext`
    /// (checked by `validate_component_instance_size`).
    pub component_instance_size: usize,

    /// Maximum number of concurrently live core module instances (checked by
    /// `increment_core_instance_count`).
    pub total_core_instances: u32,

    /// Maximum number of core instances a single component may transitively
    /// instantiate (checked by `validate_component_impl`).
    pub max_core_instances_per_component: u32,

    /// Maximum number of linear memories a single component may transitively
    /// contain (checked by `validate_component_impl`).
    pub max_memories_per_component: u32,

    /// Maximum number of tables a single component may transitively contain
    /// (checked by `validate_component_impl`).
    pub max_tables_per_component: u32,

    /// Maximum number of linear memories in the memory pool.
    pub total_memories: u32,

    /// Maximum number of tables in the table pool.
    pub total_tables: u32,

    /// Maximum number of fiber stacks in the stack pool (async only).
    #[cfg(feature = "async")]
    pub total_stacks: u32,

    /// Maximum size, in bytes, of a core instance's `Instance` allocation;
    /// rounded up to `Instance` alignment by `core_instance_size()`.
    pub core_instance_size: usize,

    /// Maximum number of defined tables per core module — presumably
    /// enforced by `TablePool::validate`; verify there.
    pub max_tables_per_module: u32,

    /// Maximum number of elements per pooled table — presumably enforced by
    /// the table pool; verify there.
    pub table_elements: usize,

    /// Maximum number of defined memories per core module — presumably
    /// enforced by `MemoryPool::validate_memories`; verify there.
    pub max_memories_per_module: u32,

    /// Maximum byte size of any single linear memory; must fit within the
    /// configured memory reservation (see the constructor-failure test at
    /// the bottom of this file).
    pub max_memory_size: usize,

    /// Maximum number of GC heaps in the GC heap pool (gc only).
    #[cfg(feature = "gc")]
    pub total_gc_heaps: u32,
}
151
152impl Default for InstanceLimits {
153 fn default() -> Self {
154 let total = if cfg!(target_pointer_width = "32") {
155 100
156 } else {
157 1000
158 };
159 Self {
162 total_component_instances: total,
163 component_instance_size: 1 << 20, total_core_instances: total,
165 max_core_instances_per_component: u32::MAX,
166 max_memories_per_component: u32::MAX,
167 max_tables_per_component: u32::MAX,
168 total_memories: total,
169 total_tables: total,
170 #[cfg(feature = "async")]
171 total_stacks: total,
172 core_instance_size: 1 << 20, max_tables_per_module: 1,
174 table_elements: 20_000,
177 max_memories_per_module: 1,
178 #[cfg(target_pointer_width = "64")]
179 max_memory_size: 1 << 32, #[cfg(target_pointer_width = "32")]
181 max_memory_size: 10 << 20, #[cfg(feature = "gc")]
183 total_gc_heaps: total,
184 }
185 }
186}
187
/// Configuration for constructing a `PoolingInstanceAllocator`.
#[derive(Copy, Clone, Debug)]
pub struct PoolingInstanceAllocatorConfig {
    /// Maximum number of "warm" unused slots to retain — presumably consumed
    /// by the memory pool's slot-reuse policy; verify there.
    pub max_unused_warm_slots: u32,
    /// Number of raw regions the shared decommit queue accumulates before it
    /// is flushed (see `merge_or_flush`).
    pub decommit_batch_size: usize,
    /// Size, in bytes, of each pooled fiber stack — consumed by the stack
    /// pool; see the stack tests at the bottom of this file.
    pub stack_size: usize,
    /// Per-resource pool limits.
    pub limits: InstanceLimits,
    /// Whether fiber stacks are zeroed on deallocation (see
    /// `test_stack_zeroed` / `test_stack_unzeroed`).
    pub async_stack_zeroing: bool,
    /// Bytes of a deallocated stack to keep resident rather than decommit —
    /// presumably consumed by the stack pool; verify there.
    #[cfg(feature = "async")]
    pub async_stack_keep_resident: usize,
    /// Bytes of a deallocated linear memory to keep resident rather than
    /// decommit — consumed by the memory pool.
    pub linear_memory_keep_resident: usize,
    /// Bytes of a deallocated table to keep resident rather than decommit —
    /// consumed by the table pool.
    pub table_keep_resident: usize,
    /// Whether memory protection keys (MPK) are disabled/enabled/auto.
    pub memory_protection_keys: MpkEnabled,
    /// Upper bound on the number of protection keys to use.
    pub max_memory_protection_keys: usize,
}
228
229impl Default for PoolingInstanceAllocatorConfig {
230 fn default() -> PoolingInstanceAllocatorConfig {
231 PoolingInstanceAllocatorConfig {
232 max_unused_warm_slots: 100,
233 decommit_batch_size: 1,
234 stack_size: 2 << 20,
235 limits: InstanceLimits::default(),
236 async_stack_zeroing: false,
237 #[cfg(feature = "async")]
238 async_stack_keep_resident: 0,
239 linear_memory_keep_resident: 0,
240 table_keep_resident: 0,
241 memory_protection_keys: MpkEnabled::Disable,
242 max_memory_protection_keys: 16,
243 }
244 }
245}
246
/// Error returned when one of the allocator's concurrency limits is reached
/// (e.g. total core instances, component instances, memories, tables, or
/// fiber stacks).
#[derive(Debug)]
pub struct PoolConcurrencyLimitError {
    /// The configured limit that was hit; reported in the `Display` output.
    limit: usize,
    /// Human-readable name of the limited resource (e.g. "core instances").
    kind: Cow<'static, str>,
}
255
// No underlying `source` error to expose, so the default trait methods are
// sufficient.
impl core::error::Error for PoolConcurrencyLimitError {}
257
258impl Display for PoolConcurrencyLimitError {
259 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
260 let limit = self.limit;
261 let kind = &self.kind;
262 write!(f, "maximum concurrent limit of {limit} for {kind} reached")
263 }
264}
265
266impl PoolConcurrencyLimitError {
267 fn new(limit: usize, kind: impl Into<Cow<'static, str>>) -> Self {
268 Self {
269 limit,
270 kind: kind.into(),
271 }
272 }
273}
274
/// An instance allocator that pre-allocates pools of memories, tables, GC
/// heaps, and fiber stacks, handing out slots instead of allocating from the
/// OS per instance.
#[derive(Debug)]
pub struct PoolingInstanceAllocator {
    /// How many raw regions the shared decommit queue may accumulate before
    /// being flushed (see `merge_or_flush`).
    decommit_batch_size: usize,
    /// Configured per-resource limits.
    limits: InstanceLimits,

    /// Count of live core instances, bounded by
    /// `limits.total_core_instances`.
    live_core_instances: AtomicU64,
    /// Count of live component instances, bounded by
    /// `limits.total_component_instances`.
    live_component_instances: AtomicU64,

    /// Shared queue of memory regions (and their owning memories/tables/
    /// stacks) awaiting a batched decommit.
    decommit_queue: Mutex<DecommitQueue>,
    /// Pool of linear-memory slots.
    memories: MemoryPool,
    /// Pool of table slots.
    tables: TablePool,

    /// Pool of GC heaps (gc feature only).
    #[cfg(feature = "gc")]
    gc_heaps: GcHeapPool,

    /// Pool of fiber stacks (async feature only).
    #[cfg(feature = "async")]
    stacks: StackPool,
}
307
impl Drop for PoolingInstanceAllocator {
    /// Debug-only teardown checks: verifies that no instances are live and
    /// that every pool has had all of its entries returned.
    fn drop(&mut self) {
        // All the checks below are `debug_assert`s, so there is nothing to
        // do in release builds.
        if !cfg!(debug_assertions) {
            return;
        }

        // Flush any pending decommits first: entries parked in the queue
        // still hold their pool slots, so the pools would otherwise appear
        // non-empty below.
        let queue = self.decommit_queue.lock().unwrap();
        self.flush_decommit_queue(queue);

        debug_assert_eq!(self.live_component_instances.load(Ordering::Acquire), 0);
        debug_assert_eq!(self.live_core_instances.load(Ordering::Acquire), 0);

        debug_assert!(self.memories.is_empty());
        debug_assert!(self.tables.is_empty());

        #[cfg(feature = "gc")]
        debug_assert!(self.gc_heaps.is_empty());

        #[cfg(feature = "async")]
        debug_assert!(self.stacks.is_empty());
    }
}
337
impl PoolingInstanceAllocator {
    /// Creates a new pooling allocator from `config` and `tunables`,
    /// constructing each underlying resource pool up front.
    pub fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result<Self> {
        Ok(Self {
            decommit_batch_size: config.decommit_batch_size,
            limits: config.limits,
            live_component_instances: AtomicU64::new(0),
            live_core_instances: AtomicU64::new(0),
            decommit_queue: Mutex::new(DecommitQueue::default()),
            memories: MemoryPool::new(config, tunables)?,
            tables: TablePool::new(config)?,
            #[cfg(feature = "gc")]
            gc_heaps: GcHeapPool::new(config)?,
            #[cfg(feature = "async")]
            stacks: StackPool::new(config)?,
        })
    }

    /// The configured maximum core-instance size, rounded up to `Instance`'s
    /// alignment so the comparison against `Instance::alloc_layout` is fair.
    fn core_instance_size(&self) -> usize {
        round_up_to_pow2(self.limits.core_instance_size, mem::align_of::<Instance>())
    }

    /// Checks that `module`'s tables fit within the table pool's limits.
    fn validate_table_plans(&self, module: &Module) -> Result<()> {
        self.tables.validate(module)
    }

    /// Checks that `module`'s memories fit within the memory pool's limits.
    fn validate_memory_plans(&self, module: &Module) -> Result<()> {
        self.memories.validate_memories(module)
    }

    /// Ensures that a module's `Instance` allocation fits within the
    /// configured `core_instance_size`; on failure the error message includes
    /// a per-region breakdown of where the bytes go.
    fn validate_core_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        let layout = Instance::alloc_layout(offsets);
        if layout.size() <= self.core_instance_size() {
            return Ok(());
        }

        // Doesn't fit: build a human-readable breakdown to help users tune
        // their configuration.
        let mut message = format!(
            "instance allocation for this module \
             requires {} bytes which exceeds the configured maximum \
             of {} bytes; breakdown of allocation requirement:\n\n",
            layout.size(),
            self.core_instance_size(),
        );

        let mut remaining = layout.size();
        let mut push = |name: &str, bytes: usize| {
            assert!(remaining >= bytes);
            remaining -= bytes;

            // Only report regions accounting for more than 5% of the total,
            // keeping the message focused on the major contributors.
            if bytes > layout.size() / 20 {
                message.push_str(&format!(
                    " * {:.02}% - {} bytes - {}\n",
                    ((bytes as f32) / (layout.size() as f32)) * 100.0,
                    bytes,
                    name,
                ));
            }
        };

        // The fixed-size `Instance` header, followed by each region that
        // `VMOffsets` describes.
        push("instance state management", mem::size_of::<Instance>());

        for (desc, size) in offsets.region_sizes() {
            push(desc, size as usize);
        }

        // Sanity-check that the breakdown accounted for every byte.
        assert_eq!(remaining, 0);

        bail!("{}", message)
    }

    /// Ensures that a component's `VMComponentContext` fits within the
    /// configured `component_instance_size`.
    #[cfg(feature = "component-model")]
    fn validate_component_instance_size(
        &self,
        offsets: &VMComponentOffsets<HostPtr>,
    ) -> Result<()> {
        if usize::try_from(offsets.size_of_vmctx()).unwrap() <= self.limits.component_instance_size
        {
            return Ok(());
        }

        bail!(
            "instance allocation for this component requires {} bytes of `VMComponentContext` \
             space which exceeds the configured maximum of {} bytes",
            offsets.size_of_vmctx(),
            self.limits.component_instance_size
        )
    }

    /// Takes ownership of the queued decommits and flushes them with the
    /// lock released, so other threads may enqueue while the (potentially
    /// slow) flush runs. Returns whatever `DecommitQueue::flush` returns —
    /// presumably whether anything was actually freed back to the pools;
    /// confirm against `decommit_queue.rs`.
    fn flush_decommit_queue(&self, mut locked_queue: MutexGuard<'_, DecommitQueue>) -> bool {
        // Swap the queue out under the lock, then drop the guard before the
        // flush itself.
        let queue = mem::take(&mut *locked_queue);
        drop(locked_queue);
        queue.flush(self)
    }

    /// Runs `f`, and if it fails specifically due to a pool-concurrency
    /// limit, flushes the decommit queue (which may return slots to the
    /// pools) and retries once.
    fn with_flush_and_retry<T>(&self, mut f: impl FnMut() -> Result<T>) -> Result<T> {
        f().or_else(|e| {
            if e.is::<PoolConcurrencyLimitError>() {
                let queue = self.decommit_queue.lock().unwrap();
                if self.flush_decommit_queue(queue) {
                    return f();
                }
            }

            Err(e)
        })
    }

    /// Disposes of a thread-local decommit queue: flush it immediately when
    /// it is empty of raw regions or already at/over the batch size,
    /// otherwise merge it into the shared queue (flushing that if the merge
    /// pushes it over the batch size).
    fn merge_or_flush(&self, mut local_queue: DecommitQueue) {
        match local_queue.raw_len() {
            0 => {
                // No raw regions to batch up; return any held resources to
                // their pools right away.
                local_queue.flush(self);
            }

            n if n >= self.decommit_batch_size => {
                // Already a full batch on its own — no need to take the
                // shared lock.
                local_queue.flush(self);
            }

            n => {
                debug_assert!(n < self.decommit_batch_size);
                let mut shared_queue = self.decommit_queue.lock().unwrap();
                shared_queue.append(&mut local_queue);
                if shared_queue.raw_len() >= self.decommit_batch_size {
                    self.flush_decommit_queue(shared_queue);
                }
            }
        }
    }
}
503
unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator {
    /// Validates that `component` — including every core module it
    /// statically instantiates — fits within this allocator's limits.
    #[cfg(feature = "component-model")]
    fn validate_component_impl<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()> {
        self.validate_component_instance_size(offsets)
            .context("component instance size does not fit in pooling allocator requirements")?;

        // Walk the initializers, tallying how many core instances, memories,
        // and tables instantiating this component would require.
        let mut num_core_instances = 0;
        let mut num_memories = 0;
        let mut num_tables = 0;
        for init in &component.initializers {
            use wasmtime_environ::component::GlobalInitializer::*;
            use wasmtime_environ::component::InstantiateModule;
            match init {
                InstantiateModule(InstantiateModule::Import(_, _)) => {
                    // An imported instance still occupies a core-instance
                    // slot; its memories/tables are not tallied here.
                    num_core_instances += 1;
                }
                InstantiateModule(InstantiateModule::Static(static_module_index, _)) => {
                    // Statically-known modules can also be validated
                    // individually against the per-module limits.
                    let module = get_module(*static_module_index);
                    let offsets = VMOffsets::new(HostPtr, &module);
                    self.validate_module_impl(module, &offsets)?;
                    num_core_instances += 1;
                    num_memories += module.num_defined_memories();
                    num_tables += module.num_defined_tables();
                }
                // These initializers allocate no pooled resources.
                LowerImport { .. }
                | ExtractMemory(_)
                | ExtractTable(_)
                | ExtractRealloc(_)
                | ExtractCallback(_)
                | ExtractPostReturn(_)
                | Resource(_) => {}
            }
        }

        if num_core_instances
            > usize::try_from(self.limits.max_core_instances_per_component).unwrap()
        {
            bail!(
                "The component transitively contains {num_core_instances} core module instances, \
                 which exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_core_instances_per_component
            );
        }

        if num_memories > usize::try_from(self.limits.max_memories_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_memories} Wasm linear memories, which \
                 exceeds the configured maximum of {} in the pooling allocator",
                self.limits.max_memories_per_component
            );
        }

        if num_tables > usize::try_from(self.limits.max_tables_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_tables} tables, which exceeds the \
                 configured maximum of {} in the pooling allocator",
                self.limits.max_tables_per_component
            );
        }

        Ok(())
    }

    /// Validates that a single core module fits the memory, table, and
    /// instance-size limits of this allocator.
    fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        self.validate_memory_plans(module)
            .context("module memory does not fit in pooling allocator requirements")?;
        self.validate_table_plans(module)
            .context("module table does not fit in pooling allocator requirements")?;
        self.validate_core_instance_size(offsets)
            .context("module instance size does not fit in pooling allocator requirements")?;
        Ok(())
    }

    /// Validates a single memory type against the memory pool's limits
    /// (used for GC heap memories).
    #[cfg(feature = "gc")]
    fn validate_memory_impl(&self, memory: &wasmtime_environ::Memory) -> Result<()> {
        self.memories.validate_memory(memory)
    }

    /// Reserves a component-instance slot, failing with
    /// `PoolConcurrencyLimitError` once the configured total is reached.
    #[cfg(feature = "component-model")]
    fn increment_component_instance_count(&self) -> Result<()> {
        // Optimistically bump the counter, then undo the bump if we raced
        // past the limit.
        let old_count = self.live_component_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_component_instances) {
            self.decrement_component_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_component_instances).unwrap(),
                "component instances",
            )
            .into());
        }
        Ok(())
    }

    /// Releases a component-instance slot previously reserved above.
    #[cfg(feature = "component-model")]
    fn decrement_component_instance_count(&self) {
        self.live_component_instances.fetch_sub(1, Ordering::AcqRel);
    }

    /// Reserves a core-instance slot, failing with
    /// `PoolConcurrencyLimitError` once the configured total is reached.
    fn increment_core_instance_count(&self) -> Result<()> {
        // Same optimistic bump-then-undo scheme as the component counter.
        let old_count = self.live_core_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_core_instances) {
            self.decrement_core_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_core_instances).unwrap(),
                "core instances",
            )
            .into());
        }
        Ok(())
    }

    /// Releases a core-instance slot previously reserved above.
    fn decrement_core_instance_count(&self) {
        self.live_core_instances.fetch_sub(1, Ordering::AcqRel);
    }

    /// Allocates a linear memory from the pool, flushing the decommit queue
    /// and retrying once if the pool is exhausted.
    fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        memory_index: Option<DefinedMemoryIndex>,
    ) -> Result<(MemoryAllocationIndex, Memory)> {
        self.with_flush_and_retry(|| self.memories.allocate(request, ty, tunables, memory_index))
    }

    /// Returns a linear memory to the pool, enqueuing its pages for a
    /// (possibly batched) decommit.
    unsafe fn deallocate_memory(
        &self,
        _memory_index: Option<DefinedMemoryIndex>,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    ) {
        // Pooled memories are always backed by a static image slot.
        let mut image = memory.unwrap_static_image();
        let mut queue = DecommitQueue::default();
        image
            .clear_and_remain_ready(self.memories.keep_resident, |ptr, len| {
                // SAFETY: `ptr`/`len` describe regions handed out by
                // `clear_and_remain_ready`; the queue decommits them only
                // while the slot is still reserved — TODO confirm against
                // `decommit_queue.rs`.
                unsafe {
                    queue.push_raw(ptr, len);
                }
            })
            .expect("failed to reset memory image");

        // The image (and its slot) is released back to the pool only when
        // the queue is flushed.
        unsafe {
            queue.push_memory(allocation_index, image);
        }
        self.merge_or_flush(queue);
    }

    /// Allocates a table from the pool, flushing the decommit queue and
    /// retrying once if the pool is exhausted.
    fn allocate_table(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Table,
        tunables: &Tunables,
        _table_index: DefinedTableIndex,
    ) -> Result<(super::TableAllocationIndex, Table)> {
        self.with_flush_and_retry(|| self.tables.allocate(request, ty, tunables))
    }

    /// Returns a table to the pool, enqueuing its pages for a (possibly
    /// batched) decommit.
    unsafe fn deallocate_table(
        &self,
        _table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        mut table: Table,
    ) {
        let mut queue = DecommitQueue::default();
        // SAFETY: the regions reported by `reset_table_pages_to_zero` belong
        // to the slot identified by `allocation_index`, which stays reserved
        // until the queue is flushed — TODO confirm.
        unsafe {
            self.tables
                .reset_table_pages_to_zero(allocation_index, &mut table, |ptr, len| {
                    queue.push_raw(ptr, len);
                });
        }

        unsafe {
            queue.push_table(allocation_index, table);
        }
        self.merge_or_flush(queue);
    }

    /// Allocates a fiber stack from the pool, flushing the decommit queue
    /// and retrying once if the pool is exhausted.
    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
        self.with_flush_and_retry(|| self.stacks.allocate())
    }

    /// Returns a fiber stack to the pool, zeroing it (per configuration) and
    /// enqueuing its pages for a (possibly batched) decommit.
    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, mut stack: wasmtime_fiber::FiberStack) {
        let mut queue = DecommitQueue::default();
        // SAFETY: regions reported by `zero_stack` belong to `stack`, which
        // is kept alive in the queue until the flush — TODO confirm.
        unsafe {
            self.stacks
                .zero_stack(&mut stack, |ptr, len| queue.push_raw(ptr, len));
        }
        unsafe {
            queue.push_stack(stack);
        }
        self.merge_or_flush(queue);
    }

    /// Drops module-affine state (e.g. cached memory images) for `module`.
    fn purge_module(&self, module: CompiledModuleId) {
        self.memories.purge_module(module);
    }

    /// Returns the next unused memory protection key, if any.
    fn next_available_pkey(&self) -> Option<ProtectionKey> {
        self.memories.next_available_pkey()
    }

    /// Restricts the current thread to accessing only memory tagged with
    /// `pkey` (plus untagged memory, per `ProtectionMask::zero`).
    fn restrict_to_pkey(&self, pkey: ProtectionKey) {
        mpk::allow(ProtectionMask::zero().or(pkey));
    }

    /// Lifts all protection-key restrictions on the current thread.
    fn allow_all_pkeys(&self) {
        mpk::allow(ProtectionMask::all());
    }

    /// Allocates a GC heap backed by the given pooled memory.
    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        engine: &crate::Engine,
        gc_runtime: &dyn GcRuntime,
        memory_alloc_index: MemoryAllocationIndex,
        memory: Memory,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
        self.gc_heaps
            .allocate(engine, gc_runtime, memory_alloc_index, memory)
    }

    /// Returns a GC heap to the pool, recovering its backing memory.
    #[cfg(feature = "gc")]
    fn deallocate_gc_heap(
        &self,
        allocation_index: GcHeapAllocationIndex,
        gc_heap: Box<dyn GcHeap>,
    ) -> (MemoryAllocationIndex, Memory) {
        self.gc_heaps.deallocate(allocation_index, gc_heap)
    }
}
760
#[cfg(test)]
#[cfg(target_pointer_width = "64")]
mod test {
    use super::*;

    /// Construction must fail when the configured maximum memory size
    /// exceeds the per-slot memory reservation.
    #[test]
    fn test_pooling_allocator_with_memory_pages_exceeded() {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_memories: 1,
                // Deliberately larger than the 0x10000-byte reservation
                // below.
                max_memory_size: 0x100010000,
                ..Default::default()
            },
            ..PoolingInstanceAllocatorConfig::default()
        };
        assert_eq!(
            PoolingInstanceAllocator::new(
                &config,
                &Tunables {
                    memory_reservation: 0x10000,
                    ..Tunables::default_host()
                },
            )
            .map_err(|e| e.to_string())
            .expect_err("expected a failure constructing instance allocator"),
            "maximum memory size of 0x100010000 bytes exceeds the configured \
             memory reservation of 0x10000 bytes"
        );
    }

    /// With `async_stack_zeroing` enabled, a reused stack slot must read as
    /// zero on every allocation even though the previous user wrote to it.
    #[cfg(all(
        unix,
        target_pointer_width = "64",
        feature = "async",
        not(miri),
        not(asan)
    ))]
    #[test]
    fn test_stack_zeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                // A single stack slot forces every allocation to reuse it.
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for _ in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // Probe the byte just below the top of the stack.
                let addr = stack.top().unwrap().sub(1);

                // Zeroing is enabled, so the write from the previous
                // iteration must not be visible.
                assert_eq!(*addr, 0);
                *addr = 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }

    /// With `async_stack_zeroing` disabled, a reused stack slot retains the
    /// previous user's writes.
    #[cfg(all(
        unix,
        target_pointer_width = "64",
        feature = "async",
        not(miri),
        not(asan)
    ))]
    #[test]
    fn test_stack_unzeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                // A single stack slot forces every allocation to reuse it.
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: false,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for i in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                // Probe the byte just below the top of the stack.
                let addr = stack.top().unwrap().sub(1);

                // No zeroing: the value written on the previous iteration
                // must still be there.
                assert_eq!(*addr, i);
                *addr = i + 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }
}