mod decommit_queue;
mod index_allocator;
mod memory_pool;
mod table_pool;

#[cfg(feature = "gc")]
mod gc_heap_pool;

#[cfg(all(feature = "async"))]
mod generic_stack_pool;
#[cfg(all(feature = "async", unix, not(miri)))]
mod unix_stack_pool;

#[cfg(all(feature = "async"))]
cfg_if::cfg_if! {
    if #[cfg(all(unix, not(miri), not(asan)))] {
        use unix_stack_pool as stack_pool;
    } else {
        use generic_stack_pool as stack_pool;
    }
}

use self::decommit_queue::DecommitQueue;
use self::memory_pool::MemoryPool;
use self::table_pool::TablePool;
use super::{
    InstanceAllocationRequest, InstanceAllocatorImpl, MemoryAllocationIndex, TableAllocationIndex,
};
use crate::prelude::*;
use crate::runtime::vm::{
    instance::Instance,
    mpk::{self, ProtectionKey, ProtectionMask},
    CompiledModuleId, Memory, Table,
};
use crate::MpkEnabled;
use std::borrow::Cow;
use std::fmt::Display;
use std::sync::{Mutex, MutexGuard};
use std::{
    mem,
    sync::atomic::{AtomicU64, Ordering},
};
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, Module, Tunables, VMOffsets,
};

#[cfg(feature = "gc")]
use super::GcHeapAllocationIndex;
#[cfg(feature = "gc")]
use crate::runtime::vm::{GcHeap, GcRuntime};
#[cfg(feature = "gc")]
use gc_heap_pool::GcHeapPool;

#[cfg(feature = "async")]
use stack_pool::StackPool;

#[cfg(feature = "component-model")]
use wasmtime_environ::{
    component::{Component, VMComponentOffsets},
    StaticModuleIndex,
};

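/// Round `n` up to the nearest multiple of `to`; `to` must be a power of two.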
fn round_up_to_pow2(n: usize, to: usize) -> usize {
    debug_assert!(to > 0);
    debug_assert!(to.is_power_of_two());
    (n + to - 1) & !(to - 1)
}

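/// Limits enforced by a [`PoolingInstanceAllocator`]: how many component
/// instances, core instances, memories, tables, stacks, and GC heaps may be
/// live at once, plus per-module/per-component shape limits and size caps for
/// instance state.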
#[derive(Debug, Copy, Clone)]
pub struct InstanceLimits {
    pub total_component_instances: u32,

    pub component_instance_size: usize,

    pub total_core_instances: u32,

    pub max_core_instances_per_component: u32,

    pub max_memories_per_component: u32,

    pub max_tables_per_component: u32,

    pub total_memories: u32,

    pub total_tables: u32,

    #[cfg(feature = "async")]
    pub total_stacks: u32,

    pub core_instance_size: usize,

    pub max_tables_per_module: u32,

    pub table_elements: usize,

    pub max_memories_per_module: u32,

    pub max_memory_size: usize,

    #[cfg(feature = "gc")]
    pub total_gc_heaps: u32,
}

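// Note: these defaults are assumed to line up with the defaults documented on
// `wasmtime::PoolingAllocationConfig`.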
impl Default for InstanceLimits {
    fn default() -> Self {
        Self {
            total_component_instances: 1000,
            component_instance_size: 1 << 20,
            total_core_instances: 1000,
            max_core_instances_per_component: u32::MAX,
            max_memories_per_component: u32::MAX,
            max_tables_per_component: u32::MAX,
            total_memories: 1000,
            total_tables: 1000,
            #[cfg(feature = "async")]
            total_stacks: 1000,
            core_instance_size: 1 << 20,
            max_tables_per_module: 1,
            table_elements: 20_000,
            max_memories_per_module: 1,
            #[cfg(target_pointer_width = "64")]
            max_memory_size: 1 << 32,
            #[cfg(target_pointer_width = "32")]
            max_memory_size: usize::MAX,
            #[cfg(feature = "gc")]
            total_gc_heaps: 1000,
        }
    }
}

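/// Configuration for a [`PoolingInstanceAllocator`]: the instance limits plus
/// knobs for warm-slot reuse, decommit batching, async stack zeroing, how much
/// memory to keep resident on deallocation, and memory protection keys.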
#[derive(Copy, Clone, Debug)]
pub struct PoolingInstanceAllocatorConfig {
    pub max_unused_warm_slots: u32,
    pub decommit_batch_size: usize,
    pub stack_size: usize,
    pub limits: InstanceLimits,
    pub async_stack_zeroing: bool,
    #[cfg(feature = "async")]
    pub async_stack_keep_resident: usize,
    pub linear_memory_keep_resident: usize,
    pub table_keep_resident: usize,
    pub memory_protection_keys: MpkEnabled,
    pub max_memory_protection_keys: usize,
}

impl Default for PoolingInstanceAllocatorConfig {
    fn default() -> PoolingInstanceAllocatorConfig {
        PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 100,
            decommit_batch_size: 1,
            stack_size: 2 << 20,
            limits: InstanceLimits::default(),
            async_stack_zeroing: false,
            #[cfg(feature = "async")]
            async_stack_keep_resident: 0,
            linear_memory_keep_resident: 0,
            table_keep_resident: 0,
            memory_protection_keys: MpkEnabled::Disable,
            max_memory_protection_keys: 16,
        }
    }
}

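/// Error returned when one of the pooling allocator's maximum-concurrency
/// limits (e.g. total instances, memories, tables, or stacks) has been
/// reached.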
#[derive(Debug)]
pub struct PoolConcurrencyLimitError {
    limit: usize,
    kind: Cow<'static, str>,
}

impl std::error::Error for PoolConcurrencyLimitError {}

impl Display for PoolConcurrencyLimitError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let limit = self.limit;
        let kind = &self.kind;
        write!(f, "maximum concurrent limit of {limit} for {kind} reached")
    }
}

impl PoolConcurrencyLimitError {
    fn new(limit: usize, kind: impl Into<Cow<'static, str>>) -> Self {
        Self {
            limit,
            kind: kind.into(),
        }
    }
}

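/// An instance allocator that hands out resources from fixed-size,
/// preallocated pools (memories, tables, stacks, GC heaps) instead of
/// allocating from the OS on each instantiation, batching decommits of freed
/// resources through a shared [`DecommitQueue`].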
#[derive(Debug)]
pub struct PoolingInstanceAllocator {
    decommit_batch_size: usize,
    limits: InstanceLimits,

    live_core_instances: AtomicU64,
    live_component_instances: AtomicU64,

    decommit_queue: Mutex<DecommitQueue>,
    memories: MemoryPool,
    tables: TablePool,

    #[cfg(feature = "gc")]
    gc_heaps: GcHeapPool,

    #[cfg(feature = "async")]
    stacks: StackPool,
}

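// In debug builds, flush any queued decommits on drop and assert that every
// pooled resource has been returned before the allocator goes away.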
#[cfg(debug_assertions)]
impl Drop for PoolingInstanceAllocator {
    fn drop(&mut self) {
        let queue = self.decommit_queue.lock().unwrap();
        self.flush_decommit_queue(queue);

        debug_assert_eq!(self.live_component_instances.load(Ordering::Acquire), 0);
        debug_assert_eq!(self.live_core_instances.load(Ordering::Acquire), 0);

        debug_assert!(self.memories.is_empty());
        debug_assert!(self.tables.is_empty());

        #[cfg(feature = "gc")]
        debug_assert!(self.gc_heaps.is_empty());

        #[cfg(feature = "async")]
        debug_assert!(self.stacks.is_empty());
    }
}

impl PoolingInstanceAllocator {
    pub fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result<Self> {
        Ok(Self {
            decommit_batch_size: config.decommit_batch_size,
            limits: config.limits,
            live_component_instances: AtomicU64::new(0),
            live_core_instances: AtomicU64::new(0),
            decommit_queue: Mutex::new(DecommitQueue::default()),
            memories: MemoryPool::new(config, tunables)?,
            tables: TablePool::new(config)?,
            #[cfg(feature = "gc")]
            gc_heaps: GcHeapPool::new(config)?,
            #[cfg(feature = "async")]
            stacks: StackPool::new(config)?,
        })
    }

    fn core_instance_size(&self) -> usize {
        round_up_to_pow2(self.limits.core_instance_size, mem::align_of::<Instance>())
    }

    fn validate_table_plans(&self, module: &Module) -> Result<()> {
        self.tables.validate(module)
    }

    fn validate_memory_plans(&self, module: &Module) -> Result<()> {
        self.memories.validate(module)
    }

    fn validate_core_instance_size(&self, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        let layout = Instance::alloc_layout(offsets);
        if layout.size() <= self.core_instance_size() {
            return Ok(());
        }

        let mut message = format!(
            "instance allocation for this module \
             requires {} bytes which exceeds the configured maximum \
             of {} bytes; breakdown of allocation requirement:\n\n",
            layout.size(),
            self.core_instance_size(),
        );

        let mut remaining = layout.size();
        let mut push = |name: &str, bytes: usize| {
            assert!(remaining >= bytes);
            remaining -= bytes;

            if bytes > layout.size() / 20 {
                message.push_str(&format!(
                    " * {:.02}% - {} bytes - {}\n",
                    ((bytes as f32) / (layout.size() as f32)) * 100.0,
                    bytes,
                    name,
                ));
            }
        };

        push("instance state management", mem::size_of::<Instance>());

        for (desc, size) in offsets.region_sizes() {
            push(desc, size as usize);
        }

        assert_eq!(remaining, 0);

        bail!("{}", message)
    }

    #[cfg(feature = "component-model")]
    fn validate_component_instance_size(
        &self,
        offsets: &VMComponentOffsets<HostPtr>,
    ) -> Result<()> {
        if usize::try_from(offsets.size_of_vmctx()).unwrap() <= self.limits.component_instance_size
        {
            return Ok(());
        }

        bail!(
            "instance allocation for this component requires {} bytes of `VMComponentContext` \
             space which exceeds the configured maximum of {} bytes",
            offsets.size_of_vmctx(),
            self.limits.component_instance_size
        )
    }

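    /// Take the contents of the locked decommit queue, release the lock, and
    /// flush the queue's entries back into the pools, returning whether any
    /// entries were flushed.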
    fn flush_decommit_queue(&self, mut locked_queue: MutexGuard<'_, DecommitQueue>) -> bool {
        let queue = mem::take(&mut *locked_queue);
        drop(locked_queue);
        queue.flush(self)
    }

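    /// Run `f`; if it fails because a pool's concurrency limit was hit, flush
    /// the shared decommit queue (which may return slots to the pools) and
    /// retry `f` once.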
    fn with_flush_and_retry<T>(&self, mut f: impl FnMut() -> Result<T>) -> Result<T> {
        f().or_else(|e| {
            if e.is::<PoolConcurrencyLimitError>() {
                let queue = self.decommit_queue.lock().unwrap();
                if self.flush_decommit_queue(queue) {
                    return f();
                }
            }

            Err(e)
        })
    }

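    /// Merge `local_queue` into the shared decommit queue or flush it
    /// immediately: a queue with nothing to decommit, or one that already
    /// meets `decommit_batch_size`, is flushed right away, otherwise its
    /// entries are appended to the shared queue, which is flushed once it
    /// reaches the batch size.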
    fn merge_or_flush(&self, mut local_queue: DecommitQueue) {
        match local_queue.raw_len() {
            0 => {
                local_queue.flush(self);
            }

            n if n >= self.decommit_batch_size => {
                local_queue.flush(self);
            }

            n => {
                debug_assert!(n < self.decommit_batch_size);
                let mut shared_queue = self.decommit_queue.lock().unwrap();
                shared_queue.append(&mut local_queue);
                if shared_queue.raw_len() >= self.decommit_batch_size {
                    self.flush_decommit_queue(shared_queue);
                }
            }
        }
    }
}

unsafe impl InstanceAllocatorImpl for PoolingInstanceAllocator {
    #[cfg(feature = "component-model")]
    fn validate_component_impl<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()> {
        self.validate_component_instance_size(offsets)?;

        let mut num_core_instances = 0;
        let mut num_memories = 0;
        let mut num_tables = 0;
        for init in &component.initializers {
            use wasmtime_environ::component::GlobalInitializer::*;
            use wasmtime_environ::component::InstantiateModule;
            match init {
                InstantiateModule(InstantiateModule::Import(_, _)) => {
                    num_core_instances += 1;
                }
                InstantiateModule(InstantiateModule::Static(static_module_index, _)) => {
                    let module = get_module(*static_module_index);
                    let offsets = VMOffsets::new(HostPtr, &module);
                    self.validate_module_impl(module, &offsets)?;
                    num_core_instances += 1;
                    num_memories += module.num_defined_memories();
                    num_tables += module.num_defined_tables();
                }
                LowerImport { .. }
                | ExtractMemory(_)
                | ExtractRealloc(_)
                | ExtractCallback(_)
                | ExtractPostReturn(_)
                | Resource(_) => {}
            }
        }

        if num_core_instances
            > usize::try_from(self.limits.max_core_instances_per_component).unwrap()
        {
            bail!(
                "The component transitively contains {num_core_instances} core module instances, \
                 which exceeds the configured maximum of {}",
                self.limits.max_core_instances_per_component
            );
        }

        if num_memories > usize::try_from(self.limits.max_memories_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_memories} Wasm linear memories, which \
                 exceeds the configured maximum of {}",
                self.limits.max_memories_per_component
            );
        }

        if num_tables > usize::try_from(self.limits.max_tables_per_component).unwrap() {
            bail!(
                "The component transitively contains {num_tables} tables, which exceeds the \
                 configured maximum of {}",
                self.limits.max_tables_per_component
            );
        }

        Ok(())
    }

    fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        self.validate_memory_plans(module)?;
        self.validate_table_plans(module)?;
        self.validate_core_instance_size(offsets)?;
        Ok(())
    }

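    // Note: the live counters below are incremented optimistically and
    // decremented again if the configured limit turns out to have already
    // been reached.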
    fn increment_component_instance_count(&self) -> Result<()> {
        let old_count = self.live_component_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_component_instances) {
            self.decrement_component_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_component_instances).unwrap(),
                "component instances",
            )
            .into());
        }
        Ok(())
    }

    fn decrement_component_instance_count(&self) {
        self.live_component_instances.fetch_sub(1, Ordering::AcqRel);
    }

    fn increment_core_instance_count(&self) -> Result<()> {
        let old_count = self.live_core_instances.fetch_add(1, Ordering::AcqRel);
        if old_count >= u64::from(self.limits.total_core_instances) {
            self.decrement_core_instance_count();
            return Err(PoolConcurrencyLimitError::new(
                usize::try_from(self.limits.total_core_instances).unwrap(),
                "core instances",
            )
            .into());
        }
        Ok(())
    }

    fn decrement_core_instance_count(&self) {
        self.live_core_instances.fetch_sub(1, Ordering::AcqRel);
    }

    unsafe fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        memory_index: DefinedMemoryIndex,
    ) -> Result<(MemoryAllocationIndex, Memory)> {
        self.with_flush_and_retry(|| self.memories.allocate(request, ty, tunables, memory_index))
    }

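    // Deallocation resets the memory image, queueing any regions that need to
    // be decommitted, and then hands the slot back via the decommit queue.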
    unsafe fn deallocate_memory(
        &self,
        _memory_index: DefinedMemoryIndex,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    ) {
        let mut image = memory.unwrap_static_image();
        let mut queue = DecommitQueue::default();
        image
            .clear_and_remain_ready(self.memories.keep_resident, |ptr, len| {
                queue.push_raw(ptr, len);
            })
            .expect("failed to reset memory image");
        queue.push_memory(allocation_index, image);
        self.merge_or_flush(queue);
    }

    unsafe fn allocate_table(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Table,
        tunables: &Tunables,
        _table_index: DefinedTableIndex,
    ) -> Result<(super::TableAllocationIndex, Table)> {
        self.with_flush_and_retry(|| self.tables.allocate(request, ty, tunables))
    }

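    // Table deallocation likewise zeroes the table's pages (queueing decommits
    // as needed) and returns the slot through the decommit queue.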
    unsafe fn deallocate_table(
        &self,
        _table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        mut table: Table,
    ) {
        let mut queue = DecommitQueue::default();
        self.tables
            .reset_table_pages_to_zero(allocation_index, &mut table, |ptr, len| {
                queue.push_raw(ptr, len);
            });
        queue.push_table(allocation_index, table);
        self.merge_or_flush(queue);
    }

    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
        self.with_flush_and_retry(|| self.stacks.allocate())
    }

    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, mut stack: wasmtime_fiber::FiberStack) {
        let mut queue = DecommitQueue::default();
        self.stacks
            .zero_stack(&mut stack, |ptr, len| queue.push_raw(ptr, len));
        queue.push_stack(stack);
        self.merge_or_flush(queue);
    }

    fn purge_module(&self, module: CompiledModuleId) {
        self.memories.purge_module(module);
    }

    fn next_available_pkey(&self) -> Option<ProtectionKey> {
        self.memories.next_available_pkey()
    }

    fn restrict_to_pkey(&self, pkey: ProtectionKey) {
        mpk::allow(ProtectionMask::zero().or(pkey));
    }

    fn allow_all_pkeys(&self) {
        mpk::allow(ProtectionMask::all());
    }

    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        gc_runtime: &dyn GcRuntime,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)> {
        self.gc_heaps.allocate(gc_runtime)
    }

    #[cfg(feature = "gc")]
    fn deallocate_gc_heap(
        &self,
        allocation_index: GcHeapAllocationIndex,
        gc_heap: Box<dyn GcHeap>,
    ) {
        self.gc_heaps.deallocate(allocation_index, gc_heap);
    }
}

#[cfg(test)]
#[cfg(target_pointer_width = "64")]
mod test {
    use super::*;

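    // Constructing the allocator must fail when the per-memory size limit
    // exceeds what the configured memory reservation can back.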
    #[test]
    fn test_pooling_allocator_with_memory_pages_exceeded() {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_memories: 1,
                max_memory_size: 0x100010000,
                ..Default::default()
            },
            ..PoolingInstanceAllocatorConfig::default()
        };
        assert_eq!(
            PoolingInstanceAllocator::new(
                &config,
                &Tunables {
                    memory_reservation: 0x10000,
                    ..Tunables::default_host()
                },
            )
            .map_err(|e| e.to_string())
            .expect_err("expected a failure constructing instance allocator"),
            "maximum memory size of 0x100010000 bytes exceeds the configured \
             memory reservation of 0x10000 bytes"
        );
    }

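    // With a single stack slot and `async_stack_zeroing` enabled, every
    // reallocation of the slot must observe a zeroed word just below the
    // stack's top, even after the previous iteration wrote to it.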
    #[cfg(all(unix, target_pointer_width = "64", feature = "async", not(miri)))]
    #[test]
    fn test_stack_zeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for _ in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, 0);
                *addr = 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }

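    // Same as above but with `async_stack_zeroing` disabled: the value written
    // on the previous iteration is still visible when the slot is reused.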
    #[cfg(all(unix, target_pointer_width = "64", feature = "async", not(miri)))]
    #[test]
    fn test_stack_unzeroed() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            max_unused_warm_slots: 0,
            limits: InstanceLimits {
                total_stacks: 1,
                total_memories: 0,
                total_tables: 0,
                ..Default::default()
            },
            stack_size: 128,
            async_stack_zeroing: false,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let allocator = PoolingInstanceAllocator::new(&config, &Tunables::default_host())?;

        unsafe {
            for i in 0..255 {
                let stack = allocator.allocate_fiber_stack()?;

                let addr = stack.top().unwrap().sub(1);

                assert_eq!(*addr, i);
                *addr = i + 1;

                allocator.deallocate_fiber_stack(stack);
            }
        }

        Ok(())
    }
}