// wasmtime/runtime/vm/instance/allocator/pooling/generic_stack_pool.rs
#![cfg_attr(not(asan), allow(dead_code))]

use crate::prelude::*;
use crate::{runtime::vm::PoolingInstanceAllocatorConfig, PoolConcurrencyLimitError};
use std::sync::atomic::{AtomicU64, Ordering};
7#[derive(Debug)]
19pub struct StackPool {
20 stack_size: usize,
21 stack_zeroing: bool,
22 live_stacks: AtomicU64,
23 stack_limit: u64,
24}
25
26impl StackPool {
27 pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
28 Ok(StackPool {
29 stack_size: config.stack_size,
30 stack_zeroing: config.async_stack_zeroing,
31 live_stacks: AtomicU64::new(0),
32 stack_limit: config.limits.total_stacks.into(),
33 })
34 }
35
36 #[allow(unused)] pub fn is_empty(&self) -> bool {
38 self.live_stacks.load(Ordering::Acquire) == 0
39 }
40
41 pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
42 if self.stack_size == 0 {
43 bail!("fiber stack allocation not supported")
44 }
45
46 let old_count = self.live_stacks.fetch_add(1, Ordering::AcqRel);
47 if old_count >= self.stack_limit {
48 self.live_stacks.fetch_sub(1, Ordering::AcqRel);
49 return Err(PoolConcurrencyLimitError::new(
50 usize::try_from(self.stack_limit).unwrap(),
51 "fibers",
52 )
53 .into());
54 }
55
56 match wasmtime_fiber::FiberStack::new(self.stack_size, self.stack_zeroing) {
57 Ok(stack) => Ok(stack),
58 Err(e) => {
59 self.live_stacks.fetch_sub(1, Ordering::AcqRel);
60 Err(anyhow::Error::from(e))
61 }
62 }
63 }
64
65 pub unsafe fn zero_stack(
66 &self,
67 _stack: &mut wasmtime_fiber::FiberStack,
68 _decommit: impl FnMut(*mut u8, usize),
69 ) {
70 }
73
74 pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack) {
76 self.live_stacks.fetch_sub(1, Ordering::AcqRel);
77 let _ = stack;
79 }
80}