// wasmtime/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs

#![cfg_attr(asan, allow(dead_code))]

use super::index_allocator::{SimpleIndexAllocator, SlotId};
use crate::prelude::*;
use crate::runtime::vm::sys::vm::commit_pages;
use crate::runtime::vm::{
    mmap::AlignedLength, HostAlignedByteCount, Mmap, PoolingInstanceAllocatorConfig,
};

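/// A pool of fiber stacks for the pooling allocator's async support.
///
/// Stacks live back-to-back in one large mmap, one fixed-size slot per
/// stack. The lowest page of each slot is an inaccessible guard page, so
/// running off the end of a stack faults instead of silently corrupting the
/// neighboring slot.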
#[derive(Debug)]
pub struct StackPool {
    mapping: Mmap<AlignedLength>,
    stack_size: HostAlignedByteCount,
    max_stacks: usize,
    page_size: HostAlignedByteCount,
    index_allocator: SimpleIndexAllocator,
    async_stack_zeroing: bool,
    async_stack_keep_resident: HostAlignedByteCount,
}

impl StackPool {
    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
        use rustix::mm::{mprotect, MprotectFlags};

        let page_size = HostAlignedByteCount::host_page_size();

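        // Round the requested stack size up to a whole number of host pages,
        // then add one extra page per slot for the guard page at the bottom.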
        let stack_size = if config.stack_size == 0 {
            HostAlignedByteCount::ZERO
        } else {
            HostAlignedByteCount::new_rounded_up(config.stack_size)
                .and_then(|size| size.checked_add(HostAlignedByteCount::host_page_size()))
                .context("stack size exceeds addressable memory")?
        };

        let max_stacks = usize::try_from(config.limits.total_stacks).unwrap();

        let allocation_size = stack_size
            .checked_mul(max_stacks)
            .context("total size of execution stacks exceeds addressable memory")?;

        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
            .context("failed to create stack pool mapping")?;

        if !allocation_size.is_zero() {
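            // Mark each slot's guard page inaccessible up front; the rest of
            // the mapping is already accessible.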
            unsafe {
                for i in 0..max_stacks {
                    let offset = stack_size.unchecked_mul(i);
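                    // The first page of each slot is its guard page; revoke
                    // all access to it so a stack overflow faults immediately.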
                    let bottom_of_stack = mapping.as_ptr().add(offset.byte_count()).cast_mut();
                    mprotect(
                        bottom_of_stack.cast(),
                        page_size.byte_count(),
                        MprotectFlags::empty(),
                    )
                    .context("failed to protect stack guard page")?;
                }
            }
        }

        Ok(Self {
            mapping,
            stack_size,
            max_stacks,
            page_size,
            async_stack_zeroing: config.async_stack_zeroing,
            async_stack_keep_resident: HostAlignedByteCount::new_rounded_up(
                config.async_stack_keep_resident,
            )?,
            index_allocator: SimpleIndexAllocator::new(config.limits.total_stacks),
        })
    }

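    /// Are there zero stacks currently allocated from this pool?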
    #[allow(unused)]
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }

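    /// Allocate a new fiber stack from the pool, erroring if stacks were not
    /// configured or if all slots are currently in use.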
    pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
        if self.stack_size.is_zero() {
            bail!("pooling allocator not configured to enable fiber stack allocation");
        }

        let index = self
            .index_allocator
            .alloc()
            .ok_or_else(|| super::PoolConcurrencyLimitError::new(self.max_stacks, "fibers"))?
            .index();

        assert!(index < self.max_stacks);

        unsafe {
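            // Exclude the guard page from the usable size handed to the fiber.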
            let size_without_guard = self.stack_size.checked_sub(self.page_size).expect(
                "self.stack_size is host-page-aligned and is > 0,\
                 so it must be >= self.page_size",
            );

            let bottom_of_stack = self
                .mapping
                .as_ptr()
                .add(self.stack_size.unchecked_mul(index).byte_count())
                .cast_mut();

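            // Ensure this slot's pages are committed before handing the stack
            // out (this may be a no-op on platforms where the mapping is
            // already committed).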
            commit_pages(bottom_of_stack, size_without_guard.byte_count())?;

            let stack = wasmtime_fiber::FiberStack::from_raw_parts(
                bottom_of_stack,
                self.page_size.byte_count(),
                size_without_guard.byte_count(),
            )?;
            Ok(stack)
        }
    }

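    /// Zero the contents of the given stack, if async stack zeroing is
    /// enabled for this pool.
    ///
    /// The hottest, top-most portion of the stack (up to
    /// `async_stack_keep_resident` bytes) is zeroed by hand to keep those
    /// pages resident; the remainder is passed to `decommit`, which is
    /// responsible for returning that region to a zeroed, decommitted state.
    ///
    /// # Safety
    ///
    /// The stack must have been allocated from this pool and must not be in
    /// use by any fiber when this is called.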
    pub unsafe fn zero_stack(
        &self,
        stack: &mut wasmtime_fiber::FiberStack,
        mut decommit: impl FnMut(*mut u8, usize),
    ) {
        assert!(stack.is_from_raw_parts());
        assert!(
            !self.stack_size.is_zero(),
            "pooling allocator not configured to enable fiber stack allocation \
             (Self::allocate should have returned an error)"
        );

        if !self.async_stack_zeroing {
            return;
        }

        let top = stack
            .top()
            .expect("fiber stack not allocated from the pool") as usize;

        let base = self.mapping.as_ptr() as usize;
        let len = self.mapping.len();
        assert!(
            top > base && top <= (base + len),
            "fiber stack top pointer not in range"
        );

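        // Exclude the guard page from the slot size to recover the usable
        // stack region below `top`.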
        let stack_size = self.stack_size.checked_sub(self.page_size).expect(
            "self.stack_size is host-page-aligned and is > 0,\
             so it must be >= self.page_size",
        );
        let bottom_of_stack = top - stack_size.byte_count();
        let start_of_stack = bottom_of_stack - self.page_size.byte_count();
        assert!(start_of_stack >= base && start_of_stack < (base + len));
        assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);

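        // Zero the top of the stack by hand to keep its pages resident and
        // avoid refaulting them on the stack's next use; everything below
        // that is handed to `decommit` so the kernel can lazily reclaim it.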
        let size_to_memset = stack_size.min(self.async_stack_keep_resident);
        let rest = stack_size
            .checked_sub(size_to_memset)
            .expect("stack_size >= size_to_memset");
        std::ptr::write_bytes(
            (bottom_of_stack + rest.byte_count()) as *mut u8,
            0,
            size_to_memset.byte_count(),
        );

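        // Decommit the rest of the stack: everything below the manually
        // zeroed region.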
        decommit(bottom_of_stack as _, rest.byte_count());
    }

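    /// Return the given stack to the pool, making its slot available for
    /// future allocations.
    ///
    /// # Safety
    ///
    /// The stack must have been allocated from this pool, must no longer be
    /// in use, and must never be used again after this call.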
    pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack) {
        assert!(stack.is_from_raw_parts());

        let top = stack
            .top()
            .expect("fiber stack not allocated from the pool") as usize;

        let base = self.mapping.as_ptr() as usize;
        let len = self.mapping.len();
        assert!(
            top > base && top <= (base + len),
            "fiber stack top pointer not in range"
        );

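        // Strip the guard page off the slot size, then recover the slot's
        // start address and index from the stack's top pointer.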
        let stack_size = self.stack_size.byte_count() - self.page_size.byte_count();
        let bottom_of_stack = top - stack_size;
        let start_of_stack = bottom_of_stack - self.page_size.byte_count();
        assert!(start_of_stack >= base && start_of_stack < (base + len));
        assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);

        let index = (start_of_stack - base) / self.stack_size.byte_count();
        assert!(index < self.max_stacks);
        let index = u32::try_from(index).unwrap();

        self.index_allocator.free(SlotId(index));
    }
}

#[cfg(all(test, unix, feature = "async", not(miri)))]
mod tests {
    use super::*;
    use crate::runtime::vm::InstanceLimits;

    #[test]
    fn test_stack_pool() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_stacks: 10,
                ..Default::default()
            },
            stack_size: 1,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let pool = StackPool::new(&config)?;

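        // A requested stack size of 1 rounds up to one page, plus one page
        // for the guard: two pages total per slot.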
        let native_page_size = crate::runtime::vm::host_page_size();
        assert_eq!(pool.stack_size, 2 * native_page_size);
        assert_eq!(pool.max_stacks, 10);
        assert_eq!(pool.page_size, native_page_size);

        assert_eq!(pool.index_allocator.testing_freelist(), []);

        let base = pool.mapping.as_ptr() as usize;

        let mut stacks = Vec::new();
        for i in 0..10 {
            let stack = pool.allocate().expect("allocation should succeed");
            assert_eq!(
                ((stack.top().unwrap() as usize - base) / pool.stack_size.byte_count()) - 1,
                i
            );
            stacks.push(stack);
        }

        assert_eq!(pool.index_allocator.testing_freelist(), []);

        assert!(pool.allocate().is_err(), "allocation should fail");

        for stack in stacks {
            unsafe {
                pool.deallocate(stack);
            }
        }

        assert_eq!(
            pool.index_allocator.testing_freelist(),
            [
                SlotId(0),
                SlotId(1),
                SlotId(2),
                SlotId(3),
                SlotId(4),
                SlotId(5),
                SlotId(6),
                SlotId(7),
                SlotId(8),
                SlotId(9)
            ],
        );

        Ok(())
    }
}