wasmtime/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs

#![cfg_attr(asan, allow(dead_code))]

use super::index_allocator::{SimpleIndexAllocator, SlotId};
use crate::prelude::*;
use crate::runtime::vm::sys::vm::commit_pages;
use crate::runtime::vm::{
    HostAlignedByteCount, Mmap, PoolingInstanceAllocatorConfig, mmap::AlignedLength,
};

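/// A pool of fiber stacks for the async implementation on Unix.
///
/// One contiguous mapping is reserved up front and handed out in fixed-size
/// slots, each holding a single execution stack preceded by an inaccessible
/// guard page.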
#[derive(Debug)]
pub struct StackPool {
    mapping: Mmap<AlignedLength>,
    stack_size: HostAlignedByteCount,
    max_stacks: usize,
    page_size: HostAlignedByteCount,
    index_allocator: SimpleIndexAllocator,
    async_stack_zeroing: bool,
    async_stack_keep_resident: HostAlignedByteCount,
}

impl StackPool {
    #[cfg(test)]
    pub fn enabled() -> bool {
        true
    }

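    /// Creates a new stack pool from the given pooling configuration,
    /// reserving the entire mapping up front and protecting each slot's
    /// guard page.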
    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
        use rustix::mm::{MprotectFlags, mprotect};

        let page_size = HostAlignedByteCount::host_page_size();

        // A stack size of zero means fiber stacks are disabled; otherwise
        // round the configured size up to the host page size and add one
        // extra page for the guard page at the bottom of each stack.
        let stack_size = if config.stack_size == 0 {
            HostAlignedByteCount::ZERO
        } else {
            HostAlignedByteCount::new_rounded_up(config.stack_size)
                .and_then(|size| size.checked_add(HostAlignedByteCount::host_page_size()))
                .context("stack size exceeds addressable memory")?
        };

        let max_stacks = usize::try_from(config.limits.total_stacks).unwrap();

        let allocation_size = stack_size
            .checked_mul(max_stacks)
            .context("total size of execution stacks exceeds addressable memory")?;

        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
            .context("failed to create stack pool mapping")?;

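        // Make the bottom page of every stack slot inaccessible so that a
        // stack overflow faults instead of silently running into the
        // neighboring slot.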
        if !allocation_size.is_zero() {
            unsafe {
                for i in 0..max_stacks {
                    let offset = stack_size.unchecked_mul(i);
                    let bottom_of_stack = mapping.as_ptr().add(offset.byte_count()).cast_mut();
                    mprotect(
                        bottom_of_stack.cast(),
                        page_size.byte_count(),
                        MprotectFlags::empty(),
                    )
                    .context("failed to protect stack guard page")?;
                }
            }
        }

        Ok(Self {
            mapping,
            stack_size,
            max_stacks,
            page_size,
            async_stack_zeroing: config.async_stack_zeroing,
            async_stack_keep_resident: HostAlignedByteCount::new_rounded_up(
                config.async_stack_keep_resident,
            )?,
            index_allocator: SimpleIndexAllocator::new(config.limits.total_stacks),
        })
    }

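    /// Returns whether no stacks are currently allocated out of this pool.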
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }

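    /// Allocates a new fiber stack from the pool, committing its pages and
    /// returning an error if fiber stacks are disabled or the pool is at
    /// capacity.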
    pub fn allocate(&self) -> Result<wasmtime_fiber::FiberStack> {
        if self.stack_size.is_zero() {
            bail!("pooling allocator not configured to enable fiber stack allocation");
        }

        let index = self
            .index_allocator
            .alloc()
            .ok_or_else(|| super::PoolConcurrencyLimitError::new(self.max_stacks, "fibers"))?
            .index();

        assert!(index < self.max_stacks);

        unsafe {
            // Exclude the guard page from the usable stack size.
            let size_without_guard = self.stack_size.checked_sub(self.page_size).expect(
                "self.stack_size is host-page-aligned and is > 0,\
                 so it must be >= self.page_size",
            );

            let bottom_of_stack = self
                .mapping
                .as_ptr()
                .add(self.stack_size.unchecked_mul(index).byte_count())
                .cast_mut();

            commit_pages(bottom_of_stack, size_without_guard.byte_count())?;

            let stack = wasmtime_fiber::FiberStack::from_raw_parts(
                bottom_of_stack,
                self.page_size.byte_count(),
                size_without_guard.byte_count(),
            )?;
            Ok(stack)
        }
    }

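    /// Zeroes the given stack so it can be reused, if async stack zeroing is
    /// enabled.
    ///
    /// Up to `async_stack_keep_resident` bytes at the top of the stack are
    /// zeroed in place; the remainder toward the bottom is handed to the
    /// `decommit` callback. Returns the number of bytes zeroed in place, or 0
    /// when zeroing is disabled.
    ///
    /// # Safety
    ///
    /// The stack must have been allocated from this pool and must no longer be
    /// in use.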
    pub unsafe fn zero_stack(
        &self,
        stack: &mut wasmtime_fiber::FiberStack,
        mut decommit: impl FnMut(*mut u8, usize),
    ) -> usize {
        assert!(stack.is_from_raw_parts());
        assert!(
            !self.stack_size.is_zero(),
            "pooling allocator not configured to enable fiber stack allocation \
             (Self::allocate should have returned an error)"
        );

        if !self.async_stack_zeroing {
            return 0;
        }

        let top = stack
            .top()
            .expect("fiber stack not allocated from the pool") as usize;

        let base = self.mapping.as_ptr() as usize;
        let len = self.mapping.len();
        assert!(
            top > base && top <= (base + len),
            "fiber stack top pointer not in range"
        );

        let stack_size = self.stack_size.checked_sub(self.page_size).expect(
            "self.stack_size is host-page-aligned and is > 0,\
             so it must be >= self.page_size",
        );
        let bottom_of_stack = top - stack_size.byte_count();
        let start_of_stack = bottom_of_stack - self.page_size.byte_count();
        assert!(start_of_stack >= base && start_of_stack < (base + len));
        assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);

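        // Zero the most recently used portion of the stack in place so those
        // pages stay resident and don't fault again on reuse, and hand the
        // rest to `decommit` so the OS can reclaim it. This balances the cost
        // of memsetting the whole range against the page faults a full
        // decommit would cause later.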
        let size_to_memset = stack_size.min(self.async_stack_keep_resident);
        let rest = stack_size
            .checked_sub(size_to_memset)
            .expect("stack_size >= size_to_memset");

        unsafe {
            // The range being zeroed lies within this pool's mapping (checked
            // by the asserts above) and the stack is no longer in use.
            std::ptr::write_bytes(
                (bottom_of_stack + rest.byte_count()) as *mut u8,
                0,
                size_to_memset.byte_count(),
            );
        }

        decommit(bottom_of_stack as _, rest.byte_count());

        size_to_memset.byte_count()
    }

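    /// Returns a stack to the pool, making its slot available again.
    ///
    /// `bytes_resident` records how many bytes of the stack were left resident
    /// (see `zero_stack`) and is forwarded to the index allocator.
    ///
    /// # Safety
    ///
    /// The stack must have been allocated from this pool and must no longer be
    /// in use.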
    pub unsafe fn deallocate(&self, stack: wasmtime_fiber::FiberStack, bytes_resident: usize) {
        assert!(stack.is_from_raw_parts());

        let top = stack
            .top()
            .expect("fiber stack not allocated from the pool") as usize;

        let base = self.mapping.as_ptr() as usize;
        let len = self.mapping.len();
        assert!(
            top > base && top <= (base + len),
            "fiber stack top pointer not in range"
        );

        let stack_size = self.stack_size.byte_count() - self.page_size.byte_count();
        let bottom_of_stack = top - stack_size;
        let start_of_stack = bottom_of_stack - self.page_size.byte_count();
        assert!(start_of_stack >= base && start_of_stack < (base + len));
        assert!((start_of_stack - base) % self.stack_size.byte_count() == 0);

        let index = (start_of_stack - base) / self.stack_size.byte_count();
        assert!(index < self.max_stacks);
        let index = u32::try_from(index).unwrap();

        self.index_allocator.free(SlotId(index), bytes_resident);
    }

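    /// Returns the number of previously used ("warm") slots that are currently
    /// unused.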
    pub fn unused_warm_slots(&self) -> u32 {
        self.index_allocator.unused_warm_slots()
    }

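    /// Returns the total number of bytes still resident in unused stacks, or
    /// `None` if async stack zeroing is disabled and the value is not tracked.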
    pub fn unused_bytes_resident(&self) -> Option<usize> {
        if self.async_stack_zeroing {
            Some(self.index_allocator.unused_bytes_resident())
        } else {
            None
        }
    }
}

#[cfg(all(test, unix, feature = "async", not(miri), not(asan)))]
mod tests {
    use super::*;
    use crate::runtime::vm::InstanceLimits;

    #[test]
    fn test_stack_pool() -> Result<()> {
        let config = PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_stacks: 10,
                ..Default::default()
            },
            stack_size: 1,
            async_stack_zeroing: true,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let pool = StackPool::new(&config)?;

        let native_page_size = crate::runtime::vm::host_page_size();
        assert_eq!(pool.stack_size, 2 * native_page_size);
        assert_eq!(pool.max_stacks, 10);
        assert_eq!(pool.page_size, native_page_size);

        assert_eq!(pool.index_allocator.testing_freelist(), []);

        let base = pool.mapping.as_ptr() as usize;

        let mut stacks = Vec::new();
        for i in 0..10 {
            let stack = pool.allocate().expect("allocation should succeed");
            assert_eq!(
                ((stack.top().unwrap() as usize - base) / pool.stack_size.byte_count()) - 1,
                i
            );
            stacks.push(stack);
        }

        assert_eq!(pool.index_allocator.testing_freelist(), []);

        assert!(pool.allocate().is_err(), "allocation should fail");

        for stack in stacks {
            unsafe {
                pool.deallocate(stack, 0);
            }
        }

        assert_eq!(
            pool.index_allocator.testing_freelist(),
            [
                SlotId(0),
                SlotId(1),
                SlotId(2),
                SlotId(3),
                SlotId(4),
                SlotId(5),
                SlotId(6),
                SlotId(7),
                SlotId(8),
                SlotId(9)
            ],
        );

        Ok(())
    }
}