// wasmtime/runtime/vm/instance/allocator/pooling/decommit_queue.rs

1//! A queue for batching decommits together.
2//!
//! We don't decommit a Wasm table/memory/stack/etc... eagerly, but instead
//! batch them up to be decommitted together. This module implements that
//! queuing and batching.
6//!
7//! Even when batching is "disabled" we still use this queue. Batching is
8//! disabled by specifying a batch size of one, in which case, this queue will
9//! immediately get flushed every time we push onto it.
10
11use super::PoolingInstanceAllocator;
12use crate::vm::{MemoryAllocationIndex, MemoryImageSlot, Table, TableAllocationIndex};
13use smallvec::SmallVec;
14
15#[cfg(feature = "async")]
16use wasmtime_fiber::FiberStack;
17
// On Unix, reuse libc's `iovec` directly so queued regions can be handed to
// vectored syscalls without conversion.
#[cfg(unix)]
#[allow(non_camel_case_types)]
type iovec = libc::iovec;

// Fallback mirroring the POSIX `iovec` layout (base pointer + length) so the
// rest of this module can use a single type on every target.
//
// NOTE(review): this non-Unix branch still names `libc` types; confirm that
// non-Unix targets built with this file actually link the `libc` crate, or
// switch the fields to `*mut u8` / `usize`.
#[cfg(not(unix))]
#[allow(non_camel_case_types)]
struct iovec {
    iov_base: *mut libc::c_void,
    iov_len: libc::size_t,
}
28
/// Newtype wrapper around the raw `iovec` so we can implement traits
/// (`Debug`, `Send`, `Sync`) for it; `repr(transparent)` keeps it
/// layout-identical to the wrapped `iovec`.
#[repr(transparent)]
struct IoVec(iovec);

// SAFETY: an `IoVec` is only a raw pointer/length pair describing a region
// enqueued via `DecommitQueue::push_raw`, whose contract requires the region
// to have no other references and not be in use until `flush`. Under that
// contract, moving/sharing the descriptor across threads is sound.
unsafe impl Send for IoVec {}
unsafe impl Sync for IoVec {}
34
35impl std::fmt::Debug for IoVec {
36    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
37        f.debug_struct("IoVec")
38            .field("base", &self.0.iov_base)
39            .field("len", &self.0.iov_len)
40            .finish()
41    }
42}
43
/// Wrapper that lets a queued `FiberStack` travel between threads.
#[cfg(feature = "async")]
struct SendSyncStack(FiberStack);

// SAFETY: NOTE(review) — stacks in this queue are no longer in use by any
// fiber (see `DecommitQueue::push_stack`'s safety contract), so sending or
// sharing them until deallocation is assumed sound; confirm `FiberStack`
// carries no thread-affine state.
#[cfg(feature = "async")]
unsafe impl Send for SendSyncStack {}
#[cfg(feature = "async")]
unsafe impl Sync for SendSyncStack {}
50
/// A queue of raw memory regions pending decommit, along with the pool
/// entities (memories, tables, stacks) those regions belong to.
///
/// Entities are parked here — instead of being returned to their pools' free
/// lists — until `flush` has decommitted their memory, so they cannot be
/// handed out for reuse while still committed.
#[derive(Default)]
pub struct DecommitQueue {
    // Raw (pointer, length) regions decommitted in `flush`.
    raw: SmallVec<[IoVec; 2]>,
    // Memory slots returned to the memory pool by `flush`.
    memories: SmallVec<[(MemoryAllocationIndex, MemoryImageSlot); 1]>,
    // Tables returned to the table pool by `flush`.
    tables: SmallVec<[(TableAllocationIndex, Table); 1]>,
    // Fiber stacks returned to the stack pool by `flush`.
    #[cfg(feature = "async")]
    stacks: SmallVec<[SendSyncStack; 1]>,
    //
    // TODO: GC heaps are not well-integrated with the pooling allocator
    // yet. Once we better integrate them, we should start (optionally) zeroing
    // them, and batching that up here.
    //
    // #[cfg(feature = "gc")]
    // pub gc_heaps: SmallVec<[(GcHeapAllocationIndex, Box<dyn GcHeap>); 1]>,
}
66
67impl std::fmt::Debug for DecommitQueue {
68    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
69        f.debug_struct("DecommitQueue")
70            .field("raw", &self.raw)
71            .finish_non_exhaustive()
72    }
73}
74
impl DecommitQueue {
    /// Append another queue to this queue.
    ///
    /// Drains every list of the other queue into `self`, leaving it empty.
    /// The exhaustive `Self { .. }` destructuring means adding a new field to
    /// `DecommitQueue` is a compile error here until it is handled.
    pub fn append(
        &mut self,
        Self {
            raw,
            memories,
            tables,
            #[cfg(feature = "async")]
            stacks,
        }: &mut Self,
    ) {
        self.raw.append(raw);
        self.memories.append(memories);
        self.tables.append(tables);
        #[cfg(feature = "async")]
        self.stacks.append(stacks);
    }

    /// How many raw memory regions are enqueued for decommit?
    pub fn raw_len(&self) -> usize {
        self.raw.len()
    }

    /// Enqueue a region of memory for decommit.
    ///
    /// It is the caller's responsibility to push the associated data via
    /// `self.push_{memory,table,stack}` as appropriate.
    ///
    /// # Safety
    ///
    /// The enqueued memory regions must be safe to decommit when `flush` is
    /// called (no other references, not in use, won't be otherwise unmapped,
    /// etc...).
    pub unsafe fn push_raw(&mut self, ptr: *mut u8, len: usize) {
        self.raw.push(IoVec(iovec {
            iov_base: ptr.cast(),
            iov_len: len,
        }));
    }

    /// Push a memory into the queue.
    ///
    /// # Safety
    ///
    /// This memory should not be in use, and its decommit regions must have
    /// already been enqueued via `self.push_raw`.
    pub unsafe fn push_memory(
        &mut self,
        allocation_index: MemoryAllocationIndex,
        image: MemoryImageSlot,
    ) {
        self.memories.push((allocation_index, image));
    }

    /// Push a table into the queue.
    ///
    /// # Safety
    ///
    /// This table should not be in use, and its decommit regions must have
    /// already been enqueued via `self.push_raw`.
    pub unsafe fn push_table(&mut self, allocation_index: TableAllocationIndex, table: Table) {
        self.tables.push((allocation_index, table));
    }

    /// Push a stack into the queue.
    ///
    /// # Safety
    ///
    /// This stack should not be in use, and its decommit regions must have
    /// already been enqueued via `self.push_raw`.
    #[cfg(feature = "async")]
    pub unsafe fn push_stack(&mut self, stack: FiberStack) {
        self.stacks.push(SendSyncStack(stack));
    }

    /// Issue the decommit syscall for every enqueued region, draining
    /// `self.raw`.
    fn decommit_all_raw(&mut self) {
        for iovec in self.raw.drain(..) {
            // SAFETY: relies on the contract of `push_raw` — each queued
            // region has no other references and is not in use, so it is safe
            // to decommit now.
            unsafe {
                crate::vm::sys::vm::decommit_pages(iovec.0.iov_base.cast(), iovec.0.iov_len)
                    .unwrap_or_else(|e| {
                        // A failed decommit is treated as fatal rather than
                        // reported to the caller.
                        panic!(
                            "failed to decommit ptr={:#p}, len={:#x}: {e}",
                            iovec.0.iov_base, iovec.0.iov_len
                        )
                    });
            }
        }
    }

    /// Flush this queue, decommitting all enqueued regions in batch.
    ///
    /// Returns `true` if we did any decommits and returned their entities to
    /// the associated free lists; `false` if the queue was empty.
    pub fn flush(mut self, pool: &PoolingInstanceAllocator) -> bool {
        // First, do the raw decommit syscall(s).
        self.decommit_all_raw();

        // Second, restore the various entities to their associated pools' free
        // lists. This is safe, and they are ready for reuse, now that their
        // memory regions have been decommitted.
        let mut deallocated_any = false;
        for (allocation_index, image) in self.memories {
            deallocated_any = true;
            // SAFETY: per `push_memory`'s contract this memory is not in use,
            // and its regions were decommitted above.
            unsafe {
                pool.memories.deallocate(allocation_index, image);
            }
        }
        for (allocation_index, table) in self.tables {
            deallocated_any = true;
            // SAFETY: per `push_table`'s contract this table is not in use,
            // and its regions were decommitted above.
            unsafe {
                pool.tables.deallocate(allocation_index, table);
            }
        }
        #[cfg(feature = "async")]
        for stack in self.stacks {
            deallocated_any = true;
            // SAFETY: per `push_stack`'s contract this stack is not in use,
            // and its regions were decommitted above.
            unsafe {
                pool.stacks.deallocate(stack.0);
            }
        }

        deallocated_any
    }
}