wasmtime/runtime/vm/instance/allocator/pooling/table_pool.rs

use super::{
    TableAllocationIndex,
    index_allocator::{SimpleIndexAllocator, SlotId},
};
use crate::runtime::vm::sys::vm::{PageMap, commit_pages, reset_with_pagemap};
use crate::runtime::vm::{
    InstanceAllocationRequest, Mmap, PoolingInstanceAllocatorConfig, SendSyncPtr, Table,
    mmap::AlignedLength,
};
use crate::{prelude::*, vm::HostAlignedByteCount};
use std::ptr::NonNull;
use wasmtime_environ::Module;

/// Represents a pool of WebAssembly tables.
///
/// Each instance index into the pool returns an iterator over the base addresses
/// of the instance's tables.
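///
/// The pool is a single contiguous mapping carved into `max_total_tables`
/// slots of `table_size` bytes each; the table at allocation index `i` lives
/// at offset `i * table_size` from the start of the mapping.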
#[derive(Debug)]
pub struct TablePool {
    index_allocator: SimpleIndexAllocator,
    mapping: Mmap<AlignedLength>,
    table_size: HostAlignedByteCount,
    max_total_tables: usize,
    tables_per_instance: usize,
    keep_resident: HostAlignedByteCount,
    nominal_table_elements: usize,
}

impl TablePool {
    /// Create a new `TablePool`.
    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
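        // Size each slot for `table_elements` entries of
        // `NOMINAL_MAX_TABLE_ELEM_SIZE` bytes, rounded up to the host page
        // size so that every slot starts on a page boundary.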
        let table_size = HostAlignedByteCount::new_rounded_up(
            crate::runtime::vm::table::NOMINAL_MAX_TABLE_ELEM_SIZE
                .checked_mul(config.limits.table_elements)
                .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?,
        )?;

        let max_total_tables = usize::try_from(config.limits.total_tables).unwrap();
        let tables_per_instance = usize::try_from(config.limits.max_tables_per_module).unwrap();

        let allocation_size = table_size
            .checked_mul(max_total_tables)
            .context("total size of tables exceeds addressable memory")?;

        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
            .context("failed to create table pool mapping")?;

        let keep_resident = HostAlignedByteCount::new_rounded_up(config.table_keep_resident)?;

        Ok(Self {
            index_allocator: SimpleIndexAllocator::new(config.limits.total_tables),
            mapping,
            table_size,
            max_total_tables,
            tables_per_instance,
            keep_resident,
            nominal_table_elements: config.limits.table_elements,
        })
    }

    /// Validate whether this module's tables are allocatable by this pool.
    pub fn validate(&self, module: &Module) -> Result<()> {
        let tables = module.num_defined_tables();

        if tables > self.tables_per_instance {
            bail!(
                "defined tables count of {} exceeds the per-instance limit of {}",
                tables,
                self.tables_per_instance,
            );
        }

        if tables > self.max_total_tables {
            bail!(
                "defined tables count of {} exceeds the total tables limit of {}",
                tables,
                self.max_total_tables,
            );
        }

        for (i, table) in module.tables.iter().skip(module.num_imported_tables) {
            if table.limits.min > u64::try_from(self.nominal_table_elements)? {
                bail!(
                    "table index {} has a minimum element size of {} which exceeds the limit of {}",
                    i.as_u32(),
                    table.limits.min,
                    self.nominal_table_elements,
                );
            }
        }
        Ok(())
    }

    /// Are there zero slots in use right now?
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }

    /// Get the base pointer of the given table allocation.
    fn get(&self, table_index: TableAllocationIndex) -> *mut u8 {
        assert!(table_index.index() < self.max_total_tables);

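        // The table at `table_index` lives `table_index * table_size` bytes
        // past the start of the pool's mapping.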
        unsafe {
            self.mapping
                .as_ptr()
                .add(
                    self.table_size
                        .checked_mul(table_index.index())
                        .expect(
                            "checked in constructor that table_size * table_index doesn't overflow",
                        )
                        .byte_count(),
                )
                .cast_mut()
        }
    }

    /// Returns the number of bytes occupied by table entry data.
    ///
    /// This is typically `nominal_table_elements` multiplied by the size of
    /// the table's element type, but may be less for larger element types
    /// such as `VMContRef`, which get reduced capacity so that every table
    /// still fits within the pool's fixed per-slot size.
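    ///
    /// For example, assuming 4 KiB pages, 8-byte nominal entries, and 16-byte
    /// continuation entries (illustrative sizes, not normative): a slot sized
    /// for 512 nominal elements spans one page, so a continuation table in
    /// that slot is capped at 256 elements yet still occupies the full page.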
    fn data_size(&self, table_type: crate::vm::table::TableElementType) -> usize {
        let element_size = table_type.element_size();
        let elements = self
            .nominal_table_elements
            .min(self.table_size.byte_count() / element_size);
        elements * element_size
    }

    /// Allocate a single table for the given instance allocation request.
    pub async fn allocate(
        &self,
        request: &mut InstanceAllocationRequest<'_, '_>,
        ty: &wasmtime_environ::Table,
    ) -> Result<(TableAllocationIndex, Table)> {
        let tunables = request.store.engine().tunables();
        let allocation_index = self
            .index_allocator
            .alloc()
            .map(|slot| TableAllocationIndex(slot.0))
            .ok_or_else(|| {
                super::PoolConcurrencyLimitError::new(self.max_total_tables, "tables")
            })?;
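        // If table creation below fails, or this future is dropped mid-await,
        // the guard's `Drop` impl returns the slot to the index allocator so
        // it can be reused; on success the guard is disarmed before returning.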
        let mut guard = DeallocateIndexGuard {
            pool: self,
            allocation_index,
            active: true,
        };

        let base = self.get(allocation_index);
        let data_size = self.data_size(crate::vm::table::wasm_to_table_type(ty.ref_type));
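        // Ensure the pages backing this slot are committed before handing the
        // memory over to `Table`.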
        unsafe {
            commit_pages(base, data_size)?;
        }

        let ptr = NonNull::new(std::ptr::slice_from_raw_parts_mut(base.cast(), data_size)).unwrap();
        let table = unsafe {
            Table::new_static(
                ty,
                tunables,
                SendSyncPtr::new(ptr),
                request.limiter.as_deref_mut(),
            )
            .await?
        };
        guard.active = false;
        return Ok((allocation_index, table));

        struct DeallocateIndexGuard<'a> {
            pool: &'a TablePool,
            allocation_index: TableAllocationIndex,
            active: bool,
        }

        impl Drop for DeallocateIndexGuard<'_> {
            fn drop(&mut self) {
                if !self.active {
                    return;
                }
                self.pool
                    .index_allocator
                    .free(SlotId(self.allocation_index.0));
            }
        }
    }

    /// Deallocate a previously-allocated table.
    ///
    /// # Safety
    ///
    /// The table must have been previously-allocated by this pool and assigned
    /// the given allocation index, it must currently be allocated, and it must
    /// never be used again.
    ///
    /// The caller must have already called `reset_table_pages_to_zero` on the
    /// memory and flushed any enqueued decommits for this table's memory.
    pub unsafe fn deallocate(&self, allocation_index: TableAllocationIndex, table: Table) {
        assert!(table.is_static());
        drop(table);
        self.index_allocator.free(SlotId(allocation_index.0));
    }

    /// Reset the given table's memory to zero.
    ///
    /// Invokes the given `decommit` function for each region of memory that
    /// needs to be decommitted. It is the caller's responsibility to actually
    /// perform that decommit before this table is reused.
    ///
    /// # Safety
    ///
    /// This table must not be in active use, and it must be ready to return to
    /// the table pool once it has been zeroed and decommitted.
    pub unsafe fn reset_table_pages_to_zero(
        &self,
        pagemap: Option<&PageMap>,
        allocation_index: TableAllocationIndex,
        table: &mut Table,
        decommit: impl FnMut(*mut u8, usize),
    ) {
        assert!(table.is_static());
        let base = self.get(allocation_index);
        let table_byte_size = table.size() * table.element_type().element_size();
        let table_byte_size_page_aligned = HostAlignedByteCount::new_rounded_up(table_byte_size)
            .expect("table entry size doesn't overflow");

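        // Only the portion of the slot the table actually used needs to be
        // reset. Roughly, up to `keep_resident` bytes are zeroed in place
        // (keeping those pages resident for the slot's next user) while the
        // remainder is handed off to `decommit`.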
        // SAFETY: The `base` pointer is valid for `table_byte_size_page_aligned`
        // bytes and is safe to mutate here given the contract of our own function.
        unsafe {
            reset_with_pagemap(
                pagemap,
                base,
                table_byte_size_page_aligned,
                self.keep_resident,
                |slice| slice.fill(0),
                decommit,
            )
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::runtime::vm::InstanceLimits;

    #[test]
    fn test_table_pool() -> Result<()> {
        let pool = TablePool::new(&PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_tables: 7,
                table_elements: 100,
                max_memory_size: 0,
                max_memories_per_module: 0,
                ..Default::default()
            },
            ..Default::default()
        })?;

        let host_page_size = HostAlignedByteCount::host_page_size();

        assert_eq!(pool.table_size, host_page_size);
        assert_eq!(pool.max_total_tables, 7);
        assert_eq!(pool.nominal_table_elements, 100);

        let base = pool.mapping.as_ptr() as usize;

        for i in 0..7 {
            let index = TableAllocationIndex(i);
            let ptr = pool.get(index);
            assert_eq!(
                ptr as usize - base,
                pool.table_size.checked_mul(i as usize).unwrap()
            );
        }

        Ok(())
    }

    #[test]
    fn test_table_pool_continuations_capacity() -> Result<()> {
        let mkpool = |table_elements: usize| -> Result<TablePool> {
            TablePool::new(&PoolingInstanceAllocatorConfig {
                limits: InstanceLimits {
                    table_elements,
                    total_tables: 7,
                    max_memory_size: 0,
                    max_memories_per_module: 0,
                    ..Default::default()
                },
                ..Default::default()
            })
        };

        let host_page_size = HostAlignedByteCount::host_page_size();
        let words_per_page = host_page_size.byte_count() / size_of::<*const u8>();
        let pool_big = mkpool(words_per_page - 1)?;
        let pool_small = mkpool(5)?;

        assert_eq!(pool_small.table_size, host_page_size);
        assert_eq!(pool_big.table_size, host_page_size);

        // Both pools should store `nominal_table_elements` entries of function-table data.
        let func_table_type = crate::vm::table::TableElementType::Func;
        assert_eq!(
            pool_small.data_size(func_table_type),
            pool_small.nominal_table_elements * func_table_type.element_size()
        );
        assert_eq!(
            pool_big.data_size(func_table_type),
            pool_big.nominal_table_elements * func_table_type.element_size()
        );

        // In the "big" case, continuation data should fill the whole page (the
        // element capacity is limited by the slot size). In the "small" case,
        // continuation data should fill only part of the page, since the
        // requested number of nominal elements fits comfortably.
        let cont_table_type = crate::vm::table::TableElementType::Cont;
        assert_eq!(
            pool_small.data_size(cont_table_type),
            pool_small.nominal_table_elements * cont_table_type.element_size()
        );
        assert_eq!(pool_big.data_size(cont_table_type), host_page_size);

        Ok(())
    }
}