wasmtime/runtime/vm/instance/allocator/pooling/table_pool.rs

1use super::{
2    TableAllocationIndex,
3    index_allocator::{SimpleIndexAllocator, SlotId},
4};
5use crate::runtime::vm::sys::vm::commit_pages;
6use crate::runtime::vm::{
7    InstanceAllocationRequest, Mmap, PoolingInstanceAllocatorConfig, SendSyncPtr, Table,
8    mmap::AlignedLength,
9};
10use crate::{prelude::*, vm::HostAlignedByteCount};
11use std::ptr::NonNull;
12use wasmtime_environ::{Module, Tunables};
13
/// Represents a pool of WebAssembly tables.
///
/// Each instance index into the pool returns an iterator over the base addresses
/// of the instance's tables.
#[derive(Debug)]
pub struct TablePool {
    // Hands out free slot indices; one slot per table in the pool.
    index_allocator: SimpleIndexAllocator,
    // The single virtual-memory mapping backing every table slot.
    mapping: Mmap<AlignedLength>,
    // Page-aligned byte size reserved for each table slot.
    table_size: HostAlignedByteCount,
    // Maximum number of concurrently allocated tables (slots in the pool).
    max_total_tables: usize,
    // Per-instance cap on the number of defined tables.
    tables_per_instance: usize,
    // How many leading bytes of a slot are memset to zero rather than
    // decommitted when a table is reset (see `reset_table_pages_to_zero`).
    keep_resident: HostAlignedByteCount,
    // Configured element capacity of each table; the effective capacity may
    // be smaller for large element types (see `data_size`).
    nominal_table_elements: usize,
}
28
29impl TablePool {
30    /// Create a new `TablePool`.
31    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
32        let table_size = HostAlignedByteCount::new_rounded_up(
33            crate::runtime::vm::table::NOMINAL_MAX_TABLE_ELEM_SIZE
34                .checked_mul(config.limits.table_elements)
35                .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?,
36        )?;
37
38        let max_total_tables = usize::try_from(config.limits.total_tables).unwrap();
39        let tables_per_instance = usize::try_from(config.limits.max_tables_per_module).unwrap();
40
41        let allocation_size = table_size
42            .checked_mul(max_total_tables)
43            .context("total size of tables exceeds addressable memory")?;
44
45        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
46            .context("failed to create table pool mapping")?;
47
48        let keep_resident = HostAlignedByteCount::new_rounded_up(config.table_keep_resident)?;
49
50        Ok(Self {
51            index_allocator: SimpleIndexAllocator::new(config.limits.total_tables),
52            mapping,
53            table_size,
54            max_total_tables,
55            tables_per_instance,
56            keep_resident,
57            nominal_table_elements: config.limits.table_elements,
58        })
59    }
60
61    /// Validate whether this module's tables are allocatable by this pool.
62    pub fn validate(&self, module: &Module) -> Result<()> {
63        let tables = module.num_defined_tables();
64
65        if tables > self.tables_per_instance {
66            bail!(
67                "defined tables count of {} exceeds the per-instance limit of {}",
68                tables,
69                self.tables_per_instance,
70            );
71        }
72
73        if tables > self.max_total_tables {
74            bail!(
75                "defined tables count of {} exceeds the total tables limit of {}",
76                tables,
77                self.max_total_tables,
78            );
79        }
80
81        for (i, table) in module.tables.iter().skip(module.num_imported_tables) {
82            if table.limits.min > u64::try_from(self.nominal_table_elements)? {
83                bail!(
84                    "table index {} has a minimum element size of {} which exceeds the limit of {}",
85                    i.as_u32(),
86                    table.limits.min,
87                    self.nominal_table_elements,
88                );
89            }
90        }
91        Ok(())
92    }
93
    /// Are there zero slots in use right now?
    ///
    /// Delegates to the underlying index allocator's notion of emptiness.
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }
98
99    /// Get the base pointer of the given table allocation.
100    fn get(&self, table_index: TableAllocationIndex) -> *mut u8 {
101        assert!(table_index.index() < self.max_total_tables);
102
103        unsafe {
104            self.mapping
105                .as_ptr()
106                .add(
107                    self.table_size
108                        .checked_mul(table_index.index())
109                        .expect(
110                            "checked in constructor that table_size * table_index doesn't overflow",
111                        )
112                        .byte_count(),
113                )
114                .cast_mut()
115        }
116    }
117
118    /// Returns the number of bytes occupied by table entry data
119    ///
120    /// This is typically just the `nominal_table_elements` multiplied by
121    /// the size of the table's element type, but may be less in the case
122    /// of types such as VMContRef for which less capacity will be available
123    /// (maintaining a consistent table size in the pool).
124    fn data_size(&self, table_type: crate::vm::table::TableElementType) -> usize {
125        let element_size = table_type.element_size();
126        let elements = self
127            .nominal_table_elements
128            .min(self.table_size.byte_count() / element_size);
129        elements * element_size
130    }
131
    /// Allocate a single table for the given instance allocation request.
    ///
    /// Claims a free slot from the pool, commits that slot's pages for the
    /// table's data, and constructs a static `Table` backed by the slot.
    /// Returns the slot index alongside the table; on any failure the slot
    /// is released back to the pool before the error propagates.
    pub fn allocate(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Table,
        tunables: &Tunables,
    ) -> Result<(TableAllocationIndex, Table)> {
        // Claim a free slot; `None` means the pool is at capacity.
        let allocation_index = self
            .index_allocator
            .alloc()
            .map(|slot| TableAllocationIndex(slot.0))
            .ok_or_else(|| {
                super::PoolConcurrencyLimitError::new(self.max_total_tables, "tables")
            })?;

        // The closure lets every `?` below fall through to the `Err` arm so
        // the claimed slot is always freed on failure.
        match (|| {
            let base = self.get(allocation_index);
            let data_size = self.data_size(crate::vm::table::wasm_to_table_type(ty.ref_type));
            // NOTE(review): presumably commits this slot's pages before first
            // use — confirm against `commit_pages`'s contract.
            unsafe {
                commit_pages(base, data_size)?;
            }

            // `base` comes from the pool mapping and is never null, so the
            // `NonNull` construction cannot fail.
            let ptr =
                NonNull::new(std::ptr::slice_from_raw_parts_mut(base.cast(), data_size)).unwrap();
            unsafe {
                Table::new_static(
                    ty,
                    tunables,
                    SendSyncPtr::new(ptr),
                    &mut *request.store.get().unwrap(),
                )
            }
        })() {
            Ok(table) => Ok((allocation_index, table)),
            Err(e) => {
                // Release the slot so a failed allocation doesn't leak
                // pool capacity.
                self.index_allocator.free(SlotId(allocation_index.0));
                Err(e)
            }
        }
    }
172
    /// Deallocate a previously-allocated table.
    ///
    /// # Safety
    ///
    /// The table must have been previously-allocated by this pool and assigned
    /// the given allocation index, it must currently be allocated, and it must
    /// never be used again.
    ///
    /// The caller must have already called `reset_table_pages_to_zero` on the
    /// memory and flushed any enqueued decommits for this table's memory.
    pub unsafe fn deallocate(&self, allocation_index: TableAllocationIndex, table: Table) {
        // Only statically-backed tables (whose storage lives in this pool)
        // may be returned here.
        assert!(table.is_static());
        // Drop the table before freeing its slot so the slot cannot be
        // handed out again while the old `Table` still exists.
        drop(table);
        self.index_allocator.free(SlotId(allocation_index.0));
    }
188
    /// Reset the given table's memory to zero.
    ///
    /// Invokes the given `decommit` function for each region of memory that
    /// needs to be decommitted. It is the caller's responsibility to actually
    /// perform that decommit before this table is reused.
    ///
    /// # Safety
    ///
    /// This table must not be in active use, and ready for returning to the
    /// table pool once it is zeroed and decommitted.
    pub unsafe fn reset_table_pages_to_zero(
        &self,
        allocation_index: TableAllocationIndex,
        table: &mut Table,
        mut decommit: impl FnMut(*mut u8, usize),
    ) {
        assert!(table.is_static());
        let base = self.get(allocation_index);
        // Only the bytes actually used by this table's element type need
        // zeroing; round up to whole host pages so the memset/decommit split
        // below stays page-aligned.
        let size = HostAlignedByteCount::new_rounded_up(self.data_size(table.element_type()))
            .expect("table entry size doesn't overflow");

        // `memset` the first `keep_resident` bytes.
        let size_to_memset = size.min(self.keep_resident);
        std::ptr::write_bytes(base, 0, size_to_memset.byte_count());

        // And decommit the rest of it.
        decommit(
            base.add(size_to_memset.byte_count()),
            size.checked_sub(size_to_memset)
                .expect("size_to_memset <= size")
                .byte_count(),
        );
    }
222}
223
#[cfg(test)]
mod tests {
    use super::*;
    use crate::runtime::vm::InstanceLimits;

    // A pool of 7 single-page tables lays slots out contiguously,
    // one `table_size` apart from the mapping base.
    #[test]
    fn test_table_pool() -> Result<()> {
        let pool = TablePool::new(&PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_tables: 7,
                table_elements: 100,
                max_memory_size: 0,
                max_memories_per_module: 0,
                ..Default::default()
            },
            ..Default::default()
        })?;

        let host_page_size = HostAlignedByteCount::host_page_size();

        // 100 elements fit within one page, so each slot rounds up to
        // exactly one host page.
        assert_eq!(pool.table_size, host_page_size);
        assert_eq!(pool.max_total_tables, 7);
        assert_eq!(pool.nominal_table_elements, 100);

        let base = pool.mapping.as_ptr() as usize;

        // Slot i's base pointer is exactly i * table_size bytes past the
        // start of the mapping.
        for i in 0..7 {
            let index = TableAllocationIndex(i);
            let ptr = pool.get(index);
            assert_eq!(
                ptr as usize - base,
                pool.table_size.checked_mul(i as usize).unwrap()
            );
        }

        Ok(())
    }

    // `data_size` leaves small element types (funcs) at the nominal element
    // count but caps large element types (continuations) at what fits in
    // one slot.
    #[test]
    fn test_table_pool_continuations_capacity() -> Result<()> {
        let mkpool = |table_elements: usize| -> Result<TablePool> {
            TablePool::new(&PoolingInstanceAllocatorConfig {
                limits: InstanceLimits {
                    table_elements,
                    total_tables: 7,
                    max_memory_size: 0,
                    max_memories_per_module: 0,
                    ..Default::default()
                },
                ..Default::default()
            })
        };

        let host_page_size = HostAlignedByteCount::host_page_size();
        // "big" requests nearly a page's worth of pointer-sized elements;
        // "small" requests only a handful.
        let words_per_page = host_page_size.byte_count() / size_of::<*const u8>();
        let pool_big = mkpool(words_per_page - 1)?;
        let pool_small = mkpool(5)?;

        // Both configurations still round the slot up to one host page.
        assert_eq!(pool_small.table_size, host_page_size);
        assert_eq!(pool_big.table_size, host_page_size);

        // table should store nominal_table_elements of data for func in both cases
        let func_table_type = crate::vm::table::TableElementType::Func;
        assert_eq!(
            pool_small.data_size(func_table_type),
            pool_small.nominal_table_elements * func_table_type.element_size()
        );
        assert_eq!(
            pool_big.data_size(func_table_type),
            pool_big.nominal_table_elements * func_table_type.element_size()
        );

        // In the "big" case, continuations should fill page size (capacity limited).
        // In the "small" case, continuations should fill only part of the page, capping
        // at the requested table size for nominal elements.
        let cont_table_type = crate::vm::table::TableElementType::Cont;
        assert_eq!(
            pool_small.data_size(cont_table_type),
            pool_small.nominal_table_elements * cont_table_type.element_size()
        );
        assert_eq!(pool_big.data_size(cont_table_type), host_page_size);

        Ok(())
    }
}