//! wasmtime/runtime/vm/instance/allocator/pooling/table_pool.rs

1use super::{
2    TableAllocationIndex,
3    index_allocator::{SimpleIndexAllocator, SlotId},
4};
5use crate::runtime::vm::sys::vm::{PageMap, commit_pages, reset_with_pagemap};
6use crate::runtime::vm::{
7    InstanceAllocationRequest, Mmap, PoolingInstanceAllocatorConfig, SendSyncPtr, Table,
8    mmap::AlignedLength,
9};
10use crate::{prelude::*, vm::HostAlignedByteCount};
11use std::ptr::NonNull;
12use wasmtime_environ::Module;
13
/// Represents a pool of WebAssembly tables.
///
/// Each instance index into the pool returns an iterator over the base addresses
/// of the instance's tables.
#[derive(Debug)]
pub struct TablePool {
    // Tracks which table slots are currently free versus allocated.
    index_allocator: SimpleIndexAllocator,
    // Single contiguous mapping backing every table slot in the pool.
    mapping: Mmap<AlignedLength>,
    // Page-aligned size, in bytes, of one table slot within `mapping`.
    table_size: HostAlignedByteCount,
    // Maximum number of concurrently-allocated tables across all instances.
    max_total_tables: usize,
    // Maximum number of defined tables any single module may use.
    tables_per_instance: usize,
    // Page-aligned number of bytes to keep resident when a slot is reset
    // (passed through to `reset_with_pagemap`).
    keep_resident: HostAlignedByteCount,
    // Configured per-table element capacity; for large element types the
    // fixed slot byte size may cap the usable element count (see `data_size`).
    nominal_table_elements: usize,
}
28
29impl TablePool {
30    /// Create a new `TablePool`.
31    pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result<Self> {
32        let table_size = HostAlignedByteCount::new_rounded_up(
33            crate::runtime::vm::table::NOMINAL_MAX_TABLE_ELEM_SIZE
34                .checked_mul(config.limits.table_elements)
35                .ok_or_else(|| anyhow!("table size exceeds addressable memory"))?,
36        )?;
37
38        let max_total_tables = usize::try_from(config.limits.total_tables).unwrap();
39        let tables_per_instance = usize::try_from(config.limits.max_tables_per_module).unwrap();
40
41        let allocation_size = table_size
42            .checked_mul(max_total_tables)
43            .context("total size of tables exceeds addressable memory")?;
44
45        let mapping = Mmap::accessible_reserved(allocation_size, allocation_size)
46            .context("failed to create table pool mapping")?;
47
48        let keep_resident = HostAlignedByteCount::new_rounded_up(config.table_keep_resident)?;
49
50        Ok(Self {
51            index_allocator: SimpleIndexAllocator::new(config.limits.total_tables),
52            mapping,
53            table_size,
54            max_total_tables,
55            tables_per_instance,
56            keep_resident,
57            nominal_table_elements: config.limits.table_elements,
58        })
59    }
60
61    /// Validate whether this module's tables are allocatable by this pool.
62    pub fn validate(&self, module: &Module) -> Result<()> {
63        let tables = module.num_defined_tables();
64
65        if tables > self.tables_per_instance {
66            bail!(
67                "defined tables count of {} exceeds the per-instance limit of {}",
68                tables,
69                self.tables_per_instance,
70            );
71        }
72
73        if tables > self.max_total_tables {
74            bail!(
75                "defined tables count of {} exceeds the total tables limit of {}",
76                tables,
77                self.max_total_tables,
78            );
79        }
80
81        for (i, table) in module.tables.iter().skip(module.num_imported_tables) {
82            if table.limits.min > u64::try_from(self.nominal_table_elements)? {
83                bail!(
84                    "table index {} has a minimum element size of {} which exceeds the limit of {}",
85                    i.as_u32(),
86                    table.limits.min,
87                    self.nominal_table_elements,
88                );
89            }
90        }
91        Ok(())
92    }
93
    /// Are there zero slots in use right now?
    ///
    /// Delegates to the underlying index allocator.
    pub fn is_empty(&self) -> bool {
        self.index_allocator.is_empty()
    }
98
99    /// Get the base pointer of the given table allocation.
100    fn get(&self, table_index: TableAllocationIndex) -> *mut u8 {
101        assert!(table_index.index() < self.max_total_tables);
102
103        unsafe {
104            self.mapping
105                .as_ptr()
106                .add(
107                    self.table_size
108                        .checked_mul(table_index.index())
109                        .expect(
110                            "checked in constructor that table_size * table_index doesn't overflow",
111                        )
112                        .byte_count(),
113                )
114                .cast_mut()
115        }
116    }
117
118    /// Returns the number of bytes occupied by table entry data
119    ///
120    /// This is typically just the `nominal_table_elements` multiplied by
121    /// the size of the table's element type, but may be less in the case
122    /// of types such as VMContRef for which less capacity will be available
123    /// (maintaining a consistent table size in the pool).
124    fn data_size(&self, table_type: crate::vm::table::TableElementType) -> usize {
125        let element_size = table_type.element_size();
126        let elements = self
127            .nominal_table_elements
128            .min(self.table_size.byte_count() / element_size);
129        elements * element_size
130    }
131
    /// Allocate a single table for the given instance allocation request.
    ///
    /// Reserves a free slot from the index allocator, commits enough pages for
    /// the table's element data, and constructs a static `Table` over that
    /// memory. On any early exit (including `?` errors and cancellation of the
    /// `.await` below) the drop guard returns the reserved slot to the
    /// allocator.
    pub async fn allocate(
        &self,
        request: &mut InstanceAllocationRequest<'_, '_>,
        ty: &wasmtime_environ::Table,
    ) -> Result<(TableAllocationIndex, Table)> {
        let tunables = request.store.engine().tunables();
        // Claim a free slot, or fail with a concurrency-limit error when the
        // pool is exhausted.
        let allocation_index = self
            .index_allocator
            .alloc()
            .map(|slot| TableAllocationIndex(slot.0))
            .ok_or_else(|| {
                super::PoolConcurrencyLimitError::new(self.max_total_tables, "tables")
            })?;
        // Guard that frees `allocation_index` on early exit; disarmed just
        // before the successful return below.
        let mut guard = DeallocateIndexGuard {
            pool: self,
            allocation_index,
            active: true,
        };

        let base = self.get(allocation_index);
        // Byte size of usable entry data for this element type (may be less
        // than a full slot; see `data_size`).
        let data_size = self.data_size(crate::vm::table::wasm_to_table_type(ty.ref_type));
        unsafe {
            commit_pages(base, data_size)?;
        }

        let ptr = NonNull::new(std::ptr::slice_from_raw_parts_mut(base.cast(), data_size)).unwrap();
        let table = unsafe {
            Table::new_static(
                ty,
                tunables,
                SendSyncPtr::new(ptr),
                request.limiter.as_deref_mut(),
            )
            .await?
        };
        // Success: disarm the guard so the slot stays allocated.
        guard.active = false;
        return Ok((allocation_index, table));

        struct DeallocateIndexGuard<'a> {
            pool: &'a TablePool,
            allocation_index: TableAllocationIndex,
            active: bool,
        }

        impl Drop for DeallocateIndexGuard<'_> {
            fn drop(&mut self) {
                if !self.active {
                    return;
                }
                // Return the slot; `0` records no bytes resident for it
                // (presumably because nothing was committed/retained for a
                // failed allocation — see `SimpleIndexAllocator::free`).
                self.pool
                    .index_allocator
                    .free(SlotId(self.allocation_index.0), 0);
            }
        }
    }
188
    /// Deallocate a previously-allocated table.
    ///
    /// # Safety
    ///
    /// The table must have been previously-allocated by this pool and assigned
    /// the given allocation index, it must currently be allocated, and it must
    /// never be used again.
    ///
    /// The caller must have already called `reset_table_pages_to_zero` on the
    /// memory and flushed any enqueued decommits for this table's memory.
    pub unsafe fn deallocate(
        &self,
        allocation_index: TableAllocationIndex,
        table: Table,
        bytes_resident: usize,
    ) {
        // Only static (pool-backed) tables may be returned to the pool.
        assert!(table.is_static());
        // Drop the table before releasing its slot so the slot cannot be
        // reused while the `Table` still references the memory.
        drop(table);
        // Record `bytes_resident` so the allocator can track how much of this
        // warm slot is still paged in.
        self.index_allocator
            .free(SlotId(allocation_index.0), bytes_resident);
    }
210
    /// Reset the given table's memory to zero.
    ///
    /// Invokes the given `decommit` function for each region of memory that
    /// needs to be decommitted. It is the caller's responsibility to actually
    /// perform that decommit before this table is reused.
    ///
    /// Returns the number of bytes that are still resident in memory in this
    /// table.
    ///
    /// # Safety
    ///
    /// This table must not be in active use, and ready for returning to the
    /// table pool once it is zeroed and decommitted.
    pub unsafe fn reset_table_pages_to_zero(
        &self,
        pagemap: Option<&PageMap>,
        allocation_index: TableAllocationIndex,
        table: &mut Table,
        decommit: impl FnMut(*mut u8, usize),
    ) -> usize {
        assert!(table.is_static());
        let base = self.get(allocation_index);
        // Bytes of live entry data in this table, rounded up to whole pages
        // for the reset below.
        let table_byte_size = table.size() * table.element_type().element_size();
        let table_byte_size_page_aligned = HostAlignedByteCount::new_rounded_up(table_byte_size)
            .expect("table entry size doesn't overflow");

        // SAFETY: The `base` pointer is valid for `size` bytes and is safe to
        // mutate here given the contract of our own function.
        unsafe {
            reset_with_pagemap(
                pagemap,
                base,
                table_byte_size_page_aligned,
                self.keep_resident,
                |slice| slice.fill(0),
                decommit,
            )
        }
    }
250
    /// Returns the number of "warm" slots that are currently unused.
    ///
    /// Delegates to the index allocator; presumably a warm slot is one that
    /// was previously allocated and then freed — see `SimpleIndexAllocator`.
    pub fn unused_warm_slots(&self) -> u32 {
        self.index_allocator.unused_warm_slots()
    }
254
    /// Returns the number of bytes still resident in currently-unused slots.
    ///
    /// Delegates to the index allocator, which accumulates the
    /// `bytes_resident` values passed to `free` — see `deallocate`.
    pub fn unused_bytes_resident(&self) -> usize {
        self.index_allocator.unused_bytes_resident()
    }
258}
259
#[cfg(test)]
mod tests {
    use super::*;
    use crate::runtime::vm::InstanceLimits;

    // Basic pool construction: slot sizing, limits, and per-slot base
    // addresses laid out contiguously at `table_size` strides.
    #[test]
    fn test_table_pool() -> Result<()> {
        let pool = TablePool::new(&PoolingInstanceAllocatorConfig {
            limits: InstanceLimits {
                total_tables: 7,
                table_elements: 100,
                max_memory_size: 0,
                max_memories_per_module: 0,
                ..Default::default()
            },
            ..Default::default()
        })?;

        let host_page_size = HostAlignedByteCount::host_page_size();

        // 100 elements fit within a single page, so a slot is exactly one page.
        assert_eq!(pool.table_size, host_page_size);
        assert_eq!(pool.max_total_tables, 7);
        assert_eq!(pool.nominal_table_elements, 100);

        let base = pool.mapping.as_ptr() as usize;

        // Each slot's base is offset by `i * table_size` from the mapping base.
        for i in 0..7 {
            let index = TableAllocationIndex(i);
            let ptr = pool.get(index);
            assert_eq!(
                ptr as usize - base,
                pool.table_size.checked_mul(i as usize).unwrap()
            );
        }

        Ok(())
    }

    // `data_size` behavior for large element types (continuations): the
    // effective capacity is capped by the fixed slot size.
    #[test]
    fn test_table_pool_continuations_capacity() -> Result<()> {
        let mkpool = |table_elements: usize| -> Result<TablePool> {
            TablePool::new(&PoolingInstanceAllocatorConfig {
                limits: InstanceLimits {
                    table_elements,
                    total_tables: 7,
                    max_memory_size: 0,
                    max_memories_per_module: 0,
                    ..Default::default()
                },
                ..Default::default()
            })
        };

        let host_page_size = HostAlignedByteCount::host_page_size();
        // Number of pointer-sized entries that fit in one page.
        let words_per_page = host_page_size.byte_count() / size_of::<*const u8>();
        let pool_big = mkpool(words_per_page - 1)?;
        let pool_small = mkpool(5)?;

        assert_eq!(pool_small.table_size, host_page_size);
        assert_eq!(pool_big.table_size, host_page_size);

        // table should store nominal_table_elements of data for func in both cases
        let func_table_type = crate::vm::table::TableElementType::Func;
        assert_eq!(
            pool_small.data_size(func_table_type),
            pool_small.nominal_table_elements * func_table_type.element_size()
        );
        assert_eq!(
            pool_big.data_size(func_table_type),
            pool_big.nominal_table_elements * func_table_type.element_size()
        );

        // In the "big" case, continuations should fill page size (capacity limited).
        // In the "small" case, continuations should fill only part of the page, capping
        // at the requested table size for nominal elements.
        let cont_table_type = crate::vm::table::TableElementType::Cont;
        assert_eq!(
            pool_small.data_size(cont_table_type),
            pool_small.nominal_table_elements * cont_table_type.element_size()
        );
        assert_eq!(pool_big.data_size(cont_table_type), host_page_size);

        Ok(())
    }
}