wasmtime/runtime/vm/instance/allocator/pooling/memory_pool.rs

//! Implements a memory pool using a single allocated memory slab.
//!
//! The pooling instance allocator maps one large slab of memory in advance and
//! allocates WebAssembly memories from this slab--a [`MemoryPool`]. Each
//! WebAssembly memory is allocated in its own slot (see uses of `index` and
//! [`SlotId`] in this module):
//!
//! ```text
//! ┌──────┬──────┬──────┬──────┬──────┐
//! │Slot 0│Slot 1│Slot 2│Slot 3│......│
//! └──────┴──────┴──────┴──────┴──────┘
//! ```
//!
//! Diving deeper, we note that a [`MemoryPool`] protects Wasmtime from
//! out-of-bounds memory accesses by inserting inaccessible guard regions
//! between memory slots. These guard regions are configured to raise a signal
//! if they are accessed--a WebAssembly out-of-bounds (OOB) memory access. The
//! [`MemoryPool`] documentation has a more detailed chart but one can think of
//! memory slots being laid out like the following:
//!
//! ```text
//! ┌─────┬─────┬─────┬─────┬─────┬─────┬─────┬─────┐
//! │Guard│Mem 0│Guard│Mem 1│Guard│Mem 2│.....│Guard│
//! └─────┴─────┴─────┴─────┴─────┴─────┴─────┴─────┘
//! ```
//!
//! But we can be more efficient about guard regions: with memory protection
//! keys (MPK) enabled, the interleaved guard regions can be smaller. If we
//! surround a memory with memories from other instances and each instance is
//! protected by different protection keys, the guard region can be smaller AND
//! the pool will still raise a signal on an OOB access. This complicates how we
//! lay out memory slots: we must store memories from the same instance in the
//! same "stripe". Each stripe is protected by a different protection key.
//!
//! This concept, dubbed [ColorGuard] in the original paper, relies on careful
//! calculation of the memory sizes to prevent any "overlapping access" (see
//! [`calculate`]): there are limited protection keys available (15) so the next
//! memory using the same key must be at least as far away as the guard region
//! we would insert otherwise. This ends up looking like the following, where a
//! store for instance 0 (`I0`) "stripes" two memories (`M1` and `M2`) with the
//! same protection key 1 and far enough apart to signal an OOB access:
//!
//! ```text
//! ┌─────┬─────┬─────┬─────┬────────────────┬─────┬─────┬─────┐
//! │.....│I0:M1│.....│.....│.<enough slots>.│I0:M2│.....│.....│
//! ├─────┼─────┼─────┼─────┼────────────────┼─────┼─────┼─────┤
//! │.....│key 1│key 2│key 3│..<more keys>...│key 1│key 2│.....│
//! └─────┴─────┴─────┴─────┴────────────────┴─────┴─────┴─────┘
//! ```
//!
//! [ColorGuard]: https://plas2022.github.io/files/pdf/SegueColorGuard.pdf
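//!
//! For a sense of how this pool ends up being configured in practice, here is a
//! rough, illustrative sketch using the public `wasmtime` API (type and method
//! names are taken from the public crate and may differ between versions; MPK
//! support additionally depends on crate features and platform support):
//!
//! ```ignore
//! use wasmtime::{Config, Engine, InstanceAllocationStrategy, MpkEnabled, PoolingAllocationConfig};
//!
//! let mut pool = PoolingAllocationConfig::default();
//! pool.total_memories(100)                       // number of memory slots in the slab
//!     .max_memory_size(1 << 31)                  // up to 2 GiB accessible per linear memory
//!     .memory_protection_keys(MpkEnabled::Auto); // stripe slots with MPK where supported
//!
//! let mut config = Config::new();
//! config.allocation_strategy(InstanceAllocationStrategy::Pooling(pool));
//! let engine = Engine::new(&config).unwrap();
//! ```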

use super::{
    MemoryAllocationIndex,
    index_allocator::{MemoryInModule, ModuleAffinityIndexAllocator, SlotId},
};
use crate::prelude::*;
use crate::runtime::vm::{
    CompiledModuleId, InstanceAllocationRequest, InstanceLimits, Memory, MemoryBase,
    MemoryImageSlot, Mmap, MmapOffset, PoolingInstanceAllocatorConfig, mmap::AlignedLength,
};
use crate::{
    Enabled,
    runtime::vm::mpk::{self, ProtectionKey, ProtectionMask},
    vm::HostAlignedByteCount,
};
use std::mem;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use wasmtime_environ::{DefinedMemoryIndex, Module, Tunables};

/// A set of allocator slots.
///
/// The allocated slots can be split by striping them: e.g., with two stripe
/// colors 0 and 1, we would allocate all even slots using stripe 0 and all odd
/// slots using stripe 1.
///
/// This is helpful for the use of protection keys: (a) if a request comes to
/// allocate multiple instances, we can allocate them all from the same stripe
/// and (b) if a store wants to allocate more from the same stripe it can.
#[derive(Debug)]
struct Stripe {
    allocator: ModuleAffinityIndexAllocator,
    pkey: Option<ProtectionKey>,
}

/// Represents a pool of WebAssembly linear memories.
///
/// A linear memory is divided into accessible pages and guard pages. A memory
/// pool contains linear memories: each memory occupies a slot in an
/// allocated slab (i.e., `mapping`):
///
/// ```text
///          layout.max_memory_bytes                 layout.slot_bytes
///                    |                                   |
///              ◄─────┴────►                  ◄───────────┴──────────►
/// ┌───────────┬────────────┬───────────┐     ┌───────────┬───────────┬───────────┐
/// | PROT_NONE |            | PROT_NONE | ... |           | PROT_NONE | PROT_NONE |
/// └───────────┴────────────┴───────────┘     └───────────┴───────────┴───────────┘
/// |           |◄──────────────────┬─────────────────────────────────► ◄────┬────►
/// |           |                   |                                        |
/// mapping     |            `layout.num_slots` memories         layout.post_slab_guard_bytes
///             |
///   layout.pre_slab_guard_bytes
/// ```
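///
/// Concretely, the base address of memory slot `i` within the slab sits at
/// byte offset `layout.pre_slab_guard_bytes + i * layout.slot_bytes` from the
/// start of `mapping` (see `get_base` below).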
#[derive(Debug)]
pub struct MemoryPool {
    mapping: Arc<Mmap<AlignedLength>>,
    /// This memory pool is stripe-aware. If using memory protection keys, this
    /// will contain one stripe per available key; otherwise, a single stripe
    /// with an empty key.
    stripes: Vec<Stripe>,

    /// If using a copy-on-write allocation scheme, the slot management. We
    /// dynamically transfer ownership of a slot to a Memory when in use.
    image_slots: Vec<Mutex<ImageSlot>>,

    /// A description of the various memory sizes used in allocating the
    /// `mapping` slab.
    layout: SlabLayout,

    /// The maximum number of memories that a single core module instance may
    /// use.
    ///
    /// NB: this is needed for validation but does not affect the pool's size.
    memories_per_instance: usize,

    /// How much linear memory, in bytes, to keep resident after resetting for
    /// use with the next instance. This much memory will be `memset` to zero
    /// when a linear memory is deallocated.
    ///
    /// Memory exceeding this amount in the wasm linear memory will be released
    /// with `madvise` back to the kernel.
    ///
    /// Only applicable on Linux.
    pub(super) keep_resident: HostAlignedByteCount,

    /// Keep track of protection keys handed out to initialized stores; this
    /// allows us to round-robin the assignment of stores to stripes.
    next_available_pkey: AtomicUsize,
}

/// The state of memory for each slot in this pool.
#[derive(Debug)]
enum ImageSlot {
    /// This slot is guaranteed to be entirely unmapped.
    ///
    /// This is the initial state of all slots.
    Unmapped,

    /// The state of this slot is unknown.
    ///
    /// This encompasses a number of situations such as:
    ///
    /// * The slot is currently in use.
    /// * An attempt was made to use the slot, but allocation failed.
    /// * The slot was used but not deallocated properly.
    ///
    /// All of these situations are lumped into this one variant indicating
    /// that, at a base level, nothing is known about this slot. Using a
    /// slot in this state first requires resetting all memory in this slot by
    /// mapping anonymous memory on top of the entire slot.
    Unknown,

    /// This slot was previously used and the `MemoryImageSlot` maintains the
    /// state of how this slot was last configured.
    ///
    /// Future use of this slot will use `MemoryImageSlot` to continue to
    /// re-instantiate and reuse images and such. This state is entered after
    /// an allocated slot is successfully deallocated.
    PreviouslyUsed(MemoryImageSlot),
}
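
// For illustration (this summary is derived from `take_memory_image_slot` and
// `return_memory_image_slot` below rather than from the upstream comments):
// every slot starts out `Unmapped`; taking a slot for use always leaves
// `Unknown` behind in the table (and taking an already-`Unknown` slot first
// resets it with anonymous memory); a successful deallocation stores the slot
// back as `PreviouslyUsed` so its image state can be reused next time.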

impl MemoryPool {
    /// Create a new `MemoryPool`.
    pub fn new(config: &PoolingInstanceAllocatorConfig, tunables: &Tunables) -> Result<Self> {
        if u64::try_from(config.limits.max_memory_size).unwrap() > tunables.memory_reservation {
            bail!(
                "maximum memory size of {:#x} bytes exceeds the configured \
                 memory reservation of {:#x} bytes",
                config.limits.max_memory_size,
                tunables.memory_reservation
            );
        }
        let pkeys = match config.memory_protection_keys {
            Enabled::Auto => {
                if mpk::is_supported() {
                    mpk::keys(config.max_memory_protection_keys)
                } else {
                    &[]
                }
            }
            Enabled::Yes => {
                if mpk::is_supported() {
                    mpk::keys(config.max_memory_protection_keys)
                } else {
                    bail!("mpk is disabled on this system")
                }
            }
            Enabled::No => &[],
        };

        // This is a tricky bit of global state: when creating a memory pool
        // that uses memory protection keys, we ensure here that any host code
        // will have access to all keys (i.e., stripes). It's only when we enter
        // the WebAssembly guest code (see `StoreInner::call_hook`) that we
        // enforce which keys/stripes can be accessed. Be forewarned about the
        // assumptions here:
        // - we expect this "allow all" configuration to reset the default
        //   process state (only allow key 0) _before_ any memories are accessed
        // - and we expect no other code (e.g., host-side code) to modify this
        //   global MPK configuration
        if !pkeys.is_empty() {
            mpk::allow(ProtectionMask::all());
        }

        // Create a slab layout and allocate it as a completely inaccessible
        // region to start--`PROT_NONE`.
        let constraints = SlabConstraints::new(&config.limits, tunables, pkeys.len())?;
        let layout = calculate(&constraints)?;
        log::debug!(
            "creating memory pool: {constraints:?} -> {layout:?} (total: {})",
            layout.total_slab_bytes()?
        );
        let mut mapping =
            Mmap::accessible_reserved(HostAlignedByteCount::ZERO, layout.total_slab_bytes()?)
                .context("failed to create memory pool mapping")?;

        // Then, stripe the memory with the available protection keys. This is
        // unnecessary if there is only one stripe color.
        if layout.num_stripes >= 2 {
            let mut cursor = layout.pre_slab_guard_bytes;
            let pkeys = &pkeys[..layout.num_stripes];
            for i in 0..constraints.num_slots {
                let pkey = &pkeys[i % pkeys.len()];
                let region = unsafe {
                    mapping.slice_mut(
                        cursor.byte_count()..cursor.byte_count() + layout.slot_bytes.byte_count(),
                    )
                };
                pkey.protect(region)?;
                cursor = cursor
                    .checked_add(layout.slot_bytes)
                    .context("cursor + slot_bytes overflows")?;
            }
            debug_assert_eq!(
                cursor
                    .checked_add(layout.post_slab_guard_bytes)
                    .context("cursor + post_slab_guard_bytes overflows")?,
                layout.total_slab_bytes()?
            );
        }

        let image_slots: Vec<_> = std::iter::repeat_with(|| Mutex::new(ImageSlot::Unmapped))
            .take(constraints.num_slots)
            .collect();

        let create_stripe = |i| {
            let num_slots = constraints.num_slots / layout.num_stripes
                + usize::from(constraints.num_slots % layout.num_stripes > i);
            let allocator = ModuleAffinityIndexAllocator::new(
                num_slots.try_into().unwrap(),
                config.max_unused_warm_slots,
            );
            Stripe {
                allocator,
                pkey: pkeys.get(i).cloned(),
            }
        };

        debug_assert!(layout.num_stripes > 0);
        let stripes: Vec<_> = (0..layout.num_stripes).map(create_stripe).collect();

        let pool = Self {
            stripes,
            mapping: Arc::new(mapping),
            image_slots,
            layout,
            memories_per_instance: usize::try_from(config.limits.max_memories_per_module).unwrap(),
            keep_resident: HostAlignedByteCount::new_rounded_up(
                config.linear_memory_keep_resident,
            )?,
            next_available_pkey: AtomicUsize::new(0),
        };

        Ok(pool)
    }

    /// Return a protection key that stores can use for requesting new memories.
    pub fn next_available_pkey(&self) -> Option<ProtectionKey> {
        let index = self.next_available_pkey.fetch_add(1, Ordering::SeqCst) % self.stripes.len();
        debug_assert!(
            self.stripes.len() < 2 || self.stripes[index].pkey.is_some(),
            "if we are using stripes, we cannot have an empty protection key"
        );
        self.stripes[index].pkey
    }

    /// Validate whether this memory pool supports the given module.
    pub fn validate_memories(&self, module: &Module) -> Result<()> {
        let memories = module.num_defined_memories();
        if memories > self.memories_per_instance {
            bail!(
                "defined memories count of {} exceeds the per-instance limit of {}",
                memories,
                self.memories_per_instance,
            );
        }

        for (i, memory) in module.memories.iter().skip(module.num_imported_memories) {
            self.validate_memory(memory).with_context(|| {
                format!(
                    "memory index {} is unsupported in this pooling allocator configuration",
                    i.as_u32()
                )
            })?;
        }
        Ok(())
    }

    /// Validate one memory for this pool.
    pub fn validate_memory(&self, memory: &wasmtime_environ::Memory) -> Result<()> {
        let min = memory.minimum_byte_size().with_context(|| {
            format!("memory has a minimum byte size that cannot be represented in a u64",)
        })?;
        if min > u64::try_from(self.layout.max_memory_bytes.byte_count()).unwrap() {
            bail!(
                "memory has a minimum byte size of {} which exceeds the limit of {} bytes",
                min,
                self.layout.max_memory_bytes,
            );
        }
        if memory.shared {
            // FIXME(#4244): since the pooling allocator owns the memory
            // allocation (which is torn down with the instance), that
            // can't be used with shared memory where threads or the host
            // might persist the memory beyond the lifetime of the instance
            // itself.
            bail!("memory is shared which is not supported in the pooling allocator");
        }
        Ok(())
    }

    /// Are zero slots in use right now?
    pub fn is_empty(&self) -> bool {
        self.stripes.iter().all(|s| s.allocator.is_empty())
    }

    /// Allocate a single memory for the given instance allocation request.
    pub async fn allocate(
        &self,
        request: &mut InstanceAllocationRequest<'_, '_>,
        ty: &wasmtime_environ::Memory,
        memory_index: Option<DefinedMemoryIndex>,
    ) -> Result<(MemoryAllocationIndex, Memory)> {
        let tunables = request.store.engine().tunables();
        let stripe_index = if let Some(pkey) = request.store.get_pkey() {
            pkey.as_stripe()
        } else {
            debug_assert!(self.stripes.len() < 2);
            0
        };

        let striped_allocation_index = self.stripes[stripe_index]
            .allocator
            .alloc(memory_index.and_then(|mem_idx| {
                request
                    .runtime_info
                    .unique_id()
                    .map(|id| MemoryInModule(id, mem_idx))
            }))
            .map(|slot| StripedAllocationIndex(u32::try_from(slot.index()).unwrap()))
            .ok_or_else(|| {
                super::PoolConcurrencyLimitError::new(
                    self.stripes[stripe_index].allocator.len(),
                    format!("memory stripe {stripe_index}"),
                )
            })?;
        let mut guard = DeallocateIndexGuard {
            pool: self,
            stripe_index,
            striped_allocation_index,
            active: true,
        };

        let allocation_index =
            striped_allocation_index.as_unstriped_slot_index(stripe_index, self.stripes.len());

        // Double-check that the runtime requirements of the memory are
        // satisfied by the configuration of this pooling allocator. This
        // should be returned as an error through `validate_memories`
        // but double-check here to be sure.
        assert!(
            tunables.memory_reservation + tunables.memory_guard_size
                <= u64::try_from(self.layout.bytes_to_next_stripe_slot().byte_count()).unwrap()
        );

        let base = self.get_base(allocation_index);
        let base_capacity = self.layout.max_memory_bytes;

        let mut slot = self.take_memory_image_slot(allocation_index)?;
        let image = match memory_index {
            Some(memory_index) => request.runtime_info.memory_image(memory_index)?,
            None => None,
        };
        let initial_size = ty
            .minimum_byte_size()
            .expect("min size checked in validation");

        // If instantiation fails, we can propagate the error
        // upward and drop the slot. This will cause the Drop
        // handler to attempt to map the range with PROT_NONE
        // memory, to reserve the space while releasing any
        // stale mappings. The next use of this slot will then
        // create a new slot that will try to map over
        // this, returning errors as well if the mapping
        // errors persist. The unmap-on-drop is best effort;
        // if it fails, then we can still soundly continue
        // using the rest of the pool and allowing the rest of
        // the process to continue, because we never perform a
        // mmap that would leave an open space for someone
        // else to come in and map something.
        let initial_size = usize::try_from(initial_size).unwrap();
        slot.instantiate(initial_size, image, ty, tunables)?;

        let memory = Memory::new_static(
            ty,
            tunables,
            MemoryBase::Mmap(base),
            base_capacity.byte_count(),
            slot,
            request.limiter.as_deref_mut(),
        )
        .await?;
        guard.active = false;
        return Ok((allocation_index, memory));

        struct DeallocateIndexGuard<'a> {
            pool: &'a MemoryPool,
            stripe_index: usize,
            striped_allocation_index: StripedAllocationIndex,
            active: bool,
        }

        impl Drop for DeallocateIndexGuard<'_> {
            fn drop(&mut self) {
                if !self.active {
                    return;
                }
                self.pool.stripes[self.stripe_index]
                    .allocator
                    .free(SlotId(self.striped_allocation_index.0));
            }
        }
    }

    /// Deallocate a previously-allocated memory.
    ///
    /// # Safety
    ///
    /// The memory must have been previously allocated from this pool and
    /// assigned the given index, must currently be in an allocated state, and
    /// must never be used again.
    ///
    /// The caller must have already called `clear_and_remain_ready` on the
    /// memory's image and flushed any enqueued decommits for this memory.
    pub unsafe fn deallocate(
        &self,
        allocation_index: MemoryAllocationIndex,
        image: MemoryImageSlot,
    ) {
        self.return_memory_image_slot(allocation_index, image);

        let (stripe_index, striped_allocation_index) =
            StripedAllocationIndex::from_unstriped_slot_index(allocation_index, self.stripes.len());
        self.stripes[stripe_index]
            .allocator
            .free(SlotId(striped_allocation_index.0));
    }

    /// Purge everything related to `module`.
    pub fn purge_module(&self, module: CompiledModuleId) {
        // This primarily means clearing out all of its memory images present in
        // the virtual address space. Go through the index allocator for slots
        // affine to `module` and reset them, freeing up the index when we're
        // done.
        //
        // Note that this is only called when the specified `module` won't be
        // allocated further (the module is being dropped) so this shouldn't hit
        // any sort of infinite loop since this should be the final operation
        // working with `module`.
        //
        // TODO: We are given a module id, but affinity is keyed by a pair of
        // module id and defined memory index. We are missing any defined memory
        // index or count of how many memories the module defines here. Therefore,
        // we probe up to the maximum number of memories per instance. This is fine
        // because that maximum is generally relatively small. If this method
        // somehow ever gets hot because of unnecessary probing, we should
        // either pass in the actual number of defined memories for the given
        // module to this method, or keep a side table of all slots that are
        // associated with a module (not just module and memory). The latter
        // would require care to make sure that its maintenance wouldn't be too
        // expensive for normal allocation/free operations.
        for stripe in &self.stripes {
            for i in 0..self.memories_per_instance {
                use wasmtime_environ::EntityRef;
                let memory_index = DefinedMemoryIndex::new(i);
                while let Some(id) = stripe
                    .allocator
                    .alloc_affine_and_clear_affinity(module, memory_index)
                {
                    // Attempt to acquire the `MemoryImageSlot` state for this
                    // slot, and then if we have that try to remove the image,
                    // and then if all that succeeds put the slot back in.
                    //
                    // If anything fails then the slot will be in an "unknown"
                    // state which means that on next use it'll be remapped with
                    // anonymous memory.
                    let index = MemoryAllocationIndex(id.0);
                    if let Ok(mut slot) = self.take_memory_image_slot(index) {
                        if slot.remove_image().is_ok() {
                            self.return_memory_image_slot(index, slot);
                        }
                    }

                    stripe.allocator.free(id);
                }
            }
        }
    }

    fn get_base(&self, allocation_index: MemoryAllocationIndex) -> MmapOffset {
        assert!(allocation_index.index() < self.layout.num_slots);
        let offset = self
            .layout
            .slot_bytes
            .checked_mul(allocation_index.index())
            .and_then(|c| c.checked_add(self.layout.pre_slab_guard_bytes))
            .expect("slot_bytes * index + pre_slab_guard_bytes overflows");
        self.mapping.offset(offset).expect("offset is in bounds")
    }

    /// Take ownership of the given image slot.
    ///
    /// This method is used when a `MemoryAllocationIndex` has been allocated
    /// and the state of the slot needs to be acquired. This will lazily
    /// allocate a `MemoryImageSlot` which describes the current (and possibly
    /// prior) state of the slot.
    ///
    /// During deallocation this structure is passed back to
    /// `return_memory_image_slot`.
    ///
    /// Note that this method is fallible because using a slot might require
    /// resetting the memory that was previously there, and that reset may not
    /// succeed. If it fails then this slot cannot be used at this time.
    fn take_memory_image_slot(
        &self,
        allocation_index: MemoryAllocationIndex,
    ) -> Result<MemoryImageSlot> {
        let (maybe_slot, needs_reset) = {
            let mut slot = self.image_slots[allocation_index.index()].lock().unwrap();
            match mem::replace(&mut *slot, ImageSlot::Unknown) {
                ImageSlot::Unmapped => (None, false),
                ImageSlot::Unknown => (None, true),
                ImageSlot::PreviouslyUsed(state) => (Some(state), false),
            }
        };
        let mut slot = maybe_slot.unwrap_or_else(|| {
            MemoryImageSlot::create(
                self.get_base(allocation_index),
                HostAlignedByteCount::ZERO,
                self.layout.max_memory_bytes.byte_count(),
            )
        });

        // For previously-`Unknown` slots this `slot` is brand new and isn't
        // actually tracking the state of the previous slot, so reset it
        // entirely with anonymous memory to wipe the slate clean and start
        // from zero. This should only happen if allocation of the previous
        // slot failed, for example.
        if needs_reset {
            slot.reset_with_anon_memory()?;
        }
        Ok(slot)
    }

    /// Return ownership of the given image slot.
    fn return_memory_image_slot(
        &self,
        allocation_index: MemoryAllocationIndex,
        slot: MemoryImageSlot,
    ) {
        assert!(!slot.is_dirty());

        let prev = mem::replace(
            &mut *self.image_slots[allocation_index.index()].lock().unwrap(),
            ImageSlot::PreviouslyUsed(slot),
        );
        assert!(matches!(prev, ImageSlot::Unknown));
    }
}

/// The index of a memory slot within a single stripe of the memory pool.
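///
/// For example (an illustrative mapping, not from the upstream docs): with
/// `num_stripes = 3`, unstriped slot indices map to `(stripe, striped index)`
/// pairs as follows:
///
/// ```text
/// unstriped: 0      1      2      3      4      5
/// striped:   (0,0)  (1,0)  (2,0)  (0,1)  (1,1)  (2,1)
/// ```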
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct StripedAllocationIndex(u32);

impl StripedAllocationIndex {
    fn from_unstriped_slot_index(
        index: MemoryAllocationIndex,
        num_stripes: usize,
    ) -> (usize, Self) {
        let stripe_index = index.index() % num_stripes;
        let num_stripes: u32 = num_stripes.try_into().unwrap();
        let index_within_stripe = Self(index.0 / num_stripes);
        (stripe_index, index_within_stripe)
    }

    fn as_unstriped_slot_index(self, stripe: usize, num_stripes: usize) -> MemoryAllocationIndex {
        let num_stripes: u32 = num_stripes.try_into().unwrap();
        let stripe: u32 = stripe.try_into().unwrap();
        MemoryAllocationIndex(self.0 * num_stripes + stripe)
    }
}

#[derive(Clone, Debug)]
struct SlabConstraints {
    /// Essentially, the `static_memory_bound`: this is an assumption that the
    /// runtime and JIT compiler make about how much space will be guarded
    /// between slots.
    expected_slot_bytes: HostAlignedByteCount,
    /// The maximum size of any memory in the pool. Always a non-zero multiple
    /// of the page size.
    max_memory_bytes: HostAlignedByteCount,
    num_slots: usize,
    num_pkeys_available: usize,
    guard_bytes: HostAlignedByteCount,
    guard_before_slots: bool,
}

impl SlabConstraints {
    fn new(
        limits: &InstanceLimits,
        tunables: &Tunables,
        num_pkeys_available: usize,
    ) -> Result<Self> {
        // `memory_reservation` is the configured number of bytes for a
        // static memory slot (see `Config::memory_reservation`); even
        // if the memory never grows to this size (e.g., it has a lower memory
        // maximum), codegen will assume that this unused memory is mapped
        // `PROT_NONE`. Typically `memory_reservation` is 4GiB which helps
        // elide most bounds checks. `MemoryPool` must respect this bound,
        // though not explicitly: if we can achieve the same effect via
        // MPK-protected stripes, the slot size can be lower than the
        // `memory_reservation`.
        let expected_slot_bytes =
            HostAlignedByteCount::new_rounded_up_u64(tunables.memory_reservation)
                .context("memory reservation is too large")?;

        // Page-align the maximum size of memory since that's the granularity that
        // permissions are going to be controlled at.
        let max_memory_bytes = HostAlignedByteCount::new_rounded_up(limits.max_memory_size)
            .context("maximum size of memory is too large")?;

        let guard_bytes = HostAlignedByteCount::new_rounded_up_u64(tunables.memory_guard_size)
            .context("guard region is too large")?;

        let num_slots = limits
            .total_memories
            .try_into()
            .context("too many memories")?;

        let constraints = SlabConstraints {
            max_memory_bytes,
            num_slots,
            expected_slot_bytes,
            num_pkeys_available,
            guard_bytes,
            guard_before_slots: tunables.guard_before_linear_memory,
        };
        Ok(constraints)
    }
}

#[derive(Debug)]
struct SlabLayout {
    /// The total number of slots available in the memory pool slab.
    num_slots: usize,
    /// The size of each slot in the memory pool; this contains the maximum
    /// memory size (i.e., from WebAssembly or Wasmtime configuration) plus any
    /// guard region after the memory to catch OOB access. On these guard
    /// regions, note that:
    /// - users can configure how aggressively (or not) to elide bounds checks
    ///   via `Config::memory_guard_size` (see also:
    ///   `memory_and_guard_size`)
    /// - memory protection keys can compress the size of the guard region by
    ///   placing slots from a different key (i.e., a stripe) in the guard
    ///   region; this means the slot itself can be smaller and we can allocate
    ///   more of them.
    slot_bytes: HostAlignedByteCount,
    /// The maximum size that can become accessible, in bytes, for each linear
    /// memory. Guaranteed to be a whole number of Wasm pages.
    max_memory_bytes: HostAlignedByteCount,
    /// If necessary, the number of bytes to reserve as a guard region at the
    /// beginning of the slab.
    pre_slab_guard_bytes: HostAlignedByteCount,
    /// Like `pre_slab_guard_bytes`, but at the end of the slab.
    post_slab_guard_bytes: HostAlignedByteCount,
    /// The number of stripes needed in the slab layout.
    num_stripes: usize,
}

impl SlabLayout {
    /// Return the total size of the slab, using the final layout (where `n =
    /// num_slots`):
    ///
    /// ```text
    /// ┌────────────────────┬──────┬──────┬───┬──────┬─────────────────────┐
    /// │pre_slab_guard_bytes│slot 1│slot 2│...│slot n│post_slab_guard_bytes│
    /// └────────────────────┴──────┴──────┴───┴──────┴─────────────────────┘
    /// ```
    fn total_slab_bytes(&self) -> Result<HostAlignedByteCount> {
        self.slot_bytes
            .checked_mul(self.num_slots)
            .and_then(|c| c.checked_add(self.pre_slab_guard_bytes))
            .and_then(|c| c.checked_add(self.post_slab_guard_bytes))
            .context("total size of memory reservation exceeds addressable memory")
    }

    /// Returns the number of bytes from the beginning of one slot to the
    /// next slot in the same stripe--this is the striped equivalent of
    /// `static_memory_bound`. Recall that between slots of the same stripe we
    /// will see a slot from every other stripe.
    ///
    /// For example, in a 3-stripe pool, this function measures the distance
    /// from the beginning of slot 1 to slot 4, which are of the same stripe:
    ///
    /// ```text
    ///  ◄────────────────────►
    /// ┌────────┬──────┬──────┬────────┬───┐
    /// │*slot 1*│slot 2│slot 3│*slot 4*│...|
    /// └────────┴──────┴──────┴────────┴───┘
    /// ```
    fn bytes_to_next_stripe_slot(&self) -> HostAlignedByteCount {
        self.slot_bytes
            .checked_mul(self.num_stripes)
            .expect("constructor checks that self.slot_bytes * self.num_stripes is in bounds")
    }
}

fn calculate(constraints: &SlabConstraints) -> Result<SlabLayout> {
    let SlabConstraints {
        max_memory_bytes,
        num_slots,
        expected_slot_bytes,
        num_pkeys_available,
        guard_bytes,
        guard_before_slots,
    } = *constraints;

    // If the user specifies a guard region, we always need to allocate a
    // `PROT_NONE` region for it before any memory slots. Recall that we can
    // avoid bounds checks for loads and stores with immediates up to
    // `guard_bytes`, but we rely on Wasmtime to emit bounds checks for any
    // accesses greater than this.
    let pre_slab_guard_bytes = if guard_before_slots {
        guard_bytes
    } else {
        HostAlignedByteCount::ZERO
    };

    // To calculate the slot size, we start with the default configured size and
    // attempt to chip away at this via MPK protection. Note here how we begin
    // to define a slot as "all of the memory and guard region."
    let faulting_region_bytes = expected_slot_bytes
        .max(max_memory_bytes)
        .checked_add(guard_bytes)
        .context("faulting region is too large")?;

    let (num_stripes, slot_bytes) = if guard_bytes == 0 || max_memory_bytes == 0 || num_slots == 0 {
        // In the uncommon case where the memory/guard regions are empty or we
        // don't need any slots, we will not need any stripes: we just lay out
        // the slots back-to-back using a single stripe.
        (1, faulting_region_bytes.byte_count())
    } else if num_pkeys_available < 2 {
        // If we do not have enough protection keys to stripe the memory, we do
        // the same. We can't elide any of the guard bytes because we aren't
        // overlapping guard regions with other stripes...
        (1, faulting_region_bytes.byte_count())
    } else {
        // ...but if we can create at least two stripes, we can use another
        // stripe (i.e., a different pkey) as this slot's guard region--this
        // reduces the guard bytes each slot has to allocate. We must make
        // sure, though, that if the size of that other stripe(s) does not
        // fully cover `guard_bytes`, we keep those around to prevent OOB
        // access.

        // We first calculate the number of stripes we need: we want to
        // minimize this so that there is less chance of a single store
        // running out of slots with its stripe--we need at least two,
        // though. But this is not just an optimization; we need to handle
        // the case when there are fewer slots than stripes. E.g., if our
        // pool is configured with only three slots (`num_slots = 3`), we
        // will run into failures if we attempt to set up more than three
        // stripes.
        let needed_num_stripes = faulting_region_bytes
            .checked_div(max_memory_bytes)
            .expect("if condition above implies max_memory_bytes is non-zero")
            + usize::from(
                faulting_region_bytes
                    .checked_rem(max_memory_bytes)
                    .expect("if condition above implies max_memory_bytes is non-zero")
                    != 0,
            );
        assert!(needed_num_stripes > 0);
        let num_stripes = num_pkeys_available.min(needed_num_stripes).min(num_slots);

        // Next, we try to reduce the slot size by "overlapping" the stripes: we
        // can make slot `n` smaller since we know that slot `n+1` and following
        // are in different stripes and will look just like `PROT_NONE` memory.
        // Recall that codegen expects a guarantee that at least
        // `faulting_region_bytes` will catch OOB accesses via segfaults.
        let needed_slot_bytes = faulting_region_bytes
            .byte_count()
            .checked_div(num_stripes)
            .unwrap_or(faulting_region_bytes.byte_count())
            .max(max_memory_bytes.byte_count());
        assert!(needed_slot_bytes >= max_memory_bytes.byte_count());

        (num_stripes, needed_slot_bytes)
    };
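
    // A worked example with illustrative numbers (not taken from the upstream
    // comments): with `expected_slot_bytes = 4 GiB`, `guard_bytes = 2 GiB`,
    // `max_memory_bytes = 1 GiB`, and at least six slots and protection keys
    // available:
    //
    //   faulting_region_bytes = 4 GiB + 2 GiB         = 6 GiB
    //   needed_num_stripes    = 6 GiB / 1 GiB (+ 0)   = 6
    //   slot_bytes            = max(6 GiB / 6, 1 GiB) = 1 GiB
    //
    // Six stripes let each slot shrink from 6 GiB down to 1 GiB while the
    // distance between two slots of the same stripe (6 GiB) still covers the
    // required faulting region.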

    // The page-aligned slot size; equivalent to `memory_and_guard_size`.
    let slot_bytes =
        HostAlignedByteCount::new_rounded_up(slot_bytes).context("slot size is too large")?;

    // We may need another guard region (like `pre_slab_guard_bytes`) at the end
    // of our slab to maintain our `faulting_region_bytes` guarantee. We could
    // be conservative and just create it as large as `faulting_region_bytes`,
    // but because we know that the last slot's `slot_bytes` make up the first
    // part of that region, we reduce the final guard region by that much.
    let post_slab_guard_bytes = faulting_region_bytes.saturating_sub(slot_bytes);

    // Check that we haven't exceeded the slab we can calculate given the limits
    // of `usize`.
    let layout = SlabLayout {
        num_slots,
        slot_bytes,
        max_memory_bytes,
        pre_slab_guard_bytes,
        post_slab_guard_bytes,
        num_stripes,
    };
    match layout.total_slab_bytes() {
        Ok(_) => Ok(layout),
        Err(e) => Err(e),
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use proptest::prelude::*;

    const WASM_PAGE_SIZE: u32 = wasmtime_environ::Memory::DEFAULT_PAGE_SIZE;

    #[cfg(target_pointer_width = "64")]
    #[test]
    fn test_memory_pool() -> Result<()> {
        let pool = MemoryPool::new(
            &PoolingInstanceAllocatorConfig {
                limits: InstanceLimits {
                    total_memories: 5,
                    max_tables_per_module: 0,
                    max_memories_per_module: 3,
                    table_elements: 0,
                    max_memory_size: WASM_PAGE_SIZE as usize,
                    ..Default::default()
                },
                ..Default::default()
            },
            &Tunables {
                memory_reservation: WASM_PAGE_SIZE as u64,
                memory_guard_size: 0,
                ..Tunables::default_host()
            },
        )?;

        assert_eq!(pool.layout.slot_bytes, WASM_PAGE_SIZE as usize);
        assert_eq!(pool.layout.num_slots, 5);
        assert_eq!(pool.layout.max_memory_bytes, WASM_PAGE_SIZE as usize);

        let base = pool.mapping.as_ptr() as usize;

        for i in 0..5 {
            let index = MemoryAllocationIndex(i);
            let ptr = pool.get_base(index).as_mut_ptr();
            assert_eq!(
                ptr as usize - base,
                i as usize * pool.layout.slot_bytes.byte_count()
            );
        }

        Ok(())
    }
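
    // A small additional check, not part of the original test suite: the
    // striped/unstriped index conversions used by `allocate`/`deallocate`
    // above should round-trip for any stripe count.
    #[test]
    fn test_striped_index_round_trip() {
        for num_stripes in 1..=4usize {
            for index in 0..16u32 {
                let unstriped = MemoryAllocationIndex(index);
                let (stripe, striped) =
                    StripedAllocationIndex::from_unstriped_slot_index(unstriped, num_stripes);
                assert!(stripe < num_stripes);
                assert_eq!(
                    striped
                        .as_unstriped_slot_index(stripe, num_stripes)
                        .index(),
                    unstriped.index()
                );
            }
        }
    }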

    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_pooling_allocator_striping() {
        if !mpk::is_supported() {
            println!("skipping `test_pooling_allocator_striping` test; mpk is not supported");
            return;
        }

        // Force the use of MPK.
        let config = PoolingInstanceAllocatorConfig {
            memory_protection_keys: Enabled::Yes,
            ..PoolingInstanceAllocatorConfig::default()
        };
        let pool = MemoryPool::new(&config, &Tunables::default_host()).unwrap();
        assert!(pool.stripes.len() >= 2);

        let max_memory_slots = config.limits.total_memories;
        dbg!(pool.stripes[0].allocator.num_empty_slots());
        dbg!(pool.stripes[1].allocator.num_empty_slots());
        let available_memory_slots: usize = pool
            .stripes
            .iter()
            .map(|s| s.allocator.num_empty_slots())
            .sum();
        assert_eq!(
            max_memory_slots,
            u32::try_from(available_memory_slots).unwrap()
        );
    }

    #[test]
    fn check_known_layout_calculations() {
        for num_pkeys_available in 0..16 {
            for num_memory_slots in [0, 1, 10, 64] {
                for expected_slot_bytes in [0, 1 << 30 /* 1GB */, 4 << 30 /* 4GB */] {
                    let expected_slot_bytes =
                        HostAlignedByteCount::new(expected_slot_bytes).unwrap();
                    for max_memory_bytes in
                        [0, 1 * WASM_PAGE_SIZE as usize, 10 * WASM_PAGE_SIZE as usize]
                    {
                        // Note new rather than new_rounded_up here -- for now,
                        // WASM_PAGE_SIZE is 64KiB, which is a multiple of the
                        // host page size on all platforms.
                        let max_memory_bytes = HostAlignedByteCount::new(max_memory_bytes).unwrap();
                        for guard_bytes in [0, 2 << 30 /* 2GB */] {
                            let guard_bytes = HostAlignedByteCount::new(guard_bytes).unwrap();
                            for guard_before_slots in [true, false] {
                                let constraints = SlabConstraints {
                                    max_memory_bytes,
                                    num_slots: num_memory_slots,
                                    expected_slot_bytes,
                                    num_pkeys_available,
                                    guard_bytes,
                                    guard_before_slots,
                                };
                                match calculate(&constraints) {
                                    Ok(layout) => {
                                        assert_slab_layout_invariants(constraints, layout)
                                    }
                                    Err(e) => {
                                        // Only allow failure on 32-bit
                                        // platforms where the calculation
                                        // exceeded the size of the address
                                        // space
                                        assert!(
                                            cfg!(target_pointer_width = "32")
                                                && e.to_string()
                                                    .contains("exceeds addressable memory"),
                                            "bad error: {e:?}"
                                        );
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    proptest! {
        #[test]
        #[cfg_attr(miri, ignore)]
        fn check_random_layout_calculations(c in constraints()) {
            if let Ok(l) = calculate(&c) {
                assert_slab_layout_invariants(c, l);
            }
        }
    }

    fn constraints() -> impl Strategy<Value = SlabConstraints> {
        (
            any::<HostAlignedByteCount>(),
            any::<usize>(),
            any::<HostAlignedByteCount>(),
            any::<usize>(),
            any::<HostAlignedByteCount>(),
            any::<bool>(),
        )
            .prop_map(
                |(
                    max_memory_bytes,
                    num_memory_slots,
                    expected_slot_bytes,
                    num_pkeys_available,
                    guard_bytes,
                    guard_before_slots,
                )| {
                    SlabConstraints {
                        max_memory_bytes,
                        num_slots: num_memory_slots,
                        expected_slot_bytes,
                        num_pkeys_available,
                        guard_bytes,
                        guard_before_slots,
                    }
                },
            )
    }

    fn assert_slab_layout_invariants(c: SlabConstraints, s: SlabLayout) {
        // Check that all the sizes add up.
        assert_eq!(
            s.total_slab_bytes().unwrap(),
            s.pre_slab_guard_bytes
                .checked_add(s.slot_bytes.checked_mul(c.num_slots).unwrap())
                .and_then(|c| c.checked_add(s.post_slab_guard_bytes))
                .unwrap(),
            "the slab size does not add up: {c:?} => {s:?}"
        );
        assert!(
            s.slot_bytes >= s.max_memory_bytes,
            "slot is not big enough: {c:?} => {s:?}"
        );

        // The HostAlignedByteCount newtype wrapper ensures that the various
        // byte values are page-aligned.

        // Check that we use no more or less stripes than needed.
        assert!(s.num_stripes >= 1, "not enough stripes: {c:?} => {s:?}");
        if c.num_pkeys_available == 0 || c.num_slots == 0 {
            assert_eq!(
                s.num_stripes, 1,
                "expected at least one stripe: {c:?} => {s:?}"
            );
        } else {
            assert!(
                s.num_stripes <= c.num_pkeys_available,
                "layout has more stripes than available pkeys: {c:?} => {s:?}"
            );
            assert!(
                s.num_stripes <= c.num_slots,
                "layout has more stripes than memory slots: {c:?} => {s:?}"
            );
        }

        // Check that we use the minimum number of stripes/protection keys.
        // - if the next MPK-protected slot is bigger or the same as the
        //   required guard region, we only need two stripes
        // - if the next slot is smaller than the guard region, we only need
        //   enough stripes to add up to at least that guard region size.
        if c.num_pkeys_available > 1 && !c.max_memory_bytes.is_zero() {
            assert!(
                s.num_stripes <= (c.guard_bytes.checked_div(c.max_memory_bytes).unwrap() + 2),
                "calculated more stripes than needed: {c:?} => {s:?}"
            );
        }

        // Check that the memory-striping will not allow OOB access.
        // - we may have reduced the slot size from `expected_slot_bytes` to
        //   `slot_bytes` assuming MPK striping; we check that our guaranteed
        //   "faulting region" is respected
        // - the last slot won't have MPK striping after it; we check that the
        //   `post_slab_guard_bytes` accounts for this
        assert!(
            s.bytes_to_next_stripe_slot()
                >= c.expected_slot_bytes
                    .max(c.max_memory_bytes)
                    .checked_add(c.guard_bytes)
                    .unwrap(),
            "faulting region not large enough: {c:?} => {s:?}"
        );
        assert!(
            s.slot_bytes.checked_add(s.post_slab_guard_bytes).unwrap() >= c.expected_slot_bytes,
            "last slot may allow OOB access: {c:?} => {s:?}"
        );
    }
}