wasmtime/runtime/vm/memory/mmap.rs

//! Support for implementing the [`RuntimeLinearMemory`] trait in terms of a
//! platform mmap primitive.

use crate::prelude::*;
use crate::runtime::vm::memory::RuntimeLinearMemory;
use crate::runtime::vm::{mmap::AlignedLength, HostAlignedByteCount, Mmap};
use alloc::sync::Arc;
use wasmtime_environ::Tunables;

use super::MemoryBase;

/// A linear memory instance.
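///
/// A rough sketch of how the underlying `mmap` reservation is laid out, from
/// low to high addresses (either guard region may be zero-sized):
///
/// ```text
/// +----------------+-----------------------------------+-------------------+
/// | pre_guard_size | accessible (len) ... capacity     | offset_guard_size |
/// +----------------+-----------------------------------+-------------------+
///                  ^ `base()` points here
/// ```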
#[derive(Debug)]
pub struct MmapMemory {
    // The underlying allocation.
    mmap: Arc<Mmap<AlignedLength>>,

    // The current length of this Wasm memory, in bytes.
    //
    // This region starts at `pre_guard_size` offset from the base of `mmap`. It
    // is always accessible, which means that if the Wasm page size is smaller
    // than the host page size, there may be some trailing region in the `mmap`
    // that is accessible but should not be accessed. (We rely on explicit
    // bounds checks in the compiled code to protect this region.)
    len: usize,

    // The optional maximum accessible size, in bytes, for this linear memory.
    //
    // Note that this maximum does not factor in guard pages, so this isn't the
    // maximum size of the linear address space reservation for this memory.
    //
    // This is *not* always a multiple of the host page size, and
    // `self.accessible()` may go past `self.maximum` when Wasm is using a small
    // custom page size due to `self.accessible()`'s rounding up to the host
    // page size.
    maximum: Option<usize>,

    // The amount of extra bytes to reserve whenever memory grows. This is
    // specified so that the cost of repeated growth is amortized.
    extra_to_reserve_on_growth: HostAlignedByteCount,

    // Size in bytes of extra guard pages before the start and after the end to
    // optimize loads and stores with constant offsets.
    pre_guard_size: HostAlignedByteCount,
    offset_guard_size: HostAlignedByteCount,
}

impl MmapMemory {
    /// Create a new linear memory instance with the specified minimum and
    /// maximum sizes, in bytes.
    pub fn new(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        minimum: usize,
        maximum: Option<usize>,
    ) -> Result<Self> {
        // It's a programmer error for these two configuration values to exceed
        // the host's available address space, so return an error if such a
        // configuration is found (mostly an issue for hypothetical 32-bit
        // hosts).
        //
        // Also be sure to round up to the host page size for this value.
        let offset_guard_bytes =
            HostAlignedByteCount::new_rounded_up_u64(tunables.memory_guard_size)
                .context("tunable.memory_guard_size overflows")?;
        let pre_guard_bytes = if tunables.guard_before_linear_memory {
            offset_guard_bytes
        } else {
            HostAlignedByteCount::ZERO
        };

        // Calculate how much is going to be allocated for this linear memory in
        // addition to how much extra space we're reserving to grow into.
        //
        // If the minimum size of this linear memory fits within the initial
        // allocation (tunables.memory_reservation) then that's how many bytes
        // are going to be allocated. If the maximum size of linear memory
        // additionally fits within the entire allocation then there's no need
        // to reserve any extra for growth.
        //
        // If the minimum size doesn't fit within the initial allocation, then
        // the allocation is instead sized to the minimum plus the extra space
        // reserved for growth.
        let mut alloc_bytes = tunables.memory_reservation;
        let mut extra_to_reserve_on_growth = tunables.memory_reservation_for_growth;
        let minimum_u64 = u64::try_from(minimum).unwrap();
        if minimum_u64 <= alloc_bytes {
            if let Ok(max) = ty.maximum_byte_size() {
                if max <= alloc_bytes {
                    extra_to_reserve_on_growth = 0;
                }
            }
        } else {
            alloc_bytes = minimum_u64.saturating_add(extra_to_reserve_on_growth);
        }
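
        // As a rough illustration (example numbers only, not taken from any
        // particular configuration): with `memory_reservation = 4 GiB` and
        // `memory_reservation_for_growth = 2 GiB`, a memory with a 64 KiB
        // minimum and a 1 MiB maximum fits entirely in the initial
        // reservation, so `alloc_bytes` stays at 4 GiB and no extra growth
        // space is reserved. A memory with an 8 GiB minimum does not fit, so
        // `alloc_bytes` becomes 8 GiB + 2 GiB = 10 GiB.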

        // Convert `alloc_bytes` and `extra_to_reserve_on_growth` to
        // page-aligned `usize` values.
        let alloc_bytes = HostAlignedByteCount::new_rounded_up_u64(alloc_bytes)
            .context("tunables.memory_reservation overflows")?;
        let extra_to_reserve_on_growth =
            HostAlignedByteCount::new_rounded_up_u64(extra_to_reserve_on_growth)
                .context("tunables.memory_reservation_for_growth overflows")?;

        let request_bytes = pre_guard_bytes
            .checked_add(alloc_bytes)
            .and_then(|i| i.checked_add(offset_guard_bytes))
            .with_context(|| format!("cannot allocate {minimum} with guard regions"))?;

        let mmap = Mmap::accessible_reserved(HostAlignedByteCount::ZERO, request_bytes)?;

        if minimum > 0 {
            let accessible = HostAlignedByteCount::new_rounded_up(minimum)?;
            // SAFETY: mmap is not in use right now so it's safe to make it accessible.
            unsafe {
                mmap.make_accessible(pre_guard_bytes, accessible)?;
            }
        }

        Ok(Self {
            mmap: Arc::new(mmap),
            len: minimum,
            maximum,
            pre_guard_size: pre_guard_bytes,
            offset_guard_size: offset_guard_bytes,
            extra_to_reserve_on_growth,
        })
    }

    /// Get the length of the accessible portion of the underlying `mmap`. This
    /// is `self.len` rounded up to a multiple of the host page size.
    fn accessible(&self) -> HostAlignedByteCount {
        let accessible = HostAlignedByteCount::new_rounded_up(self.len)
            .expect("accessible region always fits in usize");
        debug_assert!(accessible <= self.current_capacity());
        accessible
    }

    /// Get the maximum size, in bytes, that this memory can grow to within its
    /// current `mmap` reservation (i.e. the reservation minus the guard regions).
    fn current_capacity(&self) -> HostAlignedByteCount {
        let mmap_len = self.mmap.len_aligned();
        mmap_len
            .checked_sub(self.offset_guard_size)
            .and_then(|i| i.checked_sub(self.pre_guard_size))
            .expect("guard regions fit in mmap.len")
    }
}

impl RuntimeLinearMemory for MmapMemory {
    fn byte_size(&self) -> usize {
        self.len
    }

    fn byte_capacity(&self) -> usize {
        self.current_capacity().byte_count()
    }

    fn grow_to(&mut self, new_size: usize) -> Result<()> {
        let new_accessible = HostAlignedByteCount::new_rounded_up(new_size)?;
        let current_capacity = self.current_capacity();
        if new_accessible > current_capacity {
            // If the new size of this heap exceeds the current size of the
            // allocation we have, then this must be a dynamic heap. Use
            // `new_size` to calculate a new size of an allocation, allocate it,
            // and then copy over the memory from before.
            let request_bytes = self
                .pre_guard_size
                .checked_add(new_accessible)
                .and_then(|s| s.checked_add(self.extra_to_reserve_on_growth))
                .and_then(|s| s.checked_add(self.offset_guard_size))
                .context("overflow calculating size of memory allocation")?;

            let mut new_mmap =
                Mmap::accessible_reserved(HostAlignedByteCount::ZERO, request_bytes)?;
            // SAFETY: new_mmap is not in use right now so it's safe to make it
            // accessible.
            unsafe {
                new_mmap.make_accessible(self.pre_guard_size, new_accessible)?;
            }

            // This method has an exclusive reference to `self.mmap` and just
            // created `new_mmap` so it should be safe to acquire references
            // into both of them and copy between them.
            unsafe {
                let range =
                    self.pre_guard_size.byte_count()..(self.pre_guard_size.byte_count() + self.len);
                let src = self.mmap.slice(range.clone());
                let dst = new_mmap.slice_mut(range);
                dst.copy_from_slice(src);
            }

            self.mmap = Arc::new(new_mmap);
        } else {
            // If the new size of this heap fits within the existing allocation
            // then all we need to do is to make the new pages accessible. This
            // can happen either for "static" heaps which always hit this case,
            // or "dynamic" heaps which have some space reserved after the
            // initial allocation to grow into before the heap is moved in
            // memory.
            assert!(new_size <= current_capacity.byte_count());
            assert!(self.maximum.map_or(true, |max| new_size <= max));

            // If the Wasm memory's page size is smaller than the host's page
            // size, then we might not need to actually change permissions,
            // since we are forced to round our accessible range up to the
            // host's page size.
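            //
            // (Illustrative numbers only: with 4 KiB host pages and a 1-byte
            // Wasm page size, growing from 10 bytes to 20 bytes leaves both
            // the old and new accessible sizes rounded up to the same 4 KiB,
            // so the difference computed below is zero and nothing new needs
            // to be made accessible.)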
            if let Ok(difference) = new_accessible.checked_sub(self.accessible()) {
                // SAFETY: the difference was previously inaccessible so we
                // never handed out any references to within it.
                unsafe {
                    self.mmap.make_accessible(
                        self.pre_guard_size
                            .checked_add(self.accessible())
                            .context("overflow calculating new accessible region")?,
                        difference,
                    )?;
                }
            }
        }

        self.len = new_size;

        Ok(())
    }

    fn set_byte_size(&mut self, len: usize) {
        self.len = len;
    }

    fn base(&self) -> MemoryBase {
        MemoryBase::Mmap(
            self.mmap
                .offset(self.pre_guard_size)
                .expect("pre_guard_size is in bounds"),
        )
    }
}