wasmtime/runtime/vm/mmap.rs
//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use super::HostAlignedByteCount;
use crate::prelude::*;
use crate::runtime::vm::sys::{mmap, vm::MemoryImageSource};
use alloc::sync::Arc;
use core::ops::Range;
use core::ptr::NonNull;
#[cfg(feature = "std")]
use std::fs::File;

/// A marker type for an [`Mmap`] where both the start address and length are a
/// multiple of the host page size.
///
/// For more information, see the documentation on [`Mmap`].
#[derive(Clone, Debug)]
pub struct AlignedLength {}

/// A type of [`Mmap`] where the start address is host page-aligned, but the
/// length is possibly not a multiple of the host page size.
///
/// For more information, see the documentation on [`Mmap`].
#[derive(Clone, Debug)]
pub struct UnalignedLength {
    #[cfg(feature = "std")]
    file: Option<Arc<File>>,
}

/// A platform-independent abstraction over memory-mapped data.
///
/// The type parameter can be one of:
///
/// * [`AlignedLength`]: Both the start address and length are page-aligned
///   (i.e. a multiple of the host page size). This is always the result of an
///   mmap backed by anonymous memory.
///
/// * [`UnalignedLength`]: The start address is host page-aligned, but the
///   length is not necessarily page-aligned. This is usually backed by a file,
///   but can also be backed by anonymous memory.
///
/// ## Notes
///
/// If the length of a file is not a multiple of the host page size, [POSIX does
/// not specify any semantics][posix-mmap] for the rest of the last page. Linux
/// [does say][linux-mmap] that the rest of the page is reserved and zeroed out,
/// but for portability it's best to not assume anything about the rest of
/// memory. `UnalignedLength` achieves a type-level distinction between an mmap
/// that is backed purely by memory, and one that is possibly backed by a file.
///
/// Currently, the OS-specific `mmap` implementations in this crate do not make
/// this distinction -- alignment is managed at this platform-independent
/// layer. It might make sense to add this distinction to the OS-specific
/// implementations in the future.
///
/// [posix-mmap]: https://pubs.opengroup.org/onlinepubs/9799919799/functions/mmap.html
/// [linux-mmap]: https://man7.org/linux/man-pages/man2/mmap.2.html#NOTES
#[derive(Debug)]
pub struct Mmap<T> {
    sys: mmap::Mmap,
    data: T,
}

impl Mmap<AlignedLength> {
    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned
    /// accessible memory.
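    ///
    /// A minimal usage sketch (illustrative only and not compiled as a
    /// doctest, since this type is crate-internal; the 100-byte request is an
    /// arbitrary example):
    ///
    /// ```ignore
    /// let mmap = Mmap::with_at_least(100)?;
    /// // The mapping is rounded up to a whole number of host pages.
    /// assert!(mmap.len() >= 100);
    /// assert_eq!(mmap.len(), mmap.len_aligned().byte_count());
    /// ```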
    pub fn with_at_least(size: usize) -> Result<Self> {
        let rounded_size = HostAlignedByteCount::new_rounded_up(size)?;
        Self::accessible_reserved(rounded_size, rounded_size)
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned
    /// accessible memory, within a reserved mapping of `mapping_size` bytes.
    /// `accessible_size` and `mapping_size` must be native page-size multiples.
    ///
    /// # Panics
    ///
    /// This function will panic if `accessible_size` is greater than
    /// `mapping_size`.
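    ///
    /// A sketch of the reserve-then-commit pattern (illustrative only and not
    /// compiled as a doctest; the two/four page split is an arbitrary
    /// example):
    ///
    /// ```ignore
    /// let page = HostAlignedByteCount::host_page_size();
    /// let two_pages = page.checked_mul(2).unwrap();
    /// let four_pages = page.checked_mul(4).unwrap();
    /// // Reserve four pages of address space but only commit the first two.
    /// let mmap = Mmap::accessible_reserved(two_pages, four_pages)?;
    /// assert_eq!(mmap.len_aligned(), four_pages);
    /// ```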
    pub fn accessible_reserved(
        accessible_size: HostAlignedByteCount,
        mapping_size: HostAlignedByteCount,
    ) -> Result<Self> {
        assert!(accessible_size <= mapping_size);

        if mapping_size.is_zero() {
            Ok(Mmap {
                sys: mmap::Mmap::new_empty(),
                data: AlignedLength {},
            })
        } else if accessible_size == mapping_size {
            Ok(Mmap {
                sys: mmap::Mmap::new(mapping_size)
                    .context(format!("mmap failed to allocate {mapping_size:#x} bytes"))?,
                data: AlignedLength {},
            })
        } else {
            let result = Mmap {
                sys: mmap::Mmap::reserve(mapping_size)
                    .context(format!("mmap failed to reserve {mapping_size:#x} bytes"))?,
                data: AlignedLength {},
            };
            if !accessible_size.is_zero() {
                // SAFETY: result was just created and is not in use.
                unsafe {
                    result
                        .make_accessible(HostAlignedByteCount::ZERO, accessible_size)
                        .context(format!(
                            "mmap failed to allocate {accessible_size:#x} bytes"
                        ))?;
                }
            }
            Ok(result)
        }
    }

    /// Converts this `Mmap` into a `Mmap<UnalignedLength>`.
    ///
    /// `UnalignedLength` really means "_possibly_ unaligned length", so it can
    /// be freely converted over at the cost of losing the alignment guarantee.
    pub fn into_unaligned(self) -> Mmap<UnalignedLength> {
        Mmap {
            sys: self.sys,
            data: UnalignedLength {
                #[cfg(feature = "std")]
                file: None,
            },
        }
    }

    /// Returns the length of the memory mapping as an aligned byte count.
    pub fn len_aligned(&self) -> HostAlignedByteCount {
        // SAFETY: The type parameter indicates that self.sys.len() is aligned.
        unsafe { HostAlignedByteCount::new_unchecked(self.sys.len()) }
    }

    /// Return a struct representing a page-aligned offset into the mmap.
    ///
    /// Returns an error if `offset > self.len_aligned()`.
    pub fn offset(self: &Arc<Self>, offset: HostAlignedByteCount) -> Result<MmapOffset> {
        if offset > self.len_aligned() {
            bail!(
                "offset {} is not in bounds for mmap: {}",
                offset,
                self.len_aligned()
            );
        }

        Ok(MmapOffset::new(self.clone(), offset))
    }

    /// Return an `MmapOffset` corresponding to zero bytes into the mmap.
    pub fn zero_offset(self: &Arc<Self>) -> MmapOffset {
        MmapOffset::new(self.clone(), HostAlignedByteCount::ZERO)
    }

    /// Make the memory starting at `start` and extending for `len` bytes
    /// accessible. `start` and `len` must be native page-size multiples and
    /// describe a range within `self`'s reserved memory.
    ///
    /// # Safety
    ///
    /// There must not be any other references to the region of memory being
    /// made accessible.
    ///
    /// # Panics
    ///
    /// Panics if `start + len > self.len()`.
    pub unsafe fn make_accessible(
        &self,
        start: HostAlignedByteCount,
        len: HostAlignedByteCount,
    ) -> Result<()> {
        if len.is_zero() {
            // A zero-sized mprotect (or equivalent) is allowed on some
            // platforms but not others (notably Windows). Treat it as a no-op
            // everywhere.
            return Ok(());
        }

        let end = start
            .checked_add(len)
            .expect("start + len must not overflow");
        assert!(
            end <= self.len_aligned(),
            "start + len ({end}) must be <= mmap region {}",
            self.len_aligned()
        );

        self.sys.make_accessible(start, len)
    }
}

#[cfg(feature = "std")]
impl Mmap<UnalignedLength> {
    /// Creates a new `Mmap` by mapping the provided `file` into memory.
    ///
    /// The memory is mapped in read-only mode for the entire file. If portions
    /// of the file need to be modified then the `region` crate can be used to
    /// alter permissions of each page.
    ///
    /// The file handle is retained so that it can later be recovered via
    /// [`Self::original_file`].
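    ///
    /// A brief sketch (illustrative only and not compiled as a doctest; the
    /// path is a placeholder):
    ///
    /// ```ignore
    /// let file = Arc::new(File::open("path/to/some/file")?);
    /// let mmap = Mmap::from_file(file)?;
    /// assert!(mmap.original_file().is_some());
    /// ```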
    pub fn from_file(file: Arc<File>) -> Result<Self> {
        let sys = mmap::Mmap::from_file(&file)?;
        Ok(Mmap {
            sys,
            data: UnalignedLength { file: Some(file) },
        })
    }

    /// Returns the underlying file that this mmap is mapping, if present.
    pub fn original_file(&self) -> Option<&Arc<File>> {
        self.data.file.as_ref()
    }
}

impl<T> Mmap<T> {
    /// Return the allocated memory as a slice of u8.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the range of bytes is accessible to the
    /// program and additionally has previously been initialized.
    ///
    /// # Panics
    ///
    /// Panics if the `range` provided is outside the limits of this mmap.
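    ///
    /// A brief sketch (illustrative only and not compiled as a doctest): a
    /// fresh anonymous mapping from [`Mmap::with_at_least`] is fully
    /// accessible and zero-filled, so the whole range may be viewed as a
    /// slice.
    ///
    /// ```ignore
    /// let mmap = Mmap::with_at_least(100)?;
    /// // SAFETY: every byte of the anonymous mapping is accessible and
    /// // zero-initialized by the OS.
    /// let bytes = unsafe { mmap.slice(0..mmap.len()) };
    /// assert!(bytes.iter().all(|&b| b == 0));
    /// ```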
    #[inline]
    pub unsafe fn slice(&self, range: Range<usize>) -> &[u8] {
        assert!(range.start <= range.end);
        assert!(range.end <= self.len());
        core::slice::from_raw_parts(self.as_ptr().add(range.start), range.end - range.start)
    }

    /// Return the allocated memory as a mutable slice of u8.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the range of bytes is accessible to the
    /// program and additionally has previously been initialized.
    ///
    /// # Panics
    ///
    /// Panics if the `range` provided is outside the limits of this mmap.
    pub unsafe fn slice_mut(&mut self, range: Range<usize>) -> &mut [u8] {
        assert!(range.start <= range.end);
        assert!(range.end <= self.len());
        core::slice::from_raw_parts_mut(self.as_mut_ptr().add(range.start), range.end - range.start)
    }

    /// Return the allocated memory as a pointer to u8.
    #[inline]
    pub fn as_ptr(&self) -> *const u8 {
        self.sys.as_send_sync_ptr().as_ptr() as *const u8
    }

    /// Return the allocated memory as a mutable pointer to u8.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.sys.as_send_sync_ptr().as_ptr()
    }

    /// Return the allocated memory as a [`NonNull`] pointer to u8.
    #[inline]
    pub fn as_non_null(&self) -> NonNull<u8> {
        self.sys.as_send_sync_ptr().as_non_null()
    }

    /// Return the length of the allocated memory.
    ///
    /// This is the byte length of this entire mapping which includes both
    /// addressable and non-addressable memory.
    ///
    /// If the length is statically known to be page-aligned via the
    /// [`AlignedLength`] type parameter, use [`Self::len_aligned`].
    #[inline]
    pub fn len(&self) -> usize {
        self.sys.len()
    }

    /// Makes the specified `range` within this `Mmap` read/execute.
    ///
    /// # Safety
    ///
    /// This method is unsafe as it's generally not valid to simply make memory
    /// executable, so it's up to the caller to ensure that everything is in
    /// order and this doesn't overlap with other memory that should only be
    /// read or only read/write.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out-of-bounds or its start is not page-aligned.
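    ///
    /// A brief sketch (illustrative only and not compiled as a doctest;
    /// `code_len` stands for a hypothetical number of bytes of finished code
    /// already copied to the start of the mapping):
    ///
    /// ```ignore
    /// // SAFETY: the range holds finished code and nothing else relies on it
    /// // staying read/write.
    /// unsafe {
    ///     mmap.make_executable(0..code_len, false)?;
    /// }
    /// ```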
    pub unsafe fn make_executable(
        &self,
        range: Range<usize>,
        enable_branch_protection: bool,
    ) -> Result<()> {
        assert!(range.start <= self.len());
        assert!(range.end <= self.len());
        assert!(range.start <= range.end);
        assert!(
            range.start % crate::runtime::vm::host_page_size() == 0,
            "changing of protections isn't page-aligned",
        );

        if range.start == range.end {
            // A zero-sized mprotect (or equivalent) is allowed on some
            // platforms but not others (notably Windows). Treat it as a no-op
            // everywhere.
            return Ok(());
        }

        self.sys
            .make_executable(range, enable_branch_protection)
            .context("failed to make memory executable")
    }

    /// Makes the specified `range` within this `Mmap` read-only.
    ///
    /// # Safety
    ///
    /// As with [`Self::make_executable`], the caller must ensure that changing
    /// the protection of this range doesn't conflict with other code that
    /// expects to read or write the same memory.
    ///
    /// # Panics
    ///
    /// Panics if `range` is out-of-bounds or its start is not page-aligned.
    pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
        assert!(range.start <= self.len());
        assert!(range.end <= self.len());
        assert!(range.start <= range.end);
        assert!(
            range.start % crate::runtime::vm::host_page_size() == 0,
            "changing of protections isn't page-aligned",
        );

        if range.start == range.end {
            // A zero-sized mprotect (or equivalent) is allowed on some
            // platforms but not others (notably Windows). Treat it as a no-op
            // everywhere.
            return Ok(());
        }

        self.sys
            .make_readonly(range)
            .context("failed to make memory readonly")
    }
}

fn _assert() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<Mmap<AlignedLength>>();
    _assert_send_sync::<Mmap<UnalignedLength>>();
}

impl From<Mmap<AlignedLength>> for Mmap<UnalignedLength> {
    fn from(mmap: Mmap<AlignedLength>) -> Mmap<UnalignedLength> {
        mmap.into_unaligned()
    }
}

/// A reference to an [`Mmap`], along with a host-page-aligned index within it.
///
/// The main invariant this type asserts is that the index is in bounds within
/// the `Mmap` (i.e. `self.mmap[self.offset]` is valid). In the future, this
/// type may also assert other invariants.
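///
/// A brief sketch (illustrative only and not compiled as a doctest; the
/// 100-byte mapping size is an arbitrary example):
///
/// ```ignore
/// let mmap = Arc::new(Mmap::with_at_least(100)?);
/// let start = mmap.zero_offset();
/// assert_eq!(start.offset(), HostAlignedByteCount::ZERO);
/// assert_eq!(start.as_mut_ptr(), mmap.as_mut_ptr());
/// ```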
#[derive(Clone, Debug)]
pub struct MmapOffset {
    mmap: Arc<Mmap<AlignedLength>>,
    offset: HostAlignedByteCount,
}

impl MmapOffset {
    #[inline]
    fn new(mmap: Arc<Mmap<AlignedLength>>, offset: HostAlignedByteCount) -> Self {
        assert!(
            offset <= mmap.len_aligned(),
            "offset {} must be in bounds (<= {})",
            offset,
            mmap.len_aligned(),
        );
        Self { mmap, offset }
    }

    /// Returns the mmap this offset is within.
    #[inline]
    pub fn mmap(&self) -> &Arc<Mmap<AlignedLength>> {
        &self.mmap
    }

    /// Returns the host-page-aligned offset within the mmap.
    #[inline]
    pub fn offset(&self) -> HostAlignedByteCount {
        self.offset
    }

    /// Returns the raw pointer in memory represented by this offset.
    #[inline]
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.as_non_null().as_ptr()
    }

    /// Returns the raw pointer in memory represented by this offset.
    #[inline]
    pub fn as_non_null(&self) -> NonNull<u8> {
        // SAFETY: constructor checks that offset is within this allocation.
        unsafe { self.mmap().as_non_null().byte_add(self.offset.byte_count()) }
    }

    /// Maps an image into the mmap with read/write permissions.
    ///
    /// The image is mapped at `self.mmap.as_ptr() + self.offset +
    /// memory_offset`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that no one else has a reference to this memory.
    pub unsafe fn map_image_at(
        &self,
        image_source: &MemoryImageSource,
        source_offset: u64,
        memory_offset: HostAlignedByteCount,
        memory_len: HostAlignedByteCount,
    ) -> Result<()> {
        let total_offset = self
            .offset
            .checked_add(memory_offset)
            .expect("self.offset + memory_offset is in bounds");
        self.mmap
            .sys
            .map_image_at(image_source, source_offset, total_offset, memory_len)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Test zero-length calls to mprotect (or the OS equivalent).
    ///
    /// These should be treated as no-ops on all platforms. This test ensures
    /// that such calls at least don't error out.
    #[test]
    fn mprotect_zero_length() {
        let page_size = HostAlignedByteCount::host_page_size();
        let pagex2 = page_size.checked_mul(2).unwrap();
        let pagex3 = page_size.checked_mul(3).unwrap();
        let pagex4 = page_size.checked_mul(4).unwrap();

        let mem = Mmap::accessible_reserved(pagex2, pagex4).expect("allocated memory");

        unsafe {
            mem.make_accessible(pagex3, HostAlignedByteCount::ZERO)
                .expect("make_accessible succeeded");

            mem.make_executable(pagex3.byte_count()..pagex3.byte_count(), false)
                .expect("make_executable succeeded");

            mem.make_readonly(pagex3.byte_count()..pagex3.byte_count())
                .expect("make_readonly succeeded");
        };
    }
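
    /// A small additional sketch of the reserve-then-commit workflow behind
    /// [`Mmap::accessible_reserved`]: reserve a larger mapping up front and
    /// commit the remaining pages later with `make_accessible`. The two/four
    /// page split is an arbitrary example.
    #[test]
    fn commit_reserved_pages() {
        let page_size = HostAlignedByteCount::host_page_size();
        let pagex2 = page_size.checked_mul(2).unwrap();
        let pagex4 = page_size.checked_mul(4).unwrap();

        let mem = Mmap::accessible_reserved(pagex2, pagex4).expect("allocated memory");
        assert_eq!(mem.len_aligned(), pagex4);

        // SAFETY: the mapping was just created and nothing else references it.
        unsafe {
            mem.make_accessible(pagex2, pagex2)
                .expect("make_accessible succeeded");
        }
    }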
}