//! Unix `mmap`-based virtual-memory primitives (`wasmtime/runtime/vm/sys/unix/mmap.rs`).

1use crate::prelude::*;
2use crate::runtime::vm::sys::vm::MemoryImageSource;
3use crate::runtime::vm::{HostAlignedByteCount, SendSyncPtr};
4use rustix::mm::{MprotectFlags, mprotect};
5use std::ops::Range;
6use std::ptr::{self, NonNull};
7#[cfg(feature = "std")]
8use std::{fs::File, path::Path};
9
/// Open a file so that it can be mmap'd for executing.
///
/// Returns an error with the offending path in its context if the file
/// cannot be opened. The file is opened read-only; executable mappings of
/// it are created later via `mmap` with `MAP_PRIVATE`.
#[cfg(feature = "std")]
pub fn open_file_for_mmap(path: &Path) -> Result<File> {
    // `with_context` is lazy (only allocates the message on failure) and
    // includes the path, which the previous fixed message did not.
    File::open(path).with_context(|| format!("failed to open file: {}", path.display()))
}
15
/// An owned region of virtual memory obtained from `mmap`, unmapped on drop.
#[derive(Debug)]
pub struct Mmap {
    // Pointer-plus-length for the mapped region. The base is host
    // page-aligned; the length may not be (e.g. for file-backed mappings —
    // see `len`). `SendSyncPtr` asserts Send/Sync; soundness relies on
    // callers coordinating access to the underlying pages.
    memory: SendSyncPtr<[u8]>,
}
20
cfg_if::cfg_if! {
    if #[cfg(any(target_os = "illumos", target_os = "linux"))] {
        // On illumos, by default, mmap reserves what it calls "swap space" ahead of time, so that
        // memory accesses are guaranteed not to fail once mmap succeeds. NORESERVE is for cases
        // where that memory is never meant to be accessed -- e.g. memory that's used as guard
        // pages.
        //
        // This is less crucial on Linux because Linux tends to overcommit memory by default, but is
        // still a good idea to pass in for large allocations that don't need to be backed by
        // physical memory.
        pub(super) const MMAP_NORESERVE_FLAG: rustix::mm::MapFlags =
            rustix::mm::MapFlags::NORESERVE;
    } else {
        // Other unixes: no NORESERVE equivalent is needed (or available), so
        // the flag is a no-op.
        pub(super) const MMAP_NORESERVE_FLAG: rustix::mm::MapFlags = rustix::mm::MapFlags::empty();
    }
}
37
impl Mmap {
    /// Creates a zero-length mapping that owns no virtual memory.
    ///
    /// No syscall is performed; `Drop` skips `munmap` for zero-length
    /// mappings.
    pub fn new_empty() -> Mmap {
        Mmap {
            memory: crate::vm::sys::empty_mmap(),
        }
    }

    /// Allocates a fresh private anonymous mapping of `size` bytes that is
    /// immediately readable and writable.
    pub fn new(size: HostAlignedByteCount) -> Result<Self> {
        // SAFETY: a null address hint requests a fresh kernel-chosen
        // placement, so no existing mapping can be affected.
        let ptr = unsafe {
            rustix::mm::mmap_anonymous(
                ptr::null_mut(),
                size.byte_count(),
                rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
                rustix::mm::MapFlags::PRIVATE | MMAP_NORESERVE_FLAG,
            )?
        };
        let memory = std::ptr::slice_from_raw_parts_mut(ptr.cast(), size.byte_count());
        // A successful mmap never returns null, so the unwrap cannot fail.
        let memory = SendSyncPtr::new(NonNull::new(memory).unwrap());
        Ok(Mmap { memory })
    }

    /// Reserves `size` bytes of virtual address space with no access
    /// permissions: every access faults until a subrange is enabled via
    /// `make_accessible`.
    pub fn reserve(size: HostAlignedByteCount) -> Result<Self> {
        // SAFETY: as in `new`, a null hint yields a fresh kernel-chosen
        // placement.
        let ptr = unsafe {
            rustix::mm::mmap_anonymous(
                ptr::null_mut(),
                size.byte_count(),
                rustix::mm::ProtFlags::empty(),
                // Astute readers might be wondering why a function called "reserve" passes in a
                // NORESERVE flag. That's because "reserve" in this context means one of two
                // different things.
                //
                // * This method is used to allocate virtual memory that starts off in a state where
                //   it cannot be accessed (i.e. causes a segfault if accessed).
                // * NORESERVE is meant for virtual memory space for which backing physical/swap
                //   pages are reserved on first access.
                //
                // Virtual memory that cannot be accessed should not have a backing store reserved
                // for it. Hence, passing in NORESERVE is correct here.
                rustix::mm::MapFlags::PRIVATE | MMAP_NORESERVE_FLAG,
            )?
        };

        let memory = std::ptr::slice_from_raw_parts_mut(ptr.cast(), size.byte_count());
        // A successful mmap never returns null.
        let memory = SendSyncPtr::new(NonNull::new(memory).unwrap());
        Ok(Mmap { memory })
    }

    /// Maps the entire contents of `file` as a private (copy-on-write)
    /// read-write mapping.
    ///
    /// `MAP_PRIVATE` means writes never propagate to the underlying file,
    /// which is why `PROT_WRITE` is valid here even though
    /// `open_file_for_mmap` opens files read-only (per POSIX `mmap`
    /// semantics for private mappings).
    ///
    /// Note the resulting length is the file's size and therefore may not
    /// be host-page-aligned; see `len`.
    #[cfg(feature = "std")]
    pub fn from_file(file: &File) -> Result<Self> {
        let len = file
            .metadata()
            .context("failed to get file metadata")?
            .len();
        // Files larger than the address space cannot be mapped (e.g. a
        // >4 GiB file on a 32-bit host).
        let len = usize::try_from(len).map_err(|_| anyhow::anyhow!("file too large to map"))?;
        // SAFETY: null hint → fresh kernel-chosen placement; the fd is
        // valid for the duration of the call.
        let ptr = unsafe {
            rustix::mm::mmap(
                ptr::null_mut(),
                len,
                rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
                rustix::mm::MapFlags::PRIVATE,
                &file,
                0,
            )
            .context(format!("mmap failed to allocate {len:#x} bytes"))?
        };
        let memory = std::ptr::slice_from_raw_parts_mut(ptr.cast(), len);
        let memory = SendSyncPtr::new(NonNull::new(memory).unwrap());

        Ok(Mmap { memory })
    }

    /// Changes protections on the `start..start+len` subrange of this
    /// mapping to read+write, making previously-reserved pages accessible.
    ///
    /// # Safety
    ///
    /// The byte range must lie within this mapping, and callers must
    /// ensure nothing relies on the previous (inaccessible) protections,
    /// e.g. guard pages.
    pub unsafe fn make_accessible(
        &self,
        start: HostAlignedByteCount,
        len: HostAlignedByteCount,
    ) -> Result<()> {
        let ptr = self.memory.as_ptr();
        // SAFETY: caller guarantees `start + len` is in bounds of this
        // mapping; both are host-page-aligned by construction of
        // `HostAlignedByteCount`, as mprotect requires a page-aligned base.
        unsafe {
            mprotect(
                ptr.byte_add(start.byte_count()).cast(),
                len.byte_count(),
                MprotectFlags::READ | MprotectFlags::WRITE,
            )?;
        }

        Ok(())
    }

    /// Returns the base pointer of this mapping.
    #[inline]
    pub fn as_send_sync_ptr(&self) -> SendSyncPtr<u8> {
        self.memory.cast()
    }

    /// Returns the byte length of this mapping.
    #[inline]
    pub fn len(&self) -> usize {
        // Note: while the start of memory is host page-aligned, the length might
        // not be, and in particular is not aligned for file-backed mmaps. Be
        // careful!
        self.memory.as_ptr().len()
    }

    /// Remaps `range` (byte offsets into this mapping) as read+execute,
    /// flushing instruction caches so freshly-written code can run. When
    /// `enable_branch_protection` is set, PROT_BTI is additionally
    /// requested on Linux/aarch64 hosts that support BTI.
    ///
    /// # Safety
    ///
    /// `range` must lie within this mapping and contain the finished code;
    /// callers must not concurrently write to the range.
    pub unsafe fn make_executable(
        &self,
        range: Range<usize>,
        enable_branch_protection: bool,
    ) -> Result<()> {
        // SAFETY: caller guarantees `range.start` is in bounds.
        let base = unsafe { self.memory.as_ptr().byte_add(range.start).cast() };
        let len = range.end - range.start;

        // Without `std` there is no icache-coherence implementation
        // available here, so refuse rather than publish possibly-stale
        // code.
        if !cfg!(feature = "std") {
            bail!(
                "with the `std` feature disabled at compile time \
                 there must be a custom implementation of publishing \
                 code memory, otherwise it's unknown how to do icache \
                 management"
            );
        }

        // Clear the newly allocated code from cache if the processor requires
        // it
        //
        // Do this before marking the memory as R+X, technically we should be
        // able to do it after but there are some CPU's that have had errata
        // about doing this with read only memory.
        #[cfg(feature = "std")]
        unsafe {
            wasmtime_jit_icache_coherence::clear_cache(base, len).context("failed cache clear")?;
        }

        let flags = MprotectFlags::READ | MprotectFlags::EXEC;
        let flags = if enable_branch_protection {
            // PROT_BTI has no rustix constant, hence the raw bit below; it
            // is only meaningful on aarch64 Linux with BTI detected at
            // runtime.
            #[cfg(all(target_arch = "aarch64", target_os = "linux"))]
            if std::arch::is_aarch64_feature_detected!("bti") {
                MprotectFlags::from_bits_retain(flags.bits() | /* PROT_BTI */ 0x10)
            } else {
                flags
            }

            #[cfg(not(all(target_arch = "aarch64", target_os = "linux")))]
            flags
        } else {
            flags
        };

        // SAFETY: caller guarantees the range is in bounds of this mapping.
        unsafe {
            mprotect(base, len, flags)?;
        }

        // Flush any in-flight instructions from the pipeline
        #[cfg(feature = "std")]
        wasmtime_jit_icache_coherence::pipeline_flush_mt().context("Failed pipeline flush")?;

        Ok(())
    }

    /// Remaps `range` (byte offsets into this mapping) as read-only.
    ///
    /// # Safety
    ///
    /// `range` must lie within this mapping; callers must ensure nothing
    /// still expects to write to the range.
    pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
        // SAFETY: caller guarantees `range.start` is in bounds.
        let base = unsafe { self.memory.as_ptr().byte_add(range.start).cast() };
        let len = range.end - range.start;

        // SAFETY: caller guarantees the range is in bounds of this mapping.
        unsafe {
            mprotect(base, len, MprotectFlags::READ)?;
        }

        Ok(())
    }

    /// Maps `memory_len` bytes of `image_source` at `source_offset` over
    /// the existing pages at `memory_offset` within this mapping, using
    /// MAP_FIXED to replace them in place as a private read-write mapping.
    ///
    /// # Safety
    ///
    /// The target range must lie within this mapping, and callers must
    /// ensure nothing is concurrently using those pages since MAP_FIXED
    /// atomically discards whatever was mapped there.
    pub unsafe fn map_image_at(
        &self,
        image_source: &MemoryImageSource,
        source_offset: u64,
        memory_offset: HostAlignedByteCount,
        memory_len: HostAlignedByteCount,
    ) -> Result<()> {
        // SAFETY: caller guarantees the destination range is in bounds and
        // unused; `memory_offset`/`memory_len` are host-page-aligned by
        // construction, as MAP_FIXED requires.
        unsafe {
            let map_base = self.memory.as_ptr().byte_add(memory_offset.byte_count());
            let ptr = rustix::mm::mmap(
                map_base.cast(),
                memory_len.byte_count(),
                rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
                rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::FIXED,
                image_source.as_file(),
                source_offset,
            )?;
            // With MAP_FIXED a successful mmap returns exactly the
            // requested address.
            assert_eq!(map_base.cast(), ptr);
        };
        Ok(())
    }
}
226
227impl Drop for Mmap {
228    fn drop(&mut self) {
229        unsafe {
230            let ptr = self.memory.as_ptr().cast();
231            let len = self.memory.as_ptr().len();
232            if len == 0 {
233                return;
234            }
235            rustix::mm::munmap(ptr, len).expect("munmap failed");
236        }
237    }
238}