// cranelift_jit/memory.rs

use cranelift_module::{ModuleError, ModuleResult};

#[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
use memmap2::MmapMut;

#[cfg(not(any(feature = "selinux-fix", windows)))]
use std::alloc;
use std::ffi::c_void;
use std::io;
use std::mem;
use std::ptr;
use wasmtime_jit_icache_coherence as icache_coherence;
13
/// A simple struct consisting of a pointer and length.
struct PtrLen {
    // With the `selinux-fix` feature, the allocation is backed by an
    // anonymous mmap; keeping the map alive here keeps `ptr` valid.
    #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
    map: Option<MmapMut>,

    // Start of the allocation; null for the empty `PtrLen` made by `new`.
    ptr: *mut u8,
    // Size of the allocation in bytes. The `with_size` constructors round
    // this up to a whole number of pages; it is 0 for the empty `PtrLen`.
    len: usize,
}
22
impl PtrLen {
    /// Create a new empty `PtrLen`.
    fn new() -> Self {
        Self {
            #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
            map: None,

            ptr: ptr::null_mut(),
            len: 0,
        }
    }

    /// Create a new `PtrLen` pointing to at least `size` bytes of memory,
    /// suitably sized and aligned for memory protection.
    // `selinux-fix` variant: back the allocation with an anonymous mmap,
    // which the feature uses to avoid problems protecting heap memory.
    #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
    fn with_size(size: usize) -> io::Result<Self> {
        // Round the request up to a whole number of pages.
        let alloc_size = region::page::ceil(size as *const ()) as usize;
        MmapMut::map_anon(alloc_size).map(|mut mmap| {
            // The order here is important; we assign the pointer first to get
            // around compile time borrow errors.
            Self {
                ptr: mmap.as_mut_ptr(),
                map: Some(mmap),
                len: alloc_size,
            }
        })
    }

    // Default non-Windows variant: take page-aligned memory straight from
    // the global allocator.
    #[cfg(all(not(target_os = "windows"), not(feature = "selinux-fix")))]
    fn with_size(size: usize) -> io::Result<Self> {
        // A zero-size `Layout` is not valid to allocate; callers must
        // request at least one byte.
        assert_ne!(size, 0);
        let page_size = region::page::size();
        // Round the request up to a whole number of pages.
        let alloc_size = region::page::ceil(size as *const ()) as usize;
        let layout = alloc::Layout::from_size_align(alloc_size, page_size).unwrap();
        // Safety: We assert that the size is non-zero above.
        let ptr = unsafe { alloc::alloc(layout) };

        if !ptr.is_null() {
            Ok(Self {
                ptr,
                len: alloc_size,
            })
        } else {
            // The global allocator returned null; surface it as an
            // out-of-memory I/O error.
            Err(io::Error::from(io::ErrorKind::OutOfMemory))
        }
    }

    // Windows variant: reserve and commit pages directly with `VirtualAlloc`.
    #[cfg(target_os = "windows")]
    fn with_size(size: usize) -> io::Result<Self> {
        use windows_sys::Win32::System::Memory::{
            VirtualAlloc, MEM_COMMIT, MEM_RESERVE, PAGE_READWRITE,
        };

        // VirtualAlloc always rounds up to the next multiple of the page size
        let ptr = unsafe {
            VirtualAlloc(
                ptr::null_mut(),
                size,
                MEM_COMMIT | MEM_RESERVE,
                PAGE_READWRITE,
            )
        };
        if !ptr.is_null() {
            Ok(Self {
                ptr: ptr as *mut u8,
                // Record the page-rounded length to match what VirtualAlloc
                // actually committed.
                len: region::page::ceil(size as *const ()) as usize,
            })
        } else {
            Err(io::Error::last_os_error())
        }
    }
}
95
96// `MMapMut` from `cfg(feature = "selinux-fix")` already deallocates properly.
97#[cfg(all(not(target_os = "windows"), not(feature = "selinux-fix")))]
98impl Drop for PtrLen {
99    fn drop(&mut self) {
100        if !self.ptr.is_null() {
101            let page_size = region::page::size();
102            let layout = alloc::Layout::from_size_align(self.len, page_size).unwrap();
103            unsafe {
104                region::protect(self.ptr, self.len, region::Protection::READ_WRITE)
105                    .expect("unable to unprotect memory");
106                alloc::dealloc(self.ptr, layout)
107            }
108        }
109    }
110}
111
112// TODO: add a `Drop` impl for `cfg(target_os = "windows")`
113
/// Type of branch protection to apply to executable memory.
//
// A fieldless two-variant enum: derive `Copy` so it can be passed by value
// without clones, and `Eq` to accompany `PartialEq` (the equality here is
// total, and clippy's `derive_partial_eq_without_eq` flags the omission).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum BranchProtection {
    /// No protection.
    None,
    /// Use the Branch Target Identification extension of the Arm architecture.
    BTI,
}
122
/// JIT memory manager. This manages pages of suitably aligned and
/// accessible memory. Memory will be leaked by default to have
/// function pointers remain valid for the remainder of the
/// program's life.
pub(crate) struct Memory {
    // Retired allocations, in the order they were filled.
    allocations: Vec<PtrLen>,
    // Number of leading entries of `allocations` that already have their
    // final protection applied; later protection passes skip them.
    already_protected: usize,
    // The allocation currently being carved up by `allocate`.
    current: PtrLen,
    // Byte offset of the next free byte within `current`.
    position: usize,
    // Branch-protection mode applied when memory is made executable.
    branch_protection: BranchProtection,
}
134
// SAFETY: `Memory` is not auto-`Send` only because `PtrLen` contains raw
// pointers. Those pointers refer to allocations owned by this `Memory`
// alone, so moving the whole value to another thread is presumed sound —
// NOTE(review): confirm no other code retains aliases to these regions
// while the `Memory` is moved.
unsafe impl Send for Memory {}
136
137impl Memory {
138    pub(crate) fn new(branch_protection: BranchProtection) -> Self {
139        Self {
140            allocations: Vec::new(),
141            already_protected: 0,
142            current: PtrLen::new(),
143            position: 0,
144            branch_protection,
145        }
146    }
147
148    fn finish_current(&mut self) {
149        self.allocations
150            .push(mem::replace(&mut self.current, PtrLen::new()));
151        self.position = 0;
152    }
153
154    pub(crate) fn allocate(&mut self, size: usize, align: u64) -> io::Result<*mut u8> {
155        let align = usize::try_from(align).expect("alignment too big");
156        if self.position % align != 0 {
157            self.position += align - self.position % align;
158            debug_assert!(self.position % align == 0);
159        }
160
161        if size <= self.current.len - self.position {
162            // TODO: Ensure overflow is not possible.
163            let ptr = unsafe { self.current.ptr.add(self.position) };
164            self.position += size;
165            return Ok(ptr);
166        }
167
168        self.finish_current();
169
170        // TODO: Allocate more at a time.
171        self.current = PtrLen::with_size(size)?;
172        self.position = size;
173
174        Ok(self.current.ptr)
175    }
176
177    /// Set all memory allocated in this `Memory` up to now as readable and executable.
178    pub(crate) fn set_readable_and_executable(&mut self) -> ModuleResult<()> {
179        self.finish_current();
180
181        // Clear all the newly allocated code from cache if the processor requires it
182        //
183        // Do this before marking the memory as R+X, technically we should be able to do it after
184        // but there are some CPU's that have had errata about doing this with read only memory.
185        for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
186            unsafe {
187                icache_coherence::clear_cache(ptr as *const c_void, len)
188                    .expect("Failed cache clear")
189            };
190        }
191
192        let set_region_readable_and_executable = |ptr, len| -> ModuleResult<()> {
193            if self.branch_protection == BranchProtection::BTI {
194                #[cfg(all(target_arch = "aarch64", target_os = "linux"))]
195                if std::arch::is_aarch64_feature_detected!("bti") {
196                    let prot = libc::PROT_EXEC | libc::PROT_READ | /* PROT_BTI */ 0x10;
197
198                    unsafe {
199                        if libc::mprotect(ptr as *mut libc::c_void, len, prot) < 0 {
200                            return Err(ModuleError::Backend(
201                                anyhow::Error::new(io::Error::last_os_error())
202                                    .context("unable to make memory readable+executable"),
203                            ));
204                        }
205                    }
206
207                    return Ok(());
208                }
209            }
210
211            unsafe {
212                region::protect(ptr, len, region::Protection::READ_EXECUTE).map_err(|e| {
213                    ModuleError::Backend(
214                        anyhow::Error::new(e).context("unable to make memory readable+executable"),
215                    )
216                })?;
217            }
218            Ok(())
219        };
220
221        for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
222            set_region_readable_and_executable(ptr, len)?;
223        }
224
225        // Flush any in-flight instructions from the pipeline
226        icache_coherence::pipeline_flush_mt().expect("Failed pipeline flush");
227
228        self.already_protected = self.allocations.len();
229        Ok(())
230    }
231
232    /// Set all memory allocated in this `Memory` up to now as readonly.
233    pub(crate) fn set_readonly(&mut self) -> ModuleResult<()> {
234        self.finish_current();
235
236        for &PtrLen { ptr, len, .. } in self.non_protected_allocations_iter() {
237            unsafe {
238                region::protect(ptr, len, region::Protection::READ).map_err(|e| {
239                    ModuleError::Backend(
240                        anyhow::Error::new(e).context("unable to make memory readonly"),
241                    )
242                })?;
243            }
244        }
245
246        self.already_protected = self.allocations.len();
247        Ok(())
248    }
249
250    /// Iterates non protected memory allocations that are of not zero bytes in size.
251    fn non_protected_allocations_iter(&self) -> impl Iterator<Item = &PtrLen> {
252        let iter = self.allocations[self.already_protected..].iter();
253
254        #[cfg(all(not(target_os = "windows"), feature = "selinux-fix"))]
255        return iter.filter(|&PtrLen { map, len, .. }| *len != 0 && map.is_some());
256
257        #[cfg(any(target_os = "windows", not(feature = "selinux-fix")))]
258        return iter.filter(|&PtrLen { len, .. }| *len != 0);
259    }
260
261    /// Frees all allocated memory regions that would be leaked otherwise.
262    /// Likely to invalidate existing function pointers, causing unsafety.
263    pub(crate) unsafe fn free_memory(&mut self) {
264        self.allocations.clear();
265        self.already_protected = 0;
266    }
267}
268
269impl Drop for Memory {
270    fn drop(&mut self) {
271        // leak memory to guarantee validity of function pointers
272        mem::replace(&mut self.allocations, Vec::new())
273            .into_iter()
274            .for_each(mem::forget);
275    }
276}