wasmtime/runtime/vm/memory.rs
//! Memory management for linear memories.
//!
//! This module implements the runtime data structures that manage linear
//! memories for WebAssembly. There are a number of types here, each with its
//! own purpose, and the diagram below shows the high-level relationships
//! between them, where an arrow means "builds on top of".
//!
//! ```text
//! ┌─────────────────────┐
//! │                     │
//! │       Memory        ├─────────────┐
//! │                     │             │
//! └──────────┬──────────┘             │
//!            │                        │
//!            │                        │
//!            ▼                        ▼
//! ┌─────────────────────┐     ┌──────────────┐
//! │                     │     │              │
//! │     LocalMemory     │◄────┤ SharedMemory │
//! │                     │     │              │
//! └──────────┬──────────┘     └──────────────┘
//!            │
//!            │
//!            ▼
//! ┌─────────────────────┐
//! │                     │
//! │ RuntimeLinearMemory ├─────────────┬───────────────┐
//! │                     │             │               │
//! └──────────┬──────────┘             │               │
//!            │                        │               │
//!            │                        │               │
//!            ▼                        ▼               ▼
//! ┌─────────────────────┐     ┌──────────────┐     ┌─────┐
//! │                     │     │              │     │     │
//! │      MmapMemory     │     │ StaticMemory │     │ ... │
//! │                     │     │              │     │     │
//! └─────────────────────┘     └──────────────┘     └─────┘
//! ```
//!
//! In more detail:
//!
//! * `Memory` - the root of what's actually stored in a wasm instance. This
//!   implements the high-level embedder APIs one would expect from a wasm
//!   linear memory.
//!
//! * `SharedMemory` - this is one of the variants of `Memory`. A shared
//!   memory contains an `RwLock<LocalMemory>` where all the real bits happen
//!   within the lock.
//!
//! * `LocalMemory` - this is an owned allocation of a linear memory which
//!   maintains low-level state that's shared between `SharedMemory` and the
//!   instance-local state of `Memory`. One example is that `LocalMemory::grow`
//!   has most of the logic around memory growth.
//!
//! * `RuntimeLinearMemory` - this is a trait which `LocalMemory` delegates to.
//!   This trait is intentionally relatively simple to be exposed in Wasmtime's
//!   embedder API. This is exposed all the way through `wasmtime::Config` so
//!   embedders can provide arbitrary implementations.
//!
//! * `MmapMemory` - this is an implementation of `RuntimeLinearMemory` in terms
//!   of the platform's mmap primitive.
//!
//! * `StaticMemory` - this is an implementation of `RuntimeLinearMemory`
//!   for the pooling allocator where the base pointer is already allocated
//!   and contents are managed through `MemoryImageSlot`.
//!
//! Other important types for memories are `MemoryImage` and `MemoryImageSlot`
//! which manage CoW state for memories. This is implemented at the
//! `LocalMemory` layer.
//!
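//! As a rough, hypothetical sketch (not a doctest) of how these layers are
//! wired together, assuming a `ty`, `tunables`, `minimum`, `maximum`, and
//! optional CoW `image` are already at hand:
//!
//! ```ignore
//! // A `RuntimeMemoryCreator` produces the backing `RuntimeLinearMemory`...
//! let alloc = DefaultMemoryCreator.new_memory(&ty, &tunables, minimum, maximum)?;
//! // ...which `LocalMemory` wraps, optionally applying a CoW `MemoryImage`...
//! let local = LocalMemory::new(&ty, &tunables, alloc, image.as_ref())?;
//! // ...and `Memory` is what a wasm instance ultimately stores.
//! let memory = Memory::Local(local);
//! ```
//!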
//! FIXME: don't have both RuntimeLinearMemory and wasmtime::LinearMemory, they
//! should be merged together.
//!
//! FIXME: don't have both RuntimeMemoryCreator and wasmtime::MemoryCreator,
//! they should be merged together.

use crate::prelude::*;
use crate::runtime::vm::vmcontext::VMMemoryDefinition;
#[cfg(has_virtual_memory)]
use crate::runtime::vm::{HostAlignedByteCount, MmapOffset};
use crate::runtime::vm::{MemoryImage, MemoryImageSlot, SendSyncPtr, VMStore};
use alloc::sync::Arc;
use core::{ops::Range, ptr::NonNull};
use wasmtime_environ::Tunables;

#[cfg(feature = "threads")]
use wasmtime_environ::Trap;

#[cfg(has_virtual_memory)]
mod mmap;
#[cfg(has_virtual_memory)]
pub use self::mmap::MmapMemory;

mod malloc;
pub use self::malloc::MallocMemory;

#[cfg(feature = "pooling-allocator")]
mod static_;
#[cfg(feature = "pooling-allocator")]
use self::static_::StaticMemory;

#[cfg(feature = "threads")]
mod shared_memory;
#[cfg(feature = "threads")]
pub use shared_memory::SharedMemory;

#[cfg(not(feature = "threads"))]
mod shared_memory_disabled;
#[cfg(not(feature = "threads"))]
pub use shared_memory_disabled::SharedMemory;

/// A memory allocator
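///
/// As a hedged illustration (not a doctest), a custom creator that always
/// uses the heap-allocated `MallocMemory` backend could look roughly like the
/// hypothetical `MallocOnlyCreator` below; the argument plumbing mirrors
/// `DefaultMemoryCreator`:
///
/// ```ignore
/// struct MallocOnlyCreator;
///
/// impl RuntimeMemoryCreator for MallocOnlyCreator {
///     fn new_memory(
///         &self,
///         ty: &wasmtime_environ::Memory,
///         tunables: &Tunables,
///         minimum: usize,
///         maximum: Option<usize>,
///     ) -> Result<Box<dyn RuntimeLinearMemory>> {
///         // `MallocMemory::new` doesn't take `maximum`; limits are enforced
///         // at the `LocalMemory` layer during growth.
///         let _ = maximum;
///         Ok(Box::new(MallocMemory::new(ty, tunables, minimum)?))
///     }
/// }
/// ```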
pub trait RuntimeMemoryCreator: Send + Sync {
    /// Create new RuntimeLinearMemory
    fn new_memory(
        &self,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        minimum: usize,
        maximum: Option<usize>,
    ) -> Result<Box<dyn RuntimeLinearMemory>>;
}

/// A default memory allocator used by Wasmtime
pub struct DefaultMemoryCreator;

impl RuntimeMemoryCreator for DefaultMemoryCreator {
    /// Create a new `MmapMemory`, or fall back to `MallocMemory` when no
    /// virtual-memory-based features are in use.
    fn new_memory(
        &self,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        minimum: usize,
        maximum: Option<usize>,
    ) -> Result<Box<dyn RuntimeLinearMemory>> {
        #[cfg(has_virtual_memory)]
        if tunables.signals_based_traps
            || tunables.memory_guard_size > 0
            || tunables.memory_reservation > 0
            || tunables.memory_init_cow
        {
            return Ok(Box::new(MmapMemory::new(ty, tunables, minimum, maximum)?));
        }

        let _ = maximum;
        Ok(Box::new(MallocMemory::new(ty, tunables, minimum)?))
    }
}

/// A linear memory and its backing storage.
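///
/// A minimal sketch (not a doctest) of the size/capacity contract, assuming
/// some `mem: &mut dyn RuntimeLinearMemory` is already in scope:
///
/// ```ignore
/// let base_before = mem.base().as_non_null();
/// assert!(mem.byte_size() <= mem.byte_capacity());
///
/// // Growth up to `byte_capacity` must not relocate the base pointer.
/// let target = mem.byte_capacity();
/// mem.grow_to(target)?;
/// assert_eq!(base_before, mem.base().as_non_null());
/// ```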
pub trait RuntimeLinearMemory: Send + Sync {
    /// Returns the number of bytes that this linear memory can access.
    fn byte_size(&self) -> usize;

    /// Returns the maximal number of bytes the current allocation can access.
    ///
    /// Growth up to this value should not relocate the base pointer.
    fn byte_capacity(&self) -> usize;

    /// Grow memory to the specified number of bytes.
    ///
    /// Returns an error if memory can't be grown by the specified number
    /// of bytes.
    fn grow_to(&mut self, size: usize) -> Result<()>;

    /// Returns a pointer to the base of this linear memory allocation.
    ///
    /// This is either a raw pointer, or a reference to an mmap along with an
    /// offset within it.
    fn base(&self) -> MemoryBase;

    /// Get a `VMMemoryDefinition` for this linear memory.
    fn vmmemory(&self) -> VMMemoryDefinition;

    /// Internal method for Wasmtime when used in conjunction with CoW images.
    /// This is used to inform the underlying memory that the size of memory has
    /// changed.
    ///
    /// Note that this is hidden and panics by default as embedders using custom
    /// memory without CoW images shouldn't have to worry about this.
    #[doc(hidden)]
    fn set_byte_size(&mut self, len: usize) {
        let _ = len;
        panic!("CoW images used with this memory and it doesn't support it");
    }
}

/// The base pointer of a memory allocation.
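///
/// A minimal sketch (not a doctest) of the raw-pointer variant, where
/// `backing` is a hypothetical allocation standing in for a real linear
/// memory:
///
/// ```ignore
/// let mut backing = vec![0u8; 0x10000];
/// let base = MemoryBase::new_raw(backing.as_mut_ptr());
/// assert_eq!(base.as_mut_ptr(), backing.as_mut_ptr());
/// ```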
#[derive(Clone, Debug)]
pub enum MemoryBase {
    /// A raw pointer into memory.
    ///
    /// This may or may not be host-page-aligned.
    Raw(SendSyncPtr<u8>),

    /// An mmap along with an offset into it.
    #[cfg(has_virtual_memory)]
    Mmap(MmapOffset),
}

impl MemoryBase {
    /// Creates a new `MemoryBase` from a raw pointer.
    ///
    /// The pointer must be non-null, and it must be logically `Send + Sync`.
    pub fn new_raw(ptr: *mut u8) -> Self {
        Self::Raw(NonNull::new(ptr).expect("pointer is non-null").into())
    }

    /// Returns the actual memory address in memory that is represented by this
    /// base.
    pub fn as_non_null(&self) -> NonNull<u8> {
        match self {
            Self::Raw(ptr) => ptr.as_non_null(),
            #[cfg(has_virtual_memory)]
            Self::Mmap(mmap_offset) => mmap_offset.as_non_null(),
        }
    }

    /// Same as `as_non_null`, but different return type.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.as_non_null().as_ptr()
    }
}

/// Representation of a runtime wasm linear memory.
pub enum Memory {
    Local(LocalMemory),
    Shared(SharedMemory),
}

impl Memory {
    /// Create a new dynamic (movable) memory instance for the specified plan.
    pub fn new_dynamic(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        creator: &dyn RuntimeMemoryCreator,
        store: &mut dyn VMStore,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(ty, Some(store))?;
        let allocation = creator.new_memory(ty, tunables, minimum, maximum)?;

        let memory = LocalMemory::new(ty, tunables, allocation, memory_image)?;
        Ok(if ty.shared {
            Memory::Shared(SharedMemory::wrap(ty, memory)?)
        } else {
            Memory::Local(memory)
        })
    }

    /// Create a new static (immovable) memory instance for the specified plan.
    #[cfg(feature = "pooling-allocator")]
    pub fn new_static(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        base: MemoryBase,
        base_capacity: usize,
        memory_image: MemoryImageSlot,
        store: &mut dyn VMStore,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(ty, Some(store))?;
        let pooled_memory = StaticMemory::new(base, base_capacity, minimum, maximum)?;
        let allocation = Box::new(pooled_memory);

        // Configure some defaults a bit differently for this memory within the
        // `LocalMemory` structure created, notably we already have
        // `memory_image` and regardless of configuration settings this memory
        // can't move its base pointer since it's a fixed allocation.
        let mut memory = LocalMemory::new(ty, tunables, allocation, None)?;
        assert!(memory.memory_image.is_none());
        memory.memory_image = Some(memory_image);
        memory.memory_may_move = false;

        Ok(if ty.shared {
            // FIXME(#4244): not supported with the pooling allocator (which
            // `new_static` is always used with); see `MemoryPool::validate` as
            // well.
            todo!("using shared memory with the pooling allocator is a work in progress");
        } else {
            Memory::Local(memory)
        })
    }

    /// Calls the `store`'s limiter to optionally prevent a memory from being allocated.
    ///
    /// Returns a tuple of the minimum size and optional maximum size of the
    /// memory, both in bytes.
    pub(crate) fn limit_new(
        ty: &wasmtime_environ::Memory,
        store: Option<&mut dyn VMStore>,
    ) -> Result<(usize, Option<usize>)> {
        let page_size = usize::try_from(ty.page_size()).unwrap();

        // This is the absolute possible maximum that the module can try to
        // allocate, which is our entire address space minus a wasm page. That
        // shouldn't ever actually work in terms of an allocation because
        // presumably the kernel wants *something* for itself, but this is used
        // to pass to the `store`'s limiter for a requested size to approximate
        // the scale of the request that the wasm module is making. This is
        // necessary because the limiter works on `usize` bytes whereas we're
        // working with possibly-overflowing `u64` calculations here. To
        // actually faithfully represent the byte requests of modules we'd have
        // to represent things as `u128`, but that's kinda overkill for this
        // purpose.
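        // (For example, with the default 64 KiB wasm page size on a 64-bit
        // host, `absolute_max` works out to `usize::MAX - 0xffff`, i.e.
        // 0xffff_ffff_ffff_0000.)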
        let absolute_max = 0usize.wrapping_sub(page_size);

        // If the minimum memory size overflows the size of our own address
        // space, then we can't satisfy this request, but defer the error to
        // later so the `store` can be informed that an effective oom is
        // happening.
        let minimum = ty
            .minimum_byte_size()
            .ok()
            .and_then(|m| usize::try_from(m).ok());

        // The plan stores the maximum size in units of wasm pages, but we
        // use units of bytes. Unlike for the `minimum` size we silently clamp
        // the effective maximum size to the limits of what we can track. If the
        // maximum size exceeds `usize` or `u64` then there's no need to further
        // keep track of it as some sort of runtime limit will kick in long
        // before we reach the statically declared maximum size.
        let maximum = ty
            .maximum_byte_size()
            .ok()
            .and_then(|m| usize::try_from(m).ok());

        // Inform the store's limiter what's about to happen. This will let the
        // limiter reject anything if necessary, and this also guarantees that
        // we should call the limiter for all requested memories, even if our
        // `minimum` calculation overflowed. This means that the `minimum` we're
        // informing the limiter of is lossy and may not be 100% accurate, but
        // for now the expected uses of the limiter mean that's ok.
        if let Some(store) = store {
            if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? {
                bail!(
                    "memory minimum size of {} pages exceeds memory limits",
                    ty.limits.min
                );
            }
        }

        // At this point we need to actually handle overflows, so bail out with
        // an error if we made it this far.
        let minimum = minimum.ok_or_else(|| {
            format_err!(
                "memory minimum size of {} pages exceeds memory limits",
                ty.limits.min
            )
        })?;

        Ok((minimum, maximum))
    }

    /// Returns this memory's page size, in bytes.
    pub fn page_size(&self) -> u64 {
        match self {
            Memory::Local(mem) => mem.page_size(),
            Memory::Shared(mem) => mem.page_size(),
        }
    }

    /// Returns the size of this memory, in bytes.
    pub fn byte_size(&self) -> usize {
        match self {
            Memory::Local(mem) => mem.byte_size(),
            Memory::Shared(mem) => mem.byte_size(),
        }
    }

    /// Returns whether or not this memory needs initialization. It
    /// may not if it already has initial content thanks to a CoW
    /// mechanism.
    pub(crate) fn needs_init(&self) -> bool {
        match self {
            Memory::Local(mem) => mem.needs_init(),
            Memory::Shared(mem) => mem.needs_init(),
        }
    }

    /// Grow memory by the specified number of wasm pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number
    /// of wasm pages. Returns `Some` with the old size of memory, in bytes, on
    /// successful growth.
    ///
    /// # Safety
    ///
    /// Resizing the memory can reallocate the memory buffer for dynamic memories.
    /// An instance's `VMContext` may have pointers to the memory's base and will
    /// need to be fixed up after growing the memory.
    ///
    /// Generally, prefer using `InstanceHandle::memory_grow`, which encapsulates
    /// this unsafety.
    ///
    /// Ensure that the provided Store is not used to get access to any Memory
    /// which lives inside it.
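    ///
    /// A hypothetical (non-doctest) sketch, assuming `memory: &mut Memory` and
    /// `store: &mut dyn VMStore` are in scope and the safety conditions above
    /// are upheld:
    ///
    /// ```ignore
    /// // Grow by one wasm page; `Some(old)` is the previous size in bytes.
    /// if let Some(_old_byte_size) = unsafe { memory.grow(1, Some(store))? } {
    ///     // Any `VMMemoryDefinition` cached in a `VMContext` must be
    ///     // refreshed (e.g. via `vmmemory()`) since the base may have moved.
    ///     let _refreshed = memory.vmmemory();
    /// }
    /// ```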
    pub unsafe fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn VMStore>,
    ) -> Result<Option<usize>, Error> {
        let result = match self {
            Memory::Local(mem) => mem.grow(delta_pages, store)?,
            Memory::Shared(mem) => mem.grow(delta_pages, store)?,
        };
        match result {
            Some((old, _new)) => Ok(Some(old)),
            None => Ok(None),
        }
    }

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
    pub fn vmmemory(&self) -> VMMemoryDefinition {
        match self {
            Memory::Local(mem) => mem.vmmemory(),
            // `vmmemory()` is used for writing the `VMMemoryDefinition` of a
            // memory into its `VMContext`; this should never be possible for a
            // shared memory because the only `VMMemoryDefinition` for it should
            // be stored in its own `def` field.
            Memory::Shared(_) => unreachable!(),
        }
    }

    /// Consume the memory, returning its [`MemoryImageSlot`] if any is present.
    /// The image should only be present for a subset of memories created with
    /// [`Memory::new_static()`].
    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(self) -> MemoryImageSlot {
        match self {
            Memory::Local(mem) => mem.unwrap_static_image(),
            Memory::Shared(_) => panic!("expected a local memory"),
        }
    }

    /// Is this a shared memory?
    pub fn is_shared_memory(&self) -> bool {
        matches!(self, Memory::Shared(_))
    }

    /// If the [Memory] is a [SharedMemory], unwrap it and return a mutable
    /// reference to that shared memory.
    pub fn as_shared_memory(&mut self) -> Option<&mut SharedMemory> {
        match self {
            Memory::Local(_) => None,
            Memory::Shared(mem) => Some(mem),
        }
    }

    /// Implementation of `memory.atomic.notify` for all memories.
    #[cfg(feature = "threads")]
    pub fn atomic_notify(&mut self, addr: u64, count: u32) -> Result<u32, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_notify(addr, count),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Ok(0)
            }
        }
    }

    /// Implementation of `memory.atomic.wait32` for all memories.
    #[cfg(feature = "threads")]
    pub fn atomic_wait32(
        &mut self,
        addr: u64,
        expected: u32,
        timeout: Option<core::time::Duration>,
    ) -> Result<crate::WaitResult, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_wait32(addr, expected, timeout),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Implementation of `memory.atomic.wait64` for all memories.
    #[cfg(feature = "threads")]
    pub fn atomic_wait64(
        &mut self,
        addr: u64,
        expected: u64,
        timeout: Option<core::time::Duration>,
    ) -> Result<crate::WaitResult, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_wait64(addr, expected, timeout),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 8, 8)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Returns the range of bytes that WebAssembly should be able to address in
    /// this linear memory. Note that this includes guard pages which wasm can
    /// hit.
    pub fn wasm_accessible(&self) -> Range<usize> {
        match self {
            Memory::Local(mem) => mem.wasm_accessible(),
            Memory::Shared(mem) => mem.wasm_accessible(),
        }
    }
}

/// An owned allocation of a wasm linear memory.
///
/// This might be part of a `Memory` via `Memory::Local` but it might also be
/// the implementation basis for a `SharedMemory` behind an `RwLock` for
/// example.
pub struct LocalMemory {
    alloc: Box<dyn RuntimeLinearMemory>,
    ty: wasmtime_environ::Memory,
    memory_may_move: bool,
    memory_guard_size: usize,
    memory_reservation: usize,

    /// An optional CoW mapping that provides the initial content of this
    /// memory.
    memory_image: Option<MemoryImageSlot>,
}

impl LocalMemory {
    pub fn new(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        alloc: Box<dyn RuntimeLinearMemory>,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<LocalMemory> {
        // If a memory image was specified, try to create the MemoryImageSlot on
        // top of our mmap.
        let memory_image = match memory_image {
            #[cfg(has_virtual_memory)]
            Some(image) => {
                // We currently don't support memory_image if
                // `RuntimeLinearMemory::byte_size` is not a multiple of the
                // host page size. See
                // https://github.com/bytecodealliance/wasmtime/issues/9660.
                if let Ok(byte_size) = HostAlignedByteCount::new(alloc.byte_size()) {
                    // memory_image is CoW-based so it is expected to be backed
                    // by an mmap.
                    let mmap_base = match alloc.base() {
                        MemoryBase::Mmap(offset) => offset,
                        MemoryBase::Raw { .. } => {
                            unreachable!("memory_image is Some only for mmap-based memories")
                        }
                    };

                    let mut slot =
                        MemoryImageSlot::create(mmap_base, byte_size, alloc.byte_capacity());
                    // On drop, we will unmap our mmap'd range that this slot
                    // was mapped on top of, so there is no need for the slot to
                    // wipe it with an anonymous mapping first.
                    //
                    // Note that this code would be incorrect if clear-on-drop
                    // were enabled. That's because:
                    //
                    // * In the struct definition, `memory_image` above is listed
                    //   after `alloc`.
                    // * Rust drops fields in the order they're defined, so
                    //   `memory_image` would be dropped after `alloc`.
                    // * `alloc` can represent either owned memory (i.e. the mmap is
                    //   freed on drop) or logically borrowed memory (something else
                    //   manages the mmap).
                    // * If `alloc` is borrowed memory, then this isn't an issue.
                    // * But if `alloc` is owned memory, then it would first drop
                    //   the mmap, and then `memory_image` would try to remap
                    //   part of that same memory as part of clear-on-drop.
                    //
                    // A lot of this really suggests representing the ownership
                    // via Rust lifetimes -- that would be a major refactor,
                    // though.
                    slot.no_clear_on_drop();
                    slot.instantiate(alloc.byte_size(), Some(image), ty, tunables)?;
                    Some(slot)
                } else {
                    None
                }
            }
            #[cfg(not(has_virtual_memory))]
            Some(_) => unreachable!(),
            None => None,
        };
        Ok(LocalMemory {
            ty: *ty,
            alloc,
            memory_may_move: ty.memory_may_move(tunables),
            memory_image,
            memory_guard_size: tunables.memory_guard_size.try_into().unwrap(),
            memory_reservation: tunables.memory_reservation.try_into().unwrap(),
        })
    }

    pub fn page_size(&self) -> u64 {
        self.ty.page_size()
    }

    /// Grows a memory by `delta_pages`.
    ///
    /// This performs the necessary checks on the growth before delegating to
    /// the underlying `grow_to` implementation.
    ///
    /// The `store` is used only for error reporting.
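    ///
    /// A minimal sketch (not a doctest) of the page-to-byte arithmetic,
    /// assuming `memory: &mut LocalMemory` uses the default 64 KiB page size
    /// and neither a limiter nor a declared maximum intervenes:
    ///
    /// ```ignore
    /// if let Some((old, new)) = memory.grow(2, None)? {
    ///     assert_eq!(new - old, 2 * 65536);
    /// }
    /// ```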
    pub fn grow(
        &mut self,
        delta_pages: u64,
        mut store: Option<&mut dyn VMStore>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let old_byte_size = self.alloc.byte_size();

        // Wasm spec: when growing by 0 pages, always return the current size.
        if delta_pages == 0 {
            return Ok(Some((old_byte_size, old_byte_size)));
        }

        let page_size = usize::try_from(self.page_size()).unwrap();

        // The largest wasm-page-aligned region of memory it is possible to
        // represent in a `usize`. This will be impossible for the system to
        // actually allocate.
        let absolute_max = 0usize.wrapping_sub(page_size);

        // Calculate the byte size of the new allocation. Let it overflow up to
        // `usize::MAX`, then clamp it down to `absolute_max`.
        let new_byte_size = usize::try_from(delta_pages)
            .unwrap_or(usize::MAX)
            .saturating_mul(page_size)
            .saturating_add(old_byte_size)
            .min(absolute_max);

        let maximum = self
            .ty
            .maximum_byte_size()
            .ok()
            .and_then(|n| usize::try_from(n).ok());

        // Store limiter gets first chance to reject memory_growing.
        if let Some(store) = &mut store {
            if !store.memory_growing(old_byte_size, new_byte_size, maximum)? {
                return Ok(None);
            }
        }

        // Save the original base pointer to assert the invariant that growth up
        // to the byte capacity never relocates the base pointer.
        let base_ptr_before = self.alloc.base().as_mut_ptr();
        let required_to_not_move_memory = new_byte_size <= self.alloc.byte_capacity();

        let result = (|| -> Result<()> {
            // Never exceed maximum, even if limiter permitted it.
            if let Some(max) = maximum {
                if new_byte_size > max {
                    bail!("Memory maximum size exceeded");
                }
            }

            // If memory isn't allowed to move then don't let growth happen
            // beyond the initial capacity.
            if !self.memory_may_move && new_byte_size > self.alloc.byte_capacity() {
                bail!("Memory maximum size exceeded");
            }

            // If we have a CoW image overlay then let it manage accessible
            // bytes. Once the heap limit is modified inform the underlying
            // allocation that the size has changed.
            //
            // If the growth is going beyond the size of the heap image then
            // discard it. This should only happen for `MmapMemory` where
            // `no_clear_on_drop` is set so the destructor doesn't do anything.
            // For now be maximally sure about this by asserting that memory can
            // indeed move and that we're on unix. If this wants to run
            // somewhere else like Windows or with other allocations this may
            // need adjusting.
            if let Some(image) = &mut self.memory_image {
                if new_byte_size <= self.alloc.byte_capacity() {
                    image.set_heap_limit(new_byte_size)?;
                    self.alloc.set_byte_size(new_byte_size);
                    return Ok(());
                }
                assert!(cfg!(unix));
                assert!(self.memory_may_move);
                self.memory_image = None;
            }

            // And failing all that fall back to the underlying allocation to
            // grow it.
            self.alloc.grow_to(new_byte_size)
        })();

        match result {
            Ok(()) => {
                // On successful growth double-check that the base pointer
                // didn't move if it shouldn't have.
                if required_to_not_move_memory {
                    assert_eq!(base_ptr_before, self.alloc.base().as_mut_ptr());
                }

                Ok(Some((old_byte_size, new_byte_size)))
            }
            Err(e) => {
                // FIXME: shared memories may not have an associated store to
                // report the growth failure to but the error should not be
                // dropped
                // (https://github.com/bytecodealliance/wasmtime/issues/4240).
                if let Some(store) = store {
                    store.memory_grow_failed(e)?;
                }
                Ok(None)
            }
        }
    }

    pub fn vmmemory(&self) -> VMMemoryDefinition {
        self.alloc.vmmemory()
    }

    pub fn byte_size(&self) -> usize {
        self.alloc.byte_size()
    }

    pub fn needs_init(&self) -> bool {
        match &self.memory_image {
            Some(image) => !image.has_image(),
            None => true,
        }
    }

    pub fn wasm_accessible(&self) -> Range<usize> {
        let base = self.alloc.base().as_mut_ptr() as usize;
        // From the base add:
        //
        // * max(capacity, reservation) -- all memory is guaranteed to have at
        //   least `memory_reservation`, but capacity may go beyond that.
        // * memory_guard_size - wasm is allowed to hit the guard page for
        //   sigsegv for example.
        //
        // and this computes the range that wasm is allowed to load from and
        // deterministically trap or succeed.
        let end =
            base + self.alloc.byte_capacity().max(self.memory_reservation) + self.memory_guard_size;
        base..end
    }

    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(self) -> MemoryImageSlot {
        self.memory_image.unwrap()
    }
}

/// In the configurations where bounds checks were elided in JIT code (because
/// we are using static memories with virtual memory guard pages) this manual
/// check is here so we don't segfault from Rust. For other configurations,
/// these checks are required anyway.
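///
/// A hypothetical (non-doctest) sketch, assuming `def` is the
/// `VMMemoryDefinition` of the memory being accessed:
///
/// ```ignore
/// // A 4-byte atomic access at offset 16 must be 4-byte aligned and within
/// // `def.current_length()`; on success the host address is returned.
/// let host_ptr = validate_atomic_addr(&def, 16, 4, 4)?;
/// ```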
753#[cfg(feature = "threads")]
754pub fn validate_atomic_addr(
755 def: &VMMemoryDefinition,
756 addr: u64,
757 access_size: u64,
758 access_alignment: u64,
759) -> Result<*mut u8, Trap> {
760 debug_assert!(access_alignment.is_power_of_two());
761 if !(addr % access_alignment == 0) {
762 return Err(Trap::HeapMisaligned);
763 }
764
765 let length = u64::try_from(def.current_length()).unwrap();
766 if !(addr.saturating_add(access_size) < length) {
767 return Err(Trap::MemoryOutOfBounds);
768 }
769
770 let addr = usize::try_from(addr).unwrap();
771 Ok(def.base.as_ptr().wrapping_add(addr))
772}