wasmtime/runtime/vm/memory.rs

//! Memory management for linear memories.
//!
//! This module implements the runtime data structures that manage linear
//! memories for WebAssembly. There are a number of types here, each with its
//! own purpose; the diagram below shows the high-level relationships between
//! them, where an arrow means "builds on top of".
//!
//! ```text
//! ┌─────────────────────┐
//! │                     │
//! │        Memory       ├─────────────┐
//! │                     │             │
//! └──────────┬──────────┘             │
//!            │                        │
//!            │                        │
//!            ▼                        ▼
//! ┌─────────────────────┐     ┌──────────────┐
//! │                     │     │              │
//! │     LocalMemory     │◄────┤ SharedMemory │
//! │                     │     │              │
//! └──────────┬──────────┘     └──────────────┘
//!            │
//!            │
//!            ▼
//! ┌─────────────────────┐
//! │                     │
//! │ RuntimeLinearMemory ├─────────────┬───────────────┐
//! │                     │             │               │
//! └──────────┬──────────┘             │               │
//!            │                        │               │
//!            │                        │               │
//!            ▼                        ▼               ▼
//! ┌─────────────────────┐     ┌──────────────┐     ┌─────┐
//! │                     │     │              │     │     │
//! │      MmapMemory     │     │ StaticMemory │     │ ... │
//! │                     │     │              │     │     │
//! └─────────────────────┘     └──────────────┘     └─────┘
//! ```
//!
//! In more detail:
//!
//! * `Memory` - the root of what's actually stored in a wasm instance. This
//!   implements the high-level embedder APIs one would expect from a wasm
//!   linear memory.
//!
//! * `SharedMemory` - this is one of the two variants of `Memory`. A shared
//!   memory contains a `RwLock<LocalMemory>` where all the real bits happen
//!   within the lock.
//!
//! * `LocalMemory` - this is an owned allocation of a linear memory which
//!   maintains low-level state that's shared between `SharedMemory` and the
//!   instance-local state of `Memory`. One example is that `LocalMemory::grow`
//!   has most of the logic around memory growth.
//!
//! * `RuntimeLinearMemory` - this is a trait which `LocalMemory` delegates to.
//!   This trait is intentionally kept relatively simple so it can be exposed
//!   in Wasmtime's embedder API. It is exposed all the way through
//!   `wasmtime::Config` so embedders can provide arbitrary implementations.
//!
//! * `MmapMemory` - this is an implementation of `RuntimeLinearMemory` in terms
//!   of the platform's mmap primitive.
//!
//! * `StaticMemory` - this is an implementation of `RuntimeLinearMemory`
//!   for the pooling allocator where the base pointer is already allocated
//!   and contents are managed through `MemoryImageSlot`.
//!
//! Other important types for memories are `MemoryImage` and `MemoryImageSlot`
//! which manage CoW state for memories. This is implemented at the
//! `LocalMemory` layer.
//!
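//! As a rough illustration (a sketch, not compiled as a doctest), assuming a
//! memory type `ty`, a set of `tunables`, and computed `minimum`/`maximum`
//! byte sizes are already in hand, the layering above plays out like this for
//! a non-shared memory:
//!
//! ```ignore
//! let creator = DefaultMemoryCreator;
//! // Picks `MmapMemory` or `MallocMemory` depending on the tunables.
//! let alloc = creator.new_memory(&ty, &tunables, minimum, maximum)?;
//! // `LocalMemory` owns the allocation plus any CoW image state.
//! let local = LocalMemory::new(&ty, &tunables, alloc, None)?;
//! // ...and `Memory` is what's ultimately stored in a wasm instance.
//! let memory = Memory::Local(local);
//! ```
//!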
//! FIXME: don't have both RuntimeLinearMemory and wasmtime::LinearMemory, they
//! should be merged together.
//!
//! FIXME: don't have both RuntimeMemoryCreator and wasmtime::MemoryCreator,
//! they should be merged together.

use crate::Engine;
use crate::prelude::*;
use crate::runtime::store::StoreResourceLimiter;
use crate::runtime::vm::vmcontext::VMMemoryDefinition;
#[cfg(has_virtual_memory)]
use crate::runtime::vm::{HostAlignedByteCount, MmapOffset};
use crate::runtime::vm::{MemoryImage, MemoryImageSlot, SendSyncPtr};
use alloc::sync::Arc;
use core::{ops::Range, ptr::NonNull};
use wasmtime_environ::Tunables;

#[cfg(feature = "threads")]
use wasmtime_environ::Trap;

#[cfg(has_virtual_memory)]
mod mmap;
#[cfg(has_virtual_memory)]
pub use self::mmap::MmapMemory;

mod malloc;
pub use self::malloc::MallocMemory;

#[cfg(feature = "pooling-allocator")]
mod static_;
#[cfg(feature = "pooling-allocator")]
use self::static_::StaticMemory;

#[cfg(feature = "threads")]
mod shared_memory;
#[cfg(feature = "threads")]
pub use shared_memory::SharedMemory;

#[cfg(not(feature = "threads"))]
mod shared_memory_disabled;
#[cfg(not(feature = "threads"))]
pub use shared_memory_disabled::SharedMemory;

/// A memory allocator
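///
/// As an illustrative sketch (not compiled as a doctest), a custom creator
/// could wrap the default mmap-backed allocation while adding its own
/// bookkeeping; the `TracingCreator` name here is hypothetical:
///
/// ```ignore
/// struct TracingCreator;
///
/// impl RuntimeMemoryCreator for TracingCreator {
///     fn new_memory(
///         &self,
///         ty: &wasmtime_environ::Memory,
///         tunables: &Tunables,
///         minimum: usize,
///         maximum: Option<usize>,
///     ) -> Result<Box<dyn RuntimeLinearMemory>> {
///         // Custom bookkeeping or tracing could happen here.
///         Ok(Box::new(MmapMemory::new(ty, tunables, minimum, maximum)?))
///     }
/// }
/// ```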
pub trait RuntimeMemoryCreator: Send + Sync {
    /// Create a new `RuntimeLinearMemory`.
    fn new_memory(
        &self,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        minimum: usize,
        maximum: Option<usize>,
    ) -> Result<Box<dyn RuntimeLinearMemory>>;
}

/// A default memory allocator used by Wasmtime
pub struct DefaultMemoryCreator;

impl RuntimeMemoryCreator for DefaultMemoryCreator {
    /// Create a new `MmapMemory`, or a `MallocMemory` when virtual memory
    /// features aren't needed or available.
    fn new_memory(
        &self,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        minimum: usize,
        maximum: Option<usize>,
    ) -> Result<Box<dyn RuntimeLinearMemory>> {
        #[cfg(has_virtual_memory)]
        if tunables.signals_based_traps
            || tunables.memory_guard_size > 0
            || tunables.memory_reservation > 0
            || tunables.memory_init_cow
        {
            return Ok(Box::new(MmapMemory::new(ty, tunables, minimum, maximum)?));
        }

        let _ = maximum;
        Ok(Box::new(MallocMemory::new(ty, tunables, minimum)?))
    }
}

/// A linear memory and its backing storage.
pub trait RuntimeLinearMemory: Send + Sync {
    /// Returns the number of bytes that this linear memory can access.
    fn byte_size(&self) -> usize;

    /// Returns the maximum number of bytes the current allocation can access.
    ///
    /// Growth up to this value should not relocate the base pointer.
    fn byte_capacity(&self) -> usize;

    /// Grow memory to the specified number of bytes.
    ///
    /// Returns an error if memory can't be grown to the specified number of
    /// bytes.
    fn grow_to(&mut self, size: usize) -> Result<()>;

    /// Returns a pointer to the base of this linear memory allocation.
    ///
    /// This is either a raw pointer, or a reference to an mmap along with an
    /// offset within it.
    fn base(&self) -> MemoryBase;

    /// Get a `VMMemoryDefinition` for this linear memory.
    fn vmmemory(&self) -> VMMemoryDefinition;

    /// Internal method for Wasmtime when used in conjunction with CoW images.
    /// This is used to inform the underlying memory that the size of memory has
    /// changed.
    ///
    /// Note that this is hidden and panics by default as embedders using custom
    /// memory without CoW images shouldn't have to worry about this.
    #[doc(hidden)]
    fn set_byte_size(&mut self, len: usize) {
        let _ = len;
        panic!("CoW images used with this memory but it doesn't support them");
    }
}

/// The base pointer of a memory allocation.
#[derive(Clone, Debug)]
pub enum MemoryBase {
    /// A raw pointer into memory.
    ///
    /// This may or may not be host-page-aligned.
    Raw(SendSyncPtr<u8>),

    /// An mmap along with an offset into it.
    #[cfg(has_virtual_memory)]
    Mmap(MmapOffset),
}

impl MemoryBase {
    /// Creates a new `MemoryBase` from a raw pointer.
    ///
    /// The pointer must be non-null, and it must be logically `Send + Sync`.
    pub fn new_raw(ptr: *mut u8) -> Self {
        Self::Raw(NonNull::new(ptr).expect("pointer is non-null").into())
    }

    /// Returns the actual address in memory that is represented by this base.
    pub fn as_non_null(&self) -> NonNull<u8> {
        match self {
            Self::Raw(ptr) => ptr.as_non_null(),
            #[cfg(has_virtual_memory)]
            Self::Mmap(mmap_offset) => mmap_offset.as_non_null(),
        }
    }

    /// Same as `as_non_null`, but returns a raw pointer instead.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.as_non_null().as_ptr()
    }
}

/// Representation of a runtime wasm linear memory.
pub enum Memory {
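    /// A non-shared memory owned by this instance; see [`LocalMemory`].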
    Local(LocalMemory),
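    /// A shared memory; see [`SharedMemory`] and the module docs for details.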
    Shared(SharedMemory),
}

impl Memory {
    /// Create a new dynamic (movable) memory instance for the specified type.
    pub async fn new_dynamic(
        ty: &wasmtime_environ::Memory,
        engine: &Engine,
        creator: &dyn RuntimeMemoryCreator,
        memory_image: Option<&Arc<MemoryImage>>,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(ty, limiter).await?;
        let tunables = engine.tunables();
        let allocation = creator.new_memory(ty, tunables, minimum, maximum)?;

        let memory = LocalMemory::new(ty, tunables, allocation, memory_image)?;
        Ok(if ty.shared {
            Memory::Shared(SharedMemory::wrap(engine, ty, memory)?)
        } else {
            Memory::Local(memory)
        })
    }

    /// Create a new static (immovable) memory instance for the specified type.
    #[cfg(feature = "pooling-allocator")]
    pub async fn new_static(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        base: MemoryBase,
        base_capacity: usize,
        memory_image: MemoryImageSlot,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(ty, limiter).await?;
        let pooled_memory = StaticMemory::new(base, base_capacity, minimum, maximum)?;
        let allocation = Box::new(pooled_memory);

        // Configure some defaults a bit differently for this memory within the
        // `LocalMemory` structure created: notably, we already have a
        // `memory_image`, and regardless of configuration settings this memory
        // can't move its base pointer since it's a fixed allocation.
        let mut memory = LocalMemory::new(ty, tunables, allocation, None)?;
        assert!(memory.memory_image.is_none());
        memory.memory_image = Some(memory_image);
        memory.memory_may_move = false;

        Ok(if ty.shared {
            // FIXME(#4244): not supported with the pooling allocator (which
            // `new_static` is always used with); see `MemoryPool::validate` as
            // well.
            todo!("using shared memory with the pooling allocator is a work in progress");
        } else {
            Memory::Local(memory)
        })
    }

    /// Calls the store's resource limiter, if any, to optionally prevent a
    /// memory from being allocated.
    ///
    /// Returns a tuple of the minimum size and optional maximum size of the
    /// memory, both in bytes.
    pub(crate) async fn limit_new(
        ty: &wasmtime_environ::Memory,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<(usize, Option<usize>)> {
        let page_size = usize::try_from(ty.page_size()).unwrap();

        // This is the absolute possible maximum that the module can try to
        // allocate, which is our entire address space minus a wasm page. That
        // shouldn't ever actually work in terms of an allocation because
        // presumably the kernel wants *something* for itself, but this is
        // passed to the `store`'s limiter as the requested size to approximate
        // the scale of the request that the wasm module is making. This is
        // necessary because the limiter works on `usize` bytes whereas we're
        // working with possibly-overflowing `u64` calculations here. To
        // actually faithfully represent the byte requests of modules we'd have
        // to represent things as `u128`, but that's kinda overkill for this
        // purpose.
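        //
        // (For example, with the default 64 KiB wasm page size on a 64-bit
        // host this works out to 2^64 - 65536 bytes.)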
        let absolute_max = 0usize.wrapping_sub(page_size);

        // If the minimum memory size overflows the size of our own address
        // space, then we can't satisfy this request, but defer the error to
        // later so the `store` can be informed that an effective OOM is
        // happening.
        let minimum = ty
            .minimum_byte_size()
            .ok()
            .and_then(|m| usize::try_from(m).ok());

        // The memory type stores the maximum size in units of wasm pages, but
        // we use units of bytes. Unlike for the `minimum` size we silently clamp
        // the effective maximum size to the limits of what we can track. If the
        // maximum size exceeds `usize` or `u64` then there's no need to further
        // keep track of it as some sort of runtime limit will kick in long
        // before we reach the statically declared maximum size.
        let maximum = ty
            .maximum_byte_size()
            .ok()
            .and_then(|m| usize::try_from(m).ok());

        // Inform the store's limiter what's about to happen. This will let the
        // limiter reject anything if necessary, and this also guarantees that
        // the limiter is called for all requested memories, even if our
        // `minimum` calculation overflowed. This means that the `minimum` we're
        // informing the limiter of is lossy and may not be 100% accurate, but
        // for now the expected uses of the limiter mean that's ok.
        if let Some(limiter) = limiter {
            if !limiter
                .memory_growing(0, minimum.unwrap_or(absolute_max), maximum)
                .await?
            {
                bail!(
                    "memory minimum size of {} pages exceeds memory limits",
                    ty.limits.min
                );
            }
        }

        // At this point we need to actually handle overflows, so bail out with
        // an error if the `minimum` calculation overflowed.
        let minimum = minimum.ok_or_else(|| {
            format_err!(
                "memory minimum size of {} pages exceeds memory limits",
                ty.limits.min
            )
        })?;

        Ok((minimum, maximum))
    }

    /// Returns this memory's page size, in bytes.
    pub fn page_size(&self) -> u64 {
        match self {
            Memory::Local(mem) => mem.page_size(),
            Memory::Shared(mem) => mem.page_size(),
        }
    }

    /// Returns the size of this memory, in bytes.
    pub fn byte_size(&self) -> usize {
        match self {
            Memory::Local(mem) => mem.byte_size(),
            Memory::Shared(mem) => mem.byte_size(),
        }
    }

    /// Returns whether or not this memory needs initialization. It
    /// may not if it already has initial content thanks to a CoW
    /// mechanism.
    pub(crate) fn needs_init(&self) -> bool {
        match self {
            Memory::Local(mem) => mem.needs_init(),
            Memory::Shared(mem) => mem.needs_init(),
        }
    }

    /// Grow memory by the specified number of wasm pages.
    ///
    /// Returns `None` if memory can't be grown by the specified number of
    /// wasm pages. Returns `Some` with the old size of memory, in bytes, on
    /// successful growth.
    ///
    /// # Safety
    ///
    /// Resizing the memory can reallocate the memory buffer for dynamic memories.
    /// An instance's `VMContext` may have pointers to the memory's base and will
    /// need to be fixed up after growing the memory.
    ///
    /// Generally, prefer using `InstanceHandle::memory_grow`, which encapsulates
    /// this unsafety.
    ///
    /// Ensure that the provided store is not used to access any `Memory` which
    /// lives inside it.
    pub async unsafe fn grow(
        &mut self,
        delta_pages: u64,
        limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Option<usize>, Error> {
        let result = match self {
            Memory::Local(mem) => mem.grow(delta_pages, limiter).await?,
            Memory::Shared(mem) => mem.grow(delta_pages)?,
        };
        match result {
            Some((old, _new)) => Ok(Some(old)),
            None => Ok(None),
        }
    }

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
    pub fn vmmemory(&self) -> VMMemoryDefinition {
        match self {
            Memory::Local(mem) => mem.vmmemory(),
            // `vmmemory()` is used for writing the `VMMemoryDefinition` of a
            // memory into its `VMContext`; this should never be possible for a
            // shared memory because the only `VMMemoryDefinition` for it should
            // be stored in its own `def` field.
            Memory::Shared(_) => unreachable!(),
        }
    }

    /// Consume the memory, returning its [`MemoryImageSlot`] if any is present.
    /// The image should only be present for a subset of memories created with
    /// [`Memory::new_static()`].
    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(self) -> MemoryImageSlot {
        match self {
            Memory::Local(mem) => mem.unwrap_static_image(),
            Memory::Shared(_) => panic!("expected a local memory"),
        }
    }

    /// Is this a shared memory?
    pub fn is_shared_memory(&self) -> bool {
        matches!(self, Memory::Shared(_))
    }

    /// If the [Memory] is a [SharedMemory], return a reference to that shared
    /// memory.
    pub fn as_shared_memory(&self) -> Option<&SharedMemory> {
        match self {
            Memory::Local(_) => None,
            Memory::Shared(mem) => Some(mem),
        }
    }

    /// Implementation of `memory.atomic.notify` for all memories.
    #[cfg(feature = "threads")]
    pub fn atomic_notify(&mut self, addr: u64, count: u32) -> Result<u32, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_notify(addr, count),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Ok(0)
            }
        }
    }

    /// Implementation of `memory.atomic.wait32` for all memories.
    #[cfg(feature = "threads")]
    pub fn atomic_wait32(
        &mut self,
        addr: u64,
        expected: u32,
        timeout: Option<core::time::Duration>,
    ) -> Result<crate::WaitResult, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_wait32(addr, expected, timeout),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Implementation of `memory.atomic.wait64` for all memories.
    #[cfg(feature = "threads")]
    pub fn atomic_wait64(
        &mut self,
        addr: u64,
        expected: u64,
        timeout: Option<core::time::Duration>,
    ) -> Result<crate::WaitResult, Trap> {
        match self.as_shared_memory() {
            Some(m) => m.atomic_wait64(addr, expected, timeout),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 8, 8)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Returns the range of bytes that WebAssembly should be able to address in
    /// this linear memory. Note that this includes guard pages which wasm can
    /// hit.
    pub fn wasm_accessible(&self) -> Range<usize> {
        match self {
            Memory::Local(mem) => mem.wasm_accessible(),
            Memory::Shared(mem) => mem.wasm_accessible(),
        }
    }
}

/// An owned allocation of a wasm linear memory.
///
/// This might be part of a `Memory` via `Memory::Local` but it might also be
/// the implementation basis for a `SharedMemory` behind an `RwLock` for
/// example.
pub struct LocalMemory {
    alloc: Box<dyn RuntimeLinearMemory>,
    ty: wasmtime_environ::Memory,
    memory_may_move: bool,
    memory_guard_size: usize,
    memory_reservation: usize,

    /// An optional CoW mapping that provides the initial content of this
    /// memory.
    memory_image: Option<MemoryImageSlot>,
}

impl LocalMemory {
    pub fn new(
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        alloc: Box<dyn RuntimeLinearMemory>,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<LocalMemory> {
        // If a memory image was specified, try to create the MemoryImageSlot on
        // top of our mmap.
        let memory_image = match memory_image {
            #[cfg(has_virtual_memory)]
            Some(image) => {
                // We currently don't support memory_image if
                // `RuntimeLinearMemory::byte_size` is not a multiple of the host page
                // size. See https://github.com/bytecodealliance/wasmtime/issues/9660.
                if let Ok(byte_size) = HostAlignedByteCount::new(alloc.byte_size()) {
                    // memory_image is CoW-based so it is expected to be backed
                    // by an mmap.
                    let mmap_base = match alloc.base() {
                        MemoryBase::Mmap(offset) => offset,
                        MemoryBase::Raw { .. } => {
                            unreachable!("memory_image is Some only for mmap-based memories")
                        }
                    };

                    let mut slot =
                        MemoryImageSlot::create(mmap_base, byte_size, alloc.byte_capacity());
                    slot.instantiate(alloc.byte_size(), Some(image), ty, tunables)?;
                    Some(slot)
                } else {
                    None
                }
            }
            #[cfg(not(has_virtual_memory))]
            Some(_) => unreachable!(),
            None => None,
        };
        Ok(LocalMemory {
            ty: *ty,
            alloc,
            memory_may_move: ty.memory_may_move(tunables),
            memory_image,
            memory_guard_size: tunables.memory_guard_size.try_into().unwrap(),
            memory_reservation: tunables.memory_reservation.try_into().unwrap(),
        })
    }

    pub fn page_size(&self) -> u64 {
        self.ty.page_size()
    }

    /// Grows a memory by `delta_pages`.
    ///
    /// This performs the necessary checks on the growth before delegating to
    /// the underlying `grow_to` implementation.
    ///
    /// The `limiter`, if provided, is consulted to approve the growth and is
    /// informed if the growth fails.
    pub async fn grow(
        &mut self,
        delta_pages: u64,
        mut limiter: Option<&mut StoreResourceLimiter<'_>>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let old_byte_size = self.alloc.byte_size();

        // Wasm spec: when growing by 0 pages, always return the current size.
        if delta_pages == 0 {
            return Ok(Some((old_byte_size, old_byte_size)));
        }

        let page_size = usize::try_from(self.page_size()).unwrap();

        // The largest wasm-page-aligned region of memory that is possible to
        // represent in a `usize`. This will be impossible for the system to
        // actually allocate.
        let absolute_max = 0usize.wrapping_sub(page_size);

        // Calculate the byte size of the new allocation. Let it overflow up to
        // `usize::MAX`, then clamp it down to `absolute_max`.
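        //
        // For example, growing a 1-page (64 KiB) memory by 2 pages yields a
        // new byte size of 3 * 65536 = 196608 bytes, far below `absolute_max`.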
        let new_byte_size = usize::try_from(delta_pages)
            .unwrap_or(usize::MAX)
            .saturating_mul(page_size)
            .saturating_add(old_byte_size)
            .min(absolute_max);

        let maximum = self
            .ty
            .maximum_byte_size()
            .ok()
            .and_then(|n| usize::try_from(n).ok());

        // Store limiter gets first chance to reject memory_growing.
        if let Some(limiter) = &mut limiter {
            if !limiter
                .memory_growing(old_byte_size, new_byte_size, maximum)
                .await?
            {
                return Ok(None);
            }
        }
        // Save the original base pointer to assert the invariant that growth up
        // to the byte capacity never relocates the base pointer.
        let base_ptr_before = self.alloc.base().as_mut_ptr();
        let required_to_not_move_memory = new_byte_size <= self.alloc.byte_capacity();

        let result = (|| -> Result<()> {
            // Never exceed maximum, even if limiter permitted it.
            if let Some(max) = maximum {
                if new_byte_size > max {
                    bail!("Memory maximum size exceeded");
                }
            }

            // If memory isn't allowed to move then don't let growth happen
            // beyond the initial capacity.
            if !self.memory_may_move && new_byte_size > self.alloc.byte_capacity() {
                bail!("Memory maximum size exceeded");
            }

            // If we have a CoW image overlay then let it manage accessible
            // bytes. Once the heap limit is modified inform the underlying
            // allocation that the size has changed.
            //
            // If the growth is going beyond the size of the heap image then
            // discard it. This should only happen for `MmapMemory` where
            // `no_clear_on_drop` is set so the destructor doesn't do anything.
            // For now be maximally sure about this by asserting that memory can
            // indeed move and that we're on unix. If this wants to run
            // somewhere else like Windows or with other allocations this may
            // need adjusting.
            if let Some(image) = &mut self.memory_image {
                if new_byte_size <= self.alloc.byte_capacity() {
                    image.set_heap_limit(new_byte_size)?;
                    self.alloc.set_byte_size(new_byte_size);
                    return Ok(());
                }
                assert!(cfg!(unix));
                assert!(self.memory_may_move);
                self.memory_image = None;
            }

            // And failing all that, fall back to the underlying allocation to
            // grow it.
            self.alloc.grow_to(new_byte_size)
        })();

        match result {
            Ok(()) => {
                // On successful growth double-check that the base pointer
                // didn't move if it shouldn't have.
                if required_to_not_move_memory {
                    assert_eq!(base_ptr_before, self.alloc.base().as_mut_ptr());
                }

                Ok(Some((old_byte_size, new_byte_size)))
            }
            Err(e) => {
                // FIXME: shared memories may not have an associated store to
                // report the growth failure to but the error should not be
                // dropped
                // (https://github.com/bytecodealliance/wasmtime/issues/4240).
                if let Some(limiter) = limiter {
                    limiter.memory_grow_failed(e)?;
                }
                Ok(None)
            }
        }
    }

    pub fn vmmemory(&self) -> VMMemoryDefinition {
        self.alloc.vmmemory()
    }

    pub fn byte_size(&self) -> usize {
        self.alloc.byte_size()
    }

    pub fn needs_init(&self) -> bool {
        match &self.memory_image {
            Some(image) => !image.has_image(),
            None => true,
        }
    }

    pub fn wasm_accessible(&self) -> Range<usize> {
        let base = self.alloc.base().as_mut_ptr() as usize;
        // From the base add:
        //
        // * max(capacity, reservation) -- all memory is guaranteed to have at
        //   least `memory_reservation`, but capacity may go beyond that.
        // * memory_guard_size - wasm is allowed to hit the guard page for
        //   sigsegv for example.
        //
        // and this computes the range that wasm is allowed to access, where any
        // access will either succeed or deterministically trap.
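        //
        // For example, with a (hypothetical) 4 GiB `memory_reservation`, a
        // 32 MiB `memory_guard_size`, and a capacity no larger than the
        // reservation, wasm can address a 4 GiB + 32 MiB window from `base`.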
        let end =
            base + self.alloc.byte_capacity().max(self.memory_reservation) + self.memory_guard_size;
        base..end
    }

    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(self) -> MemoryImageSlot {
        self.memory_image.unwrap()
    }
}

/// In configurations where bounds checks are elided in JIT code (because we
/// are using static memories with virtual memory guard pages) this manual
/// check is here so we don't segfault from Rust. For other configurations,
/// these checks are required anyway.
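///
/// As an illustrative sketch (not compiled as a doctest), given some in-bounds
/// `def`, a misaligned address is rejected before the bounds check:
///
/// ```ignore
/// // A 4-byte access requires 4-byte alignment, so address 2 is misaligned.
/// assert!(matches!(
///     validate_atomic_addr(&def, 2, 4, 4),
///     Err(Trap::HeapMisaligned)
/// ));
/// ```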
#[cfg(feature = "threads")]
pub fn validate_atomic_addr(
    def: &VMMemoryDefinition,
    addr: u64,
    access_size: u64,
    access_alignment: u64,
) -> Result<*mut u8, Trap> {
    debug_assert!(access_alignment.is_power_of_two());
    if addr % access_alignment != 0 {
        return Err(Trap::HeapMisaligned);
    }

    let length = u64::try_from(def.current_length()).unwrap();
    if addr.saturating_add(access_size) > length {
        return Err(Trap::MemoryOutOfBounds);
    }

    let addr = usize::try_from(addr).unwrap();
    Ok(def.base.as_ptr().wrapping_add(addr))