wasmtime/runtime/vm/memory/shared_memory.rs

use crate::prelude::*;
use crate::runtime::vm::memory::{validate_atomic_addr, LocalMemory, MmapMemory};
use crate::runtime::vm::parking_spot::{ParkingSpot, Waiter};
use crate::runtime::vm::{Memory, VMMemoryDefinition, VMStore, WaitResult};
use std::cell::RefCell;
use std::ops::Range;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use wasmtime_environ::{Trap, Tunables};

/// For shared memory (and only for shared memory), this lock-guarded version
/// restricts access when growing the memory or checking its size. This is to
/// conform with the [thread proposal]: "When `IsSharedArrayBuffer(...)` is
/// true, the return value should be the result of an atomic read-modify-write
/// of the new size to the internal `length` slot."
///
/// [thread proposal]:
///     https://github.com/WebAssembly/threads/blob/master/proposals/threads/Overview.md#webassemblymemoryprototypegrow
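///
/// A minimal usage sketch (`ty` and `tunables` here are assumed to be a
/// shared memory type with a declared maximum and this engine's `Tunables`;
/// illustrative, not a doctest):
///
/// ```ignore
/// // Construct the backing memory once; clones share the same allocation.
/// let shared = SharedMemory::new(&ty, &tunables)?;
/// let handle = shared.clone();
/// // Growth through one handle is observed through all of them.
/// shared.grow(1, None)?;
/// assert_eq!(shared.byte_size(), handle.byte_size());
/// ```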
#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);

struct SharedMemoryInner {
    memory: RwLock<LocalMemory>,
    spot: ParkingSpot,
    ty: wasmtime_environ::Memory,
    def: LongTermVMMemoryDefinition,
}

impl SharedMemory {
    /// Construct a new [`SharedMemory`].
    pub fn new(ty: &wasmtime_environ::Memory, tunables: &Tunables) -> Result<Self> {
        let (minimum_bytes, maximum_bytes) = Memory::limit_new(ty, None)?;
        let mmap_memory = MmapMemory::new(ty, tunables, minimum_bytes, maximum_bytes)?;
        Self::wrap(
            ty,
            LocalMemory::new(ty, tunables, Box::new(mmap_memory), None)?,
        )
    }

    /// Wrap an existing [`LocalMemory`] with the locking provided by a
    /// [`SharedMemory`].
    pub fn wrap(ty: &wasmtime_environ::Memory, mut memory: LocalMemory) -> Result<Self> {
        if !ty.shared {
            bail!("shared memory must have a `shared` memory type");
        }
        Ok(Self(Arc::new(SharedMemoryInner {
            ty: *ty,
            spot: ParkingSpot::default(),
            def: LongTermVMMemoryDefinition(memory.vmmemory()),
            memory: RwLock::new(memory),
        })))
    }

    /// Return the memory type for this [`SharedMemory`].
    pub fn ty(&self) -> wasmtime_environ::Memory {
        self.0.ty
    }

    /// Convert this shared memory into a [`Memory`].
    pub fn as_memory(self) -> Memory {
        Memory::Shared(self)
    }

    /// Return a pointer to the shared memory's [`VMMemoryDefinition`].
    pub fn vmmemory_ptr(&self) -> NonNull<VMMemoryDefinition> {
        NonNull::from(&self.0.def.0)
    }

    /// Same as `RuntimeLinearMemory::grow`, except with `&self`.
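    ///
    /// A sketch of the expected behavior (assuming `shared` wraps a memory
    /// with 64 KiB pages and room left before its maximum; illustrative, not
    /// a doctest):
    ///
    /// ```ignore
    /// // On success the pair is the old and new size in bytes, and the
    /// // shared `current_length` has already been updated for other threads.
    /// if let Some((old_size, new_size)) = shared.grow(1, None)? {
    ///     assert_eq!(new_size - old_size, 65536);
    /// }
    /// ```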
    pub fn grow(
        &self,
        delta_pages: u64,
        store: Option<&mut dyn VMStore>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let mut memory = self.0.memory.write().unwrap();
        let result = memory.grow(delta_pages, store)?;
        if let Some((_old_size_in_bytes, new_size_in_bytes)) = result {
            // Store the new size to the `VMMemoryDefinition` for JIT-generated
            // code (and runtime functions) to access. No other code can be
            // growing this memory due to the write lock, but code in other
            // threads could have access to this shared memory and we want them
            // to see the most consistent version of the `current_length`; a
            // weaker consistency is possible if we accept them seeing an older,
            // smaller memory size (assumption: memory only grows) but presently
            // we are aiming for accuracy.
            //
            // Note that it could be possible to access a memory address that is
            // now-valid due to changes to the page flags in `grow` above but
            // beyond the `memory.size` that we are about to assign to. In these
            // and similar cases, discussion in the thread proposal concluded
            // that: "multiple accesses in one thread racing with another
            // thread's `memory.grow` that are in-bounds only after the grow
            // commits may independently succeed or trap" (see
            // https://github.com/WebAssembly/threads/issues/26#issuecomment-433930711).
            // In other words, some non-determinism is acceptable when using
            // `memory.size` on work being done by `memory.grow`.
            self.0
                .def
                .0
                .current_length
                .store(new_size_in_bytes, Ordering::SeqCst);
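            // For illustration, the store above pairs with atomic loads of
            // the same field performed by other threads, along these lines
            // (`def` standing in for their view of this `VMMemoryDefinition`):
            //
            //     let len = def.current_length.load(Ordering::SeqCst);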
        }
        Ok(result)
    }

    /// Implementation of `memory.atomic.notify` for this shared memory.
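    ///
    /// For illustration (assuming `shared` is a `SharedMemory`; not a
    /// doctest): waking at most one waiter parked on the 32-bit cell at byte
    /// offset 0 returns how many waiters were actually woken.
    ///
    /// ```ignore
    /// let woken = shared.atomic_notify(0, 1)?;
    /// assert!(woken <= 1);
    /// ```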
    pub fn atomic_notify(&self, addr_index: u64, count: u32) -> Result<u32, Trap> {
        let ptr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        log::trace!("memory.atomic.notify(addr={addr_index:#x}, count={count})");
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        let ptr = unsafe { &*ptr };
        Ok(self.0.spot.notify(ptr, count))
    }

    /// Implementation of `memory.atomic.wait32` for this shared memory.
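    ///
    /// For illustration (assuming `shared` is a `SharedMemory` and `expected`
    /// a `u32`; not a doctest): a bounded wait on the 32-bit cell at byte
    /// offset 0. If the cell no longer holds `expected` this returns
    /// `WaitResult::Mismatch` without blocking; otherwise it blocks until
    /// notified (`Ok`) or the timeout elapses (`TimedOut`).
    ///
    /// ```ignore
    /// let result = shared.atomic_wait32(0, expected, Some(Duration::from_millis(10)))?;
    /// ```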
    pub fn atomic_wait32(
        &self,
        addr_index: u64,
        expected: u32,
        timeout: Option<Duration>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        log::trace!(
            "memory.atomic.wait32(addr={addr_index:#x}, expected={expected}, timeout={timeout:?})"
        );

        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU32>() == 4);
        assert!(std::mem::align_of::<AtomicU32>() <= 4);
        let atomic = unsafe { AtomicU32::from_ptr(addr.cast()) };
        let deadline = timeout.map(|d| Instant::now() + d);

        WAITER.with(|waiter| {
            let mut waiter = waiter.borrow_mut();
            Ok(self.0.spot.wait32(atomic, expected, deadline, &mut waiter))
        })
    }

    /// Implementation of `memory.atomic.wait64` for this shared memory.
    pub fn atomic_wait64(
        &self,
        addr_index: u64,
        expected: u64,
        timeout: Option<Duration>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 8, 8)?;
        log::trace!(
            "memory.atomic.wait64(addr={addr_index:#x}, expected={expected}, timeout={timeout:?})"
        );

        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU64>() == 8);
        assert!(std::mem::align_of::<AtomicU64>() <= 8);
        let atomic = unsafe { AtomicU64::from_ptr(addr.cast()) };
        let deadline = timeout.map(|d| Instant::now() + d);

        WAITER.with(|waiter| {
            let mut waiter = waiter.borrow_mut();
            Ok(self.0.spot.wait64(atomic, expected, deadline, &mut waiter))
        })
    }

    /// Return the page size, in bytes, of this memory's type.
    pub(crate) fn page_size(&self) -> u64 {
        self.0.ty.page_size()
    }

    /// Return the current size of this memory, in bytes, under the read lock.
    pub(crate) fn byte_size(&self) -> usize {
        self.0.memory.read().unwrap().byte_size()
    }

    /// Whether the underlying memory still needs its initial contents
    /// applied.
    pub(crate) fn needs_init(&self) -> bool {
        self.0.memory.read().unwrap().needs_init()
    }

    /// Return the range of addresses accessible to wasm for this memory.
    pub(crate) fn wasm_accessible(&self) -> Range<usize> {
        self.0.memory.read().unwrap().wasm_accessible()
    }
}

thread_local! {
    /// Structure used in conjunction with `ParkingSpot` to block the current
    /// thread if necessary. Note that this is `const`-initialized, so no lazy
    /// initialization is needed on first access.
    static WAITER: RefCell<Waiter> = const { RefCell::new(Waiter::new()) };
}

/// Shared memory needs some representation of a `VMMemoryDefinition` for
/// JIT-generated code to access. This structure owns the base pointer and
/// length of the actual memory and we share this definition across threads by:
/// - never changing the base pointer; according to the specification, shared
///   memory must be created with a known maximum size so it can be allocated
///   once and never moved
/// - carefully changing the length, using atomic accesses in both the runtime
///   and JIT-generated code.
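///
/// For illustration, the reader side of this scheme is an atomic load of the
/// length (a sketch; `def` stands in for a `&LongTermVMMemoryDefinition`
/// shared with another thread):
///
/// ```ignore
/// // The base pointer may be read freely since it never changes, but the
/// // length must always be loaded atomically.
/// let len = def.0.current_length.load(Ordering::SeqCst);
/// ```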
struct LongTermVMMemoryDefinition(VMMemoryDefinition);
unsafe impl Send for LongTermVMMemoryDefinition {}
unsafe impl Sync for LongTermVMMemoryDefinition {}