wasmtime/runtime/vm/memory/shared_memory.rs

use crate::Engine;
use crate::prelude::*;
use crate::runtime::vm::memory::{LocalMemory, MmapMemory, validate_atomic_addr};
use crate::runtime::vm::parking_spot::{ParkingSpot, Waiter};
use crate::runtime::vm::{self, Memory, VMMemoryDefinition, WaitResult};
use std::cell::RefCell;
use std::ops::Range;
use std::ptr::NonNull;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use wasmtime_environ::Trap;

/// For shared memory (and only for shared memory), this lock-version restricts
/// access when growing the memory or checking its size. This is to conform with
/// the [thread proposal]: "When `IsSharedArrayBuffer(...)` is true, the return
/// value should be the result of an atomic read-modify-write of the new size to
/// the internal `length` slot."
///
/// [thread proposal]:
///     https://github.com/WebAssembly/threads/blob/master/proposals/threads/Overview.md#webassemblymemoryprototypegrow
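///
/// # Example
///
/// A minimal sketch (not compiled here; `engine` and `ty` are assumed to be a
/// configured [`Engine`] and a `shared` memory type built elsewhere): because
/// [`SharedMemory`] is a `Clone` handle over an `Arc`, clones share one
/// allocation.
///
/// ```ignore
/// let shared = SharedMemory::new(&engine, &ty)?;
/// let handle = shared.clone(); // clones the `Arc`, not the linear memory
/// std::thread::spawn(move || {
///     // Both handles observe the same memory and the same current length.
///     let _len = handle.byte_size();
/// });
/// ```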
#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);

struct SharedMemoryInner {
    memory: RwLock<LocalMemory>,
    spot: ParkingSpot,
    ty: wasmtime_environ::Memory,
    def: LongTermVMMemoryDefinition,
}

impl SharedMemory {
    /// Construct a new [`SharedMemory`].
    pub fn new(engine: &Engine, ty: &wasmtime_environ::Memory) -> Result<Self> {
        let tunables = engine.tunables();
        // Note that without a limiter being passed to `limit_new` this
        // `assert_ready` should never panic.
        let (minimum_bytes, maximum_bytes) = vm::assert_ready(Memory::limit_new(ty, None))?;
        let mmap_memory = MmapMemory::new(ty, tunables, minimum_bytes, maximum_bytes)?;
        Self::wrap(
            engine,
            ty,
            LocalMemory::new(ty, tunables, Box::new(mmap_memory), None)?,
        )
    }

    /// Wrap an existing [`LocalMemory`] with the locking provided by a
    /// [`SharedMemory`].
    pub fn wrap(
        engine: &Engine,
        ty: &wasmtime_environ::Memory,
        memory: LocalMemory,
    ) -> Result<Self> {
        if !engine.config().shared_memory {
            bail!(
                "shared memory support is disabled for this engine -- see `Config::shared_memory`"
            );
        }
        if !ty.shared {
            bail!("shared memory must have a `shared` memory type");
        }
        Ok(Self(Arc::new(SharedMemoryInner {
            ty: *ty,
            spot: ParkingSpot::default(),
            def: LongTermVMMemoryDefinition(memory.vmmemory()),
            memory: RwLock::new(memory),
        })))
    }

    /// Return the memory type for this [`SharedMemory`].
    pub fn ty(&self) -> wasmtime_environ::Memory {
        self.0.ty
    }

    /// Convert this shared memory into a [`Memory`].
    pub fn as_memory(self) -> Memory {
        Memory::Shared(self)
    }

    /// Return a pointer to the shared memory's [`VMMemoryDefinition`].
    pub fn vmmemory_ptr(&self) -> NonNull<VMMemoryDefinition> {
        NonNull::from(&self.0.def.0)
    }

    /// Same as `RuntimeLinearMemory::grow`, except with `&self`.
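    ///
    /// A minimal sketch of the intended use (not compiled here; `shared` is
    /// assumed to be an existing [`SharedMemory`]): on success the old and new
    /// sizes in bytes are returned.
    ///
    /// ```ignore
    /// // Grow by one Wasm page and observe the byte-size delta.
    /// if let Some((old_size, new_size)) = shared.grow(1)? {
    ///     assert_eq!(new_size - old_size, shared.page_size() as usize);
    /// }
    /// ```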
    pub fn grow(&self, delta_pages: u64) -> Result<Option<(usize, usize)>, Error> {
        let mut memory = self.0.memory.write().unwrap();
        // Without a limiter being passed in this shouldn't have an await point,
        // so it should be safe to assert that it's ready.
        let result = vm::assert_ready(memory.grow(delta_pages, None))?;
        if let Some((_old_size_in_bytes, new_size_in_bytes)) = result {
            // Store the new size to the `VMMemoryDefinition` for JIT-generated
            // code (and runtime functions) to access. No other code can be
            // growing this memory due to the write lock, but code in other
            // threads could have access to this shared memory and we want them
            // to see the most consistent version of the `current_length`; a
            // weaker consistency is possible if we accept them seeing an older,
            // smaller memory size (assumption: memory only grows) but presently
            // we are aiming for accuracy.
            //
            // Note that it could be possible to access a memory address that is
            // now-valid due to changes to the page flags in `grow` above but
            // beyond the `memory.size` that we are about to assign to. In these
            // and similar cases, discussion in the thread proposal concluded
            // that: "multiple accesses in one thread racing with another
            // thread's `memory.grow` that are in-bounds only after the grow
            // commits may independently succeed or trap" (see
            // https://github.com/WebAssembly/threads/issues/26#issuecomment-433930711).
            // In other words, some non-determinism is acceptable when using
            // `memory.size` on work being done by `memory.grow`.
            self.0
                .def
                .0
                .current_length
                .store(new_size_in_bytes, Ordering::SeqCst);
        }
        Ok(result)
    }

    /// Implementation of `memory.atomic.notify` for this shared memory.
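    ///
    /// A minimal sketch of pairing this with a waiter (not compiled here;
    /// `shared` is assumed to be an existing [`SharedMemory`] with a thread
    /// parked elsewhere): the return value is the number of waiters actually
    /// woken, which is at most `count`.
    ///
    /// ```ignore
    /// // Wake at most one thread parked on the 4-byte cell at offset 0.
    /// let woken = shared.atomic_notify(0, 1)?;
    /// assert!(woken <= 1);
    /// ```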
    pub fn atomic_notify(&self, addr_index: u64, count: u32) -> Result<u32, Trap> {
        let ptr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        log::trace!("memory.atomic.notify(addr={addr_index:#x}, count={count})");
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        let ptr = unsafe { &*ptr };
        Ok(self.0.spot.notify(ptr, count))
    }

    /// Implementation of `memory.atomic.wait32` for this shared memory.
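    ///
    /// A minimal sketch of the mismatch case (not compiled here; `shared` is
    /// assumed to be an existing [`SharedMemory`]): when the value at
    /// `addr_index` differs from `expected`, the call returns immediately
    /// instead of blocking.
    ///
    /// ```ignore
    /// // Assume the 32-bit cell at offset 0 currently holds 0, not 1.
    /// let result = shared.atomic_wait32(0, 1, None)?;
    /// assert!(matches!(result, WaitResult::Mismatch));
    /// ```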
    pub fn atomic_wait32(
        &self,
        addr_index: u64,
        expected: u32,
        timeout: Option<Duration>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        log::trace!(
            "memory.atomic.wait32(addr={addr_index:#x}, expected={expected}, timeout={timeout:?})"
        );

        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU32>() == 4);
        assert!(std::mem::align_of::<AtomicU32>() <= 4);
        let atomic = unsafe { AtomicU32::from_ptr(addr.cast()) };
        let deadline = timeout.map(|d| Instant::now() + d);

        WAITER.with(|waiter| {
            let mut waiter = waiter.borrow_mut();
            Ok(self.0.spot.wait32(atomic, expected, deadline, &mut waiter))
        })
    }

    /// Implementation of `memory.atomic.wait64` for this shared memory.
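    ///
    /// A minimal sketch of the timeout case (not compiled here; `shared` is
    /// assumed to be an existing [`SharedMemory`] that nobody notifies): if
    /// the value still matches `expected` when the deadline passes, the wait
    /// gives up.
    ///
    /// ```ignore
    /// // Assume the 64-bit cell at offset 8 currently holds 0.
    /// let result = shared.atomic_wait64(8, 0, Some(Duration::from_millis(10)))?;
    /// assert!(matches!(result, WaitResult::TimedOut));
    /// ```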
    pub fn atomic_wait64(
        &self,
        addr_index: u64,
        expected: u64,
        timeout: Option<Duration>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 8, 8)?;
        log::trace!(
            "memory.atomic.wait64(addr={addr_index:#x}, expected={expected}, timeout={timeout:?})"
        );

        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU64>() == 8);
        assert!(std::mem::align_of::<AtomicU64>() <= 8);
        let atomic = unsafe { AtomicU64::from_ptr(addr.cast()) };
        let deadline = timeout.map(|d| Instant::now() + d);

        WAITER.with(|waiter| {
            let mut waiter = waiter.borrow_mut();
            Ok(self.0.spot.wait64(atomic, expected, deadline, &mut waiter))
        })
    }

    pub(crate) fn page_size(&self) -> u64 {
        self.0.ty.page_size()
    }

    pub(crate) fn byte_size(&self) -> usize {
        self.0.memory.read().unwrap().byte_size()
    }

    pub(crate) fn needs_init(&self) -> bool {
        self.0.memory.read().unwrap().needs_init()
    }

    pub(crate) fn wasm_accessible(&self) -> Range<usize> {
        self.0.memory.read().unwrap().wasm_accessible()
    }
}

thread_local! {
    /// Structure used in conjunction with `ParkingSpot` to block the current
    /// thread if necessary. Note that the `const` block makes this
    /// constant-initialized, avoiding the lazy-initialization check that a
    /// non-`const` initializer would require on each access.
    static WAITER: RefCell<Waiter> = const { RefCell::new(Waiter::new()) };
}

/// Shared memory needs some representation of a `VMMemoryDefinition` for
/// JIT-generated code to access. This structure owns the base pointer and
/// length of the actual memory and we share this definition across threads by:
///
/// - never changing the base pointer; according to the specification, shared
///   memory must be created with a known maximum size so it can be allocated
///   once and never moved
/// - carefully changing the length, using atomic accesses in both the runtime
///   and JIT-generated code.
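///
/// A minimal sketch of the protocol this enables (field names as in this
/// file; the reader side is an assumption about what runtime and JIT code do,
/// per the points above):
///
/// ```ignore
/// // Writer, under the write lock (see `SharedMemory::grow`):
/// def.0.current_length.store(new_size_in_bytes, Ordering::SeqCst);
/// // Reader, on any thread:
/// let len = def.0.current_length.load(Ordering::SeqCst);
/// ```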
struct LongTermVMMemoryDefinition(VMMemoryDefinition);
// SAFETY: as documented above, the base pointer never changes and the length
// is only accessed atomically, so sharing this definition across threads is
// sound.
unsafe impl Send for LongTermVMMemoryDefinition {}
unsafe impl Sync for LongTermVMMemoryDefinition {}