wasmtime/runtime/store/data.rs

use crate::prelude::*;
use crate::store::StoreOpaque;
use crate::{StoreContext, StoreContextMut};
use core::fmt;
use core::marker;
use core::num::NonZeroU64;
use core::ops::{Index, IndexMut};

// This is defined here, in a private submodule, so we can explicitly reexport
// it only as `pub(crate)`. This avoids a ton of
// crate-private-type-in-public-interface errors that aren't really too
// interesting to deal with.
#[derive(Copy, Clone)]
pub struct InstanceId(pub(super) usize);

impl InstanceId {
    pub fn from_index(idx: usize) -> InstanceId {
        InstanceId(idx)
    }
}

/// Storage for all store-owned items (functions, tables, globals, instances,
/// memories, and tags), addressed by `Stored<T>` handles tagged with this
/// store's `StoreId`.
pub struct StoreData {
    id: StoreId,
    funcs: Vec<crate::func::FuncData>,
    tables: Vec<crate::runtime::vm::ExportTable>,
    globals: Vec<crate::runtime::vm::ExportGlobal>,
    instances: Vec<crate::instance::InstanceData>,
    memories: Vec<crate::runtime::vm::ExportMemory>,
    tags: Vec<crate::runtime::vm::ExportTag>,
    #[cfg(feature = "component-model")]
    pub(crate) components: crate::component::ComponentStoreData,
}

/// Implemented by every type held in `StoreData`, selecting which internal
/// list of `StoreData` holds values of that type.
pub trait StoredData: Sized {
    fn list(data: &StoreData) -> &Vec<Self>;
    fn list_mut(data: &mut StoreData) -> &mut Vec<Self>;
}

macro_rules! impl_store_data {
    ($($field:ident => $t:ty,)*) => ($(
        impl StoredData for $t {
            #[inline]
            fn list(data: &StoreData) -> &Vec<Self> { &data.$field }
            #[inline]
            fn list_mut(data: &mut StoreData) -> &mut Vec<Self> { &mut data.$field }
        }
    )*)
}

impl_store_data! {
    funcs => crate::func::FuncData,
    tables => crate::runtime::vm::ExportTable,
    globals => crate::runtime::vm::ExportGlobal,
    instances => crate::instance::InstanceData,
    memories => crate::runtime::vm::ExportMemory,
    tags => crate::runtime::vm::ExportTag,
}
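
// For illustration, the first line of the invocation above expands to roughly:
//
//     impl StoredData for crate::func::FuncData {
//         #[inline]
//         fn list(data: &StoreData) -> &Vec<Self> { &data.funcs }
//         #[inline]
//         fn list_mut(data: &mut StoreData) -> &mut Vec<Self> { &mut data.funcs }
//     }
//
// and likewise for each of the other `field => type` pairs.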

impl StoreData {
    /// Creates a new, empty `StoreData` with a freshly allocated `StoreId`.
    pub fn new() -> StoreData {
        StoreData {
            id: StoreId::allocate(),
            funcs: Vec::new(),
            tables: Vec::new(),
            globals: Vec::new(),
            instances: Vec::new(),
            memories: Vec::new(),
            tags: Vec::new(),
            #[cfg(feature = "component-model")]
            components: Default::default(),
        }
    }

    /// Returns the identifier of the store that owns this data.
    pub fn id(&self) -> StoreId {
        self.id
    }

    /// Inserts `data` into this store, returning a handle that can be used to
    /// index back into this store later on.
    pub fn insert<T>(&mut self, data: T) -> Stored<T>
    where
        T: StoredData,
    {
        let list = T::list_mut(self);
        let index = list.len();
        list.push(data);
        Stored::new(self.id, index)
    }

    /// Returns the handle that the next insertion of a `T` will receive.
    pub fn next_id<T>(&self) -> Stored<T>
    where
        T: StoredData,
    {
        Stored::new(self.id, T::list(self).len())
    }

    /// Returns whether `id` belongs to this store.
    pub fn contains<T>(&self, id: Stored<T>) -> bool
    where
        T: StoredData,
    {
        if id.store_id != self.id {
            return false;
        }
        // This should be true as an invariant of our API, but double-check with
        // debug assertions enabled.
        debug_assert!(id.index() < T::list(self).len());
        true
    }

    /// Iterates over handles to all items of type `T` inserted so far.
    pub fn iter<T>(&self) -> impl ExactSizeIterator<Item = Stored<T>> + use<T>
    where
        T: StoredData,
    {
        let id = self.id;
        (0..T::list(self).len()).map(move |i| Stored::new(id, i))
    }

    /// Reserves capacity for `count` additional functions.
    pub(crate) fn reserve_funcs(&mut self, count: usize) {
        self.funcs.reserve(count);
    }
}
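
// A minimal usage sketch of the API above. This is illustrative only:
// constructing a `crate::func::FuncData` requires store internals not shown in
// this file, so `func_data` below is a hypothetical binding.
//
//     let mut data = StoreData::new();
//     let handle: Stored<crate::func::FuncData> = data.insert(func_data);
//     assert!(data.contains(handle));
//     let func: &crate::func::FuncData = &data[handle];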

impl<T> Index<Stored<T>> for StoreData
where
    T: StoredData,
{
    type Output = T;

    #[inline]
    fn index(&self, index: Stored<T>) -> &Self::Output {
        index.assert_belongs_to(self.id);
        // Note that if this is ever a performance bottleneck it should be safe
        // to use unchecked indexing here because presence of a `Stored<T>` is
        // proof of an item having been inserted into a store and lists in
        // stores are never shrunk. After the store check above the actual index
        // should always be valid.
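        // (Hypothetically that would look like
        // `unsafe { T::list(self).get_unchecked(index.index()) }`.)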
        &T::list(self)[index.index()]
    }
}

impl<T> IndexMut<Stored<T>> for StoreData
where
    T: StoredData,
{
    #[inline]
    fn index_mut(&mut self, index: Stored<T>) -> &mut Self::Output {
        index.assert_belongs_to(self.id);
        // Note that this could be unchecked indexing, see the note in `Index`
        // above.
        &mut T::list_mut(self)[index.index()]
    }
}

// forward StoreContext => StoreData
impl<I, T> Index<I> for StoreContext<'_, T>
where
    StoreData: Index<I>,
{
    type Output = <StoreData as Index<I>>::Output;

    #[inline]
    fn index(&self, index: I) -> &Self::Output {
        self.0.store_data.index(index)
    }
}

// forward StoreContextMut => StoreData
impl<I, T> Index<I> for StoreContextMut<'_, T>
where
    StoreData: Index<I>,
{
    type Output = <StoreData as Index<I>>::Output;

    #[inline]
    fn index(&self, index: I) -> &Self::Output {
        self.0.store_data.index(index)
    }
}

// forward StoreOpaque => StoreData
impl<I> Index<I> for StoreOpaque
where
    StoreData: Index<I>,
{
    type Output = <StoreData as Index<I>>::Output;

    #[inline]
    fn index(&self, index: I) -> &Self::Output {
        self.store_data().index(index)
    }
}

impl<I> IndexMut<I> for StoreOpaque
where
    StoreData: IndexMut<I>,
{
    #[inline]
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        self.store_data_mut().index_mut(index)
    }
}
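
// With the forwarding impls above, code holding a context or a `StoreOpaque`
// can index it directly with a `Stored<T>` handle; note that mutable indexing
// is only forwarded for `StoreOpaque`. A sketch with hypothetical bindings
// `cx: StoreContext<'_, T>` and `store: StoreOpaque`:
//
//     let func = &cx[handle];         // immutable access through a context
//     let func = &mut store[handle];  // mutable access through the opaque store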

/// A unique identifier attached to each store.
///
/// This identifier is embedded into the `Stored<T>` structure and is used to
/// identify the original store that items come from. For example a `Memory` is
/// owned by a `Store` and will embed a `StoreId` internally to say which store
/// it came from. Comparisons of this value are how panics are generated when an
/// item is used with a store other than the one it belongs to.
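///
/// For example (a sketch of the intended behavior, not compiled as a doctest
/// since this is crate-internal API):
///
/// ```ignore
/// let a = StoreId::allocate();
/// let b = StoreId::allocate();
/// a.assert_belongs_to(a); // ok: same store
/// a.assert_belongs_to(b); // panics: "object used with the wrong store"
/// ```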
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
#[repr(transparent)] // NB: relied on in the C API
pub struct StoreId(NonZeroU64);

impl StoreId {
    /// Allocates a new identifier for a store, unique among all identifiers
    /// previously allocated in this process.
    pub fn allocate() -> StoreId {
        // Allow up to 2^63 stores, at which point we start panicking to
        // prevent overflow.
        //
        // If a store is created once per microsecond then this counter will
        // last the current process roughly 292,000 years before overflowing
        // (2^63 microseconds is about 9.2e12 seconds, or 2.9e5 years).
        const OVERFLOW_THRESHOLD: u64 = 1 << 63;

        #[cfg(target_has_atomic = "64")]
        let id = {
            use core::sync::atomic::{AtomicU64, Ordering::Relaxed};

            // Note the usage of `Relaxed` ordering here which should be ok
            // since we're only looking for atomicity on this counter and this
            // otherwise isn't used to synchronize memory stored anywhere else.
            static NEXT_ID: AtomicU64 = AtomicU64::new(0);
            let id = NEXT_ID.fetch_add(1, Relaxed);
            if id > OVERFLOW_THRESHOLD {
                NEXT_ID.store(OVERFLOW_THRESHOLD, Relaxed);
                panic!("store id allocator overflow");
            }
            id
        };

        // When 64-bit atomics are not available use a `RwLock<u64>` instead.
        // This lock type is already used elsewhere in Wasmtime; its current
        // implementation panics on contention, but that's no worse than what
        // Wasmtime had before and it's at least correct and UB-free.
        #[cfg(not(target_has_atomic = "64"))]
        let id = {
            use crate::sync::RwLock;
            static NEXT_ID: RwLock<u64> = RwLock::new(0);

            let mut lock = NEXT_ID.write();
            if *lock > OVERFLOW_THRESHOLD {
                panic!("store id allocator overflow");
            }
            let ret = *lock;
            *lock += 1;
            ret
        };

        StoreId(NonZeroU64::new(id + 1).unwrap())
    }

    /// Asserts that this identifier matches `store`, panicking with a
    /// store-mismatch message otherwise.
    #[inline]
    pub fn assert_belongs_to(&self, store: StoreId) {
        if *self == store {
            return;
        }
        store_id_mismatch();
    }

    /// Raw accessor for the C API.
    pub fn as_raw(&self) -> NonZeroU64 {
        self.0
    }

    /// Raw constructor for the C API.
    pub fn from_raw(id: NonZeroU64) -> StoreId {
        StoreId(id)
    }
}

/// A handle to an item of type `T` stored within a particular store's
/// `StoreData`, carrying both the owning store's `StoreId` and the item's
/// index.
#[repr(C)] // used by reference in the C API, also in `wasmtime_func_t`.
pub struct Stored<T> {
    store_id: StoreId,
    index: usize,
    _marker: marker::PhantomData<fn() -> T>,
}

impl<T> Stored<T> {
    fn new(store_id: StoreId, index: usize) -> Stored<T> {
        Stored {
            store_id,
            index,
            _marker: marker::PhantomData,
        }
    }

    #[inline]
    pub fn assert_belongs_to(&self, store: StoreId) {
        self.store_id.assert_belongs_to(store)
    }

    fn index(&self) -> usize {
        self.index
    }
}

#[cold]
fn store_id_mismatch() {
    panic!("object used with the wrong store");
}

impl<T> PartialEq for Stored<T> {
    fn eq(&self, other: &Stored<T>) -> bool {
        self.store_id == other.store_id && self.index == other.index
    }
}

impl<T> Copy for Stored<T> {}

impl<T> Clone for Stored<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T> fmt::Debug for Stored<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "store={}, index={}", self.store_id.0, self.index())
    }
}