// wasmtime/runtime/store/func_refs.rs

1//! Lifetime management of `VMFuncRef`s inside of stores, and filling in their
2//! trampolines.
3
4use crate::Definition;
5use crate::module::ModuleRegistry;
6use crate::prelude::*;
7use crate::runtime::HostFunc;
8use crate::runtime::vm::{AlwaysMut, SendSyncPtr, VMArrayCallHostFuncContext, VMFuncRef};
9use alloc::sync::Arc;
10use core::ptr::NonNull;
11
/// An arena of `VMFuncRef`s.
///
/// Allows a store to pin and own funcrefs so that it can patch in trampolines
/// for `VMFuncRef`s that are missing a `wasm_call` trampoline and
/// need Wasm to supply it.
#[derive(Default)]
pub struct FuncRefs {
    /// A bump allocation arena where we allocate `VMFuncRef`s such
    /// that they are pinned and owned.
    ///
    /// Entries are never individually freed; they live as long as this
    /// `FuncRefs` (and thus its owning store).
    bump: AlwaysMut<bumpalo::Bump>,

    /// Pointers into `self.bump` for entries that need `wasm_call` field filled
    /// in.
    ///
    /// Entries are removed from this list once a trampoline is patched in
    /// (see `FuncRefs::fill`).
    with_holes: Vec<SendSyncPtr<VMFuncRef>>,

    /// General-purpose storage of "function things" that need to live as long
    /// as the entire store.
    storage: Vec<Storage>,
}
31
/// Various items to place in `FuncRefs::storage`
///
/// Note that each field has its own heap-level indirection to be resistant to
/// `FuncRefs::storage` having its own backing storage reallocated.
///
/// All variants exist purely to extend the lifetime of their payloads to the
/// lifetime of the owning store; none of the fields are read after insertion
/// (hence the `dead_code` expectations).
enum Storage {
    /// Pinned arbitrary `Linker` definitions that must be kept alive for the
    /// entire duration of the store. This can include host functions, funcrefs
    /// inside them, etc.
    InstancePreDefinitions {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        defs: Arc<[Definition]>,
    },

    /// Pinned `VMFuncRef`s that had their `wasm_call` field
    /// pre-patched when constructing an `InstancePre`, and which we need to
    /// keep alive for our owning store's lifetime.
    InstancePreFuncRefs {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        funcs: Arc<[VMFuncRef]>,
    },

    /// A uniquely-owned host function within a `Store`. This comes about with
    /// `Func::new` or similar APIs. The `HostFunc` internally owns the
    /// `InstanceHandle` and that will get dropped when this `HostFunc` itself
    /// is dropped.
    ///
    /// Note that this contains the vmctx that the `VMFuncRef` points to for
    /// this host function.
    BoxHost {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        func: Box<HostFunc>,
    },

    /// A function is shared across possibly other stores, hence the `Arc`. This
    /// variant happens when a `Linker`-defined function is instantiated within
    /// a `Store` (e.g. via `Linker::get` or similar APIs). The `Arc` here
    /// indicates that there's some number of other stores holding this function
    /// too, so dropping this may not deallocate the underlying
    /// `InstanceHandle`.
    ///
    /// Note that this contains the vmctx that the `VMFuncRef` points to for
    /// this host function.
    ArcHost {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        func: Arc<HostFunc>,
    },
}
79
80impl FuncRefs {
81    /// Push the given `VMFuncRef` into this arena, returning a
82    /// pinned pointer to it.
83    ///
84    /// # Safety
85    ///
86    /// You may only access the return value on the same thread as this
87    /// `FuncRefs` and only while the store holding this `FuncRefs` exists.
88    /// Additionally the `vmctx` field of `func_ref` must be valid to read.
89    pub unsafe fn push(
90        &mut self,
91        func_ref: VMFuncRef,
92        modules: &ModuleRegistry,
93    ) -> NonNull<VMFuncRef> {
94        debug_assert!(func_ref.wasm_call.is_none());
95        let func_ref = self.bump.get_mut().alloc(func_ref);
96        // SAFETY: it's a contract of this function itself that `func_ref` has a
97        // valid vmctx field to read.
98        let has_hole = unsafe { !try_fill(func_ref, modules) };
99        let unpatched = SendSyncPtr::from(func_ref);
100        if has_hole {
101            self.with_holes.push(unpatched);
102        }
103        unpatched.as_non_null()
104    }
105
106    /// Patch any `VMFuncRef::wasm_call`s that need filling in.
107    pub fn fill(&mut self, modules: &ModuleRegistry) {
108        self.with_holes
109            .retain_mut(|f| unsafe { !try_fill(f.as_mut(), modules) });
110    }
111
112    /// Reserves `amt` space for extra items in "storage" for this store.
113    pub fn reserve_storage(&mut self, amt: usize) {
114        self.storage.reserve(amt);
115    }
116
117    /// Push pre-patched `VMFuncRef`s from an `InstancePre`.
118    ///
119    /// This is used to ensure that the store itself persists the entire list of
120    /// `funcs` for the entire lifetime of the store.
121    pub fn push_instance_pre_func_refs(&mut self, funcs: Arc<[VMFuncRef]>) {
122        self.storage.push(Storage::InstancePreFuncRefs { funcs });
123    }
124
125    /// Push linker definitions into storage, keeping them alive for the entire
126    /// lifetime of the store.
127    ///
128    /// This is used to keep linker-defined functions' vmctx values alive, for
129    /// example.
130    pub fn push_instance_pre_definitions(&mut self, defs: Arc<[Definition]>) {
131        self.storage.push(Storage::InstancePreDefinitions { defs });
132    }
133
134    /// Pushes a shared host function into this store.
135    ///
136    /// This will create a store-local `VMFuncRef` with a hole to fill in where
137    /// the `wasm_call` will get filled in as needed.
138    ///
139    /// This function returns a `VMFuncRef` which is store-local and will have
140    /// `wasm_call` filled in eventually if needed.
141    ///
142    /// # Safety
143    ///
144    /// You may only access the return value on the same thread as this
145    /// `FuncRefs` and only while the store holding this `FuncRefs` exists.
146    pub fn push_arc_host(
147        &mut self,
148        func: Arc<HostFunc>,
149        modules: &ModuleRegistry,
150    ) -> NonNull<VMFuncRef> {
151        debug_assert!(func.func_ref().wasm_call.is_none());
152        // SAFETY: the vmctx field in the funcref of `HostFunc` is safe to read.
153        let ret = unsafe { self.push(func.func_ref().clone(), modules) };
154        self.storage.push(Storage::ArcHost { func });
155        ret
156    }
157
158    /// Same as `push_arc_host`, but for owned host functions.
159    pub fn push_box_host(
160        &mut self,
161        func: Box<HostFunc>,
162        modules: &ModuleRegistry,
163    ) -> NonNull<VMFuncRef> {
164        debug_assert!(func.func_ref().wasm_call.is_none());
165        // SAFETY: the vmctx field in the funcref of `HostFunc` is safe to read.
166        let ret = unsafe { self.push(func.func_ref().clone(), modules) };
167        self.storage.push(Storage::BoxHost { func });
168        ret
169    }
170}
171
172/// Attempts to fill the `wasm_call` field of `func_ref` given `modules`
173/// registered and returns `true` if the field was filled, `false` otherwise.
174///
175/// # Panics
176///
177/// Panics if `func_ref.wasm_call.is_some()`
178///
179/// # Safety
180///
181/// This relies on `func_ref` being a valid pointer with a valid `vmctx` field.
182unsafe fn try_fill(func_ref: &mut VMFuncRef, modules: &ModuleRegistry) -> bool {
183    debug_assert!(func_ref.wasm_call.is_none());
184
185    // Debug assert that the vmctx is a `VMArrayCallHostFuncContext` as
186    // that is the only kind that can have holes.
187    //
188    // SAFETY: the validity of `vmctx` is a contract of this function itself.
189    unsafe {
190        let _ = VMArrayCallHostFuncContext::from_opaque(func_ref.vmctx.as_non_null());
191    }
192
193    func_ref.wasm_call = modules
194        .wasm_to_array_trampoline(func_ref.type_index)
195        .map(|f| f.into());
196    func_ref.wasm_call.is_some()
197}