wasmtime/runtime/store/func_refs.rs

//! Lifetime management of `VMFuncRef`s inside of stores, and filling in their
//! trampolines.

use crate::Definition;
use crate::module::ModuleRegistry;
use crate::prelude::*;
use crate::runtime::HostFunc;
use crate::runtime::vm::{SendSyncPtr, VMArrayCallHostFuncContext, VMFuncRef};
use alloc::sync::Arc;
use core::ptr::NonNull;

/// An arena of `VMFuncRef`s.
///
/// Allows a store to pin and own funcrefs so that it can patch in trampolines
/// for `VMFuncRef`s that are missing a `wasm_call` trampoline and need Wasm
/// to supply it.
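///
/// A sketch of the intended flow (a hypothetical caller; marked `ignore`
/// since these are crate-internal `unsafe` APIs and the setup of `func_ref`
/// and `modules` is assumed):
///
/// ```ignore
/// let mut refs = FuncRefs::default();
/// // Allocate a pinned funcref; this leaves a `wasm_call` hole if no
/// // matching trampoline is registered in `modules` yet.
/// let ptr = unsafe { refs.push(func_ref, &modules) };
/// // Later, once more modules are registered, patch any remaining holes.
/// refs.fill(&modules);
/// ```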
#[derive(Default)]
pub struct FuncRefs {
    /// A bump allocation arena where we allocate `VMFuncRef`s such
    /// that they are pinned and owned.
    bump: SendSyncBump,

    /// Pointers into `self.bump` for entries that need their `wasm_call`
    /// field filled in.
    with_holes: Vec<SendSyncPtr<VMFuncRef>>,

    /// General-purpose storage of "function things" that need to live as long
    /// as the entire store.
    storage: Vec<Storage>,
}

/// Various items to place in `FuncRefs::storage`.
///
/// Note that each variant's payload has its own heap-level indirection, so
/// entries remain valid even when `FuncRefs::storage`'s own backing storage
/// is reallocated.
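///
/// For example (a hypothetical sketch; `storage` stands in for the
/// `FuncRefs::storage` vector): pushing may reallocate and move the `Storage`
/// values themselves, but never the heap data they point at:
///
/// ```ignore
/// let funcs: Arc<[VMFuncRef]> = /* ... */;
/// let stable = Arc::as_ptr(&funcs); // address of the `Arc`'s heap allocation
/// storage.push(Storage::InstancePreFuncRefs { funcs });
/// // Even if `storage`'s buffer was reallocated by the push, `stable` still
/// // points at live data: only the enum value moved, not the `Arc` contents.
/// ```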
enum Storage {
    /// Pinned arbitrary `Linker` definitions that must be kept alive for the
    /// entire duration of the store. This can include host functions, funcrefs
    /// inside them, etc.
    InstancePreDefinitions {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        defs: Arc<[Definition]>,
    },

    /// Pinned `VMFuncRef`s that had their `wasm_call` field
    /// pre-patched when constructing an `InstancePre`, and which we need to
    /// keep alive for our owning store's lifetime.
    InstancePreFuncRefs {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        funcs: Arc<[VMFuncRef]>,
    },

    /// A uniquely-owned host function within a `Store`. This comes about with
    /// `Func::new` or similar APIs. The `HostFunc` internally owns the
    /// `InstanceHandle` and that will get dropped when this `HostFunc` itself
    /// is dropped.
    ///
    /// Note that this contains the vmctx that the `VMFuncRef` points to for
    /// this host function.
    BoxHost {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        func: Box<HostFunc>,
    },

    /// A host function that is possibly shared across other stores, hence the
    /// `Arc`. This variant arises when a `Linker`-defined function is
    /// instantiated within a `Store` (e.g. via `Linker::get` or similar APIs).
    /// The `Arc` here indicates that some number of other stores may hold this
    /// function too, so dropping this may not deallocate the underlying
    /// `InstanceHandle`.
    ///
    /// Note that this contains the vmctx that the `VMFuncRef` points to for
    /// this host function.
    ArcHost {
        #[expect(dead_code, reason = "only here to keep the original value alive")]
        func: Arc<HostFunc>,
    },
}

use send_sync_bump::SendSyncBump;
mod send_sync_bump {
    #[derive(Default)]
    pub struct SendSyncBump(bumpalo::Bump);

    impl SendSyncBump {
        pub fn alloc<T>(&mut self, val: T) -> &mut T {
            self.0.alloc(val)
        }
    }

    // Safety: the only public method requires `&mut self`, so a shared
    // `&SendSyncBump` cannot be used to allocate at all, which makes it
    // sound to share such references across threads.
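    //
    // (For context: `bumpalo::Bump::alloc` takes `&self` and mutates internal
    // state without synchronization, which is why `Bump` itself is not
    // `Sync`; funneling all allocation through `&mut self` restores the
    // exclusivity that makes the impl below sound.)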
    unsafe impl Sync for SendSyncBump {}
}

impl FuncRefs {
    /// Push the given `VMFuncRef` into this arena, returning a
    /// pinned pointer to it.
    ///
    /// # Safety
    ///
    /// You may only access the return value on the same thread as this
    /// `FuncRefs` and only while the store holding this `FuncRefs` exists.
    pub unsafe fn push(
        &mut self,
        func_ref: VMFuncRef,
        modules: &ModuleRegistry,
    ) -> NonNull<VMFuncRef> {
        debug_assert!(func_ref.wasm_call.is_none());
        let func_ref = self.bump.alloc(func_ref);
        // SAFETY: `func_ref` was just allocated into our own bump arena, so
        // it is a valid pointer with a valid `vmctx` field.
        let has_hole = unsafe { !try_fill(func_ref, modules) };
        let unpatched = SendSyncPtr::from(func_ref);
        if has_hole {
            self.with_holes.push(unpatched);
        }
        unpatched.as_non_null()
    }

    /// Patch any `VMFuncRef::wasm_call`s that need filling in.
    pub fn fill(&mut self, modules: &ModuleRegistry) {
        // SAFETY: all pointers in `with_holes` point into `self.bump`, which
        // is owned by `self` and lives as long as this store does.
        self.with_holes
            .retain_mut(|f| unsafe { !try_fill(f.as_mut(), modules) });
    }

    /// Reserves space for `amt` extra items in this store's `storage`.
    pub fn reserve_storage(&mut self, amt: usize) {
        self.storage.reserve(amt);
    }

    /// Push pre-patched `VMFuncRef`s from an `InstancePre`.
    ///
    /// This is used to ensure that the store itself persists the entire list
    /// of `funcs` for the entire lifetime of the store.
    pub fn push_instance_pre_func_refs(&mut self, funcs: Arc<[VMFuncRef]>) {
        self.storage.push(Storage::InstancePreFuncRefs { funcs });
    }

    /// Push linker definitions into storage, keeping them alive for the entire
    /// lifetime of the store.
    ///
    /// This is used to keep linker-defined functions' vmctx values alive, for
    /// example.
    pub fn push_instance_pre_definitions(&mut self, defs: Arc<[Definition]>) {
        self.storage.push(Storage::InstancePreDefinitions { defs });
    }

    /// Pushes a shared host function into this store.
    ///
    /// This creates a store-local `VMFuncRef` with a `wasm_call` hole that
    /// will get filled in as needed, and returns a pointer to it.
    ///
    /// # Safety
    ///
    /// You may only access the return value on the same thread as this
    /// `FuncRefs` and only while the store holding this `FuncRefs` exists.
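    ///
    /// A hypothetical call-site sketch (`ignore`d; `refs`, `host_func`, and
    /// `modules` are assumed to exist):
    ///
    /// ```ignore
    /// let ptr = unsafe { refs.push_arc_host(host_func.clone(), &modules) };
    /// // The store now keeps `host_func` (and thus its vmctx) alive.
    /// ```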
    pub unsafe fn push_arc_host(
        &mut self,
        func: Arc<HostFunc>,
        modules: &ModuleRegistry,
    ) -> NonNull<VMFuncRef> {
        debug_assert!(func.func_ref().wasm_call.is_none());
        // SAFETY: the caller upholds the same contract as `push` itself.
        let ret = unsafe { self.push(func.func_ref().clone(), modules) };
        self.storage.push(Storage::ArcHost { func });
        ret
    }

    /// Same as `push_arc_host`, but for uniquely-owned host functions.
    ///
    /// # Safety
    ///
    /// Same as `push_arc_host`.
    pub unsafe fn push_box_host(
        &mut self,
        func: Box<HostFunc>,
        modules: &ModuleRegistry,
    ) -> NonNull<VMFuncRef> {
        debug_assert!(func.func_ref().wasm_call.is_none());
        // SAFETY: the caller upholds the same contract as `push` itself.
        let ret = unsafe { self.push(func.func_ref().clone(), modules) };
        self.storage.push(Storage::BoxHost { func });
        ret
    }
}

/// Attempts to fill the `wasm_call` field of `func_ref` using the trampolines
/// registered in `modules`, returning `true` if the field was filled and
/// `false` otherwise.
///
/// # Panics
///
/// Panics, when debug assertions are enabled, if `func_ref.wasm_call.is_some()`.
///
/// # Safety
///
/// This relies on `func_ref` being a valid pointer with a valid `vmctx` field.
unsafe fn try_fill(func_ref: &mut VMFuncRef, modules: &ModuleRegistry) -> bool {
    debug_assert!(func_ref.wasm_call.is_none());

    // Debug assert that the vmctx is a `VMArrayCallHostFuncContext` as
    // that is the only kind that can have holes.
    //
    // SAFETY: the validity of `vmctx` is a contract of this function itself.
    unsafe {
        let _ = VMArrayCallHostFuncContext::from_opaque(func_ref.vmctx.as_non_null());
    }

    // Look up a wasm-to-array trampoline for this function's type; the hole
    // (`None`) stays in place if no registered module supplies one yet.
    func_ref.wasm_call = modules
        .wasm_to_array_trampoline(func_ref.type_index)
        .map(|f| f.into());
    func_ref.wasm_call.is_some()
}