wasmtime/runtime/vm/instance/allocator.rs

use crate::prelude::*;
use crate::runtime::vm::const_expr::{ConstEvalContext, ConstExprEvaluator};
use crate::runtime::vm::imports::Imports;
use crate::runtime::vm::instance::{Instance, InstanceHandle};
use crate::runtime::vm::memory::Memory;
use crate::runtime::vm::mpk::ProtectionKey;
use crate::runtime::vm::table::Table;
use crate::runtime::vm::{CompiledModuleId, ModuleRuntimeInfo, VMFuncRef, VMGcRef, VMStore};
use crate::store::{AutoAssertNoGc, StoreOpaque};
use crate::vm::VMGlobalDefinition;
use core::ptr::NonNull;
use core::{any::Any, mem, ptr};
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization,
    MemoryInitializer, Module, PrimaryMap, SizeOverflow, TableInitialValue, Trap, Tunables,
    VMOffsets, WasmHeapTopType,
};

#[cfg(feature = "gc")]
use crate::runtime::vm::{GcHeap, GcRuntime};

#[cfg(feature = "component-model")]
use wasmtime_environ::{
    component::{Component, VMComponentOffsets},
    StaticModuleIndex,
};

mod on_demand;
pub use self::on_demand::OnDemandInstanceAllocator;

#[cfg(feature = "pooling-allocator")]
mod pooling;
#[cfg(feature = "pooling-allocator")]
pub use self::pooling::{
    InstanceLimits, PoolConcurrencyLimitError, PoolingInstanceAllocator,
    PoolingInstanceAllocatorConfig,
};

/// Represents a request for a new runtime instance.
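///
/// # Example
///
/// A minimal sketch of building a request; the `runtime_info`, `imports`,
/// and `tunables` values here are hypothetical and would come from the
/// surrounding instantiation machinery:
///
/// ```ignore
/// let request = InstanceAllocationRequest {
///     runtime_info: &runtime_info, // hypothetical: per-module compiled info
///     imports,                     // hypothetical: resolved imports
///     host_state: Box::new(()),
///     store: StorePtr::empty(),
///     wmemcheck: false,
///     pkey: None,
///     tunables: &tunables,         // hypothetical: engine tunables
/// };
/// ```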
pub struct InstanceAllocationRequest<'a> {
    /// The info related to the compiled version of this module,
    /// needed for instantiation: function metadata, JIT code
    /// addresses, precomputed images for lazy memory and table
    /// initialization, and the like. This Arc is cloned and held for
    /// the lifetime of the instance.
    pub runtime_info: &'a ModuleRuntimeInfo,

    /// The imports to use for the instantiation.
    pub imports: Imports<'a>,

    /// The host state to associate with the instance.
    pub host_state: Box<dyn Any + Send + Sync>,

    /// A pointer to the "store" for this instance to be allocated. The store
    /// correlates with the `Store` in wasmtime itself, and lots of contextual
    /// information about the execution of wasm can be learned through the
    /// store.
    ///
    /// Note that this is a raw pointer and has a static lifetime, both of which
    /// are a bit of a lie. This is done purely so a store can learn about
    /// itself when it gets called as a host function, and additionally so this
    /// runtime can access internals as necessary (such as the
    /// VMExternRefActivationsTable or the resource limiter methods).
    ///
    /// Note that this ends up being a self-pointer to the instance when stored.
    /// The reason is that the instance itself is then stored within the store.
    /// We use a number of `PhantomPinned` declarations to indicate this to the
    /// compiler. More info on this in `wasmtime/src/store.rs`.
    pub store: StorePtr,
    /// Indicates whether the `--wmemcheck` flag is enabled.
    #[cfg_attr(not(feature = "wmemcheck"), allow(dead_code))]
    pub wmemcheck: bool,

    /// Request that the instance's memories be protected by a specific
    /// protection key.
    #[cfg_attr(
        not(feature = "pooling-allocator"),
        expect(
            dead_code,
            reason = "easier to keep this field than remove it, not perf-critical to remove"
        )
    )]
    pub pkey: Option<ProtectionKey>,

    /// Tunable configuration options the engine is using.
    pub tunables: &'a Tunables,
}

/// A pointer to a Store. The `Option<NonNull<dyn VMStore>>` is wrapped in a
/// struct so that the method to create a `&mut dyn VMStore` is on a member of
/// `InstanceAllocationRequest`, rather than on `&mut InstanceAllocationRequest`
/// itself, because several use-sites require a split mut borrow on the
/// `InstanceAllocationRequest`.
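///
/// # Example
///
/// A minimal sketch of the two construction paths; `raw_store` is a
/// hypothetical `NonNull<dyn VMStore>` obtained from a live store:
///
/// ```ignore
/// // A request with no store attached yet:
/// let empty = StorePtr::empty();
/// assert!(empty.as_raw().is_none());
///
/// // Wrapping a raw store pointer:
/// let ptr = StorePtr::new(raw_store);
/// assert!(ptr.as_raw().is_some());
/// ```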
pub struct StorePtr(Option<NonNull<dyn VMStore>>);

impl StorePtr {
    /// A pointer to no Store.
    pub fn empty() -> Self {
        Self(None)
    }

    /// A pointer to a Store.
    pub fn new(ptr: NonNull<dyn VMStore>) -> Self {
        Self(Some(ptr))
    }

    /// The raw contents of this struct.
    pub fn as_raw(&self) -> Option<NonNull<dyn VMStore>> {
        self.0
    }

    /// Use the `StorePtr` as a mutable reference to the Store.
    ///
    /// # Safety
    ///
    /// Must not be used outside the original lifetime of the borrow.
    pub(crate) unsafe fn get(&mut self) -> Option<&mut dyn VMStore> {
        let ptr = self.0?.as_mut();
        Some(ptr)
    }
}

/// The index of a memory allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct MemoryAllocationIndex(u32);

impl Default for MemoryAllocationIndex {
    fn default() -> Self {
        // A default `MemoryAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        MemoryAllocationIndex(u32::MAX)
    }
}

impl MemoryAllocationIndex {
    /// Get the underlying index of this `MemoryAllocationIndex`.
    #[cfg(feature = "pooling-allocator")]
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// The index of a table allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct TableAllocationIndex(u32);

impl Default for TableAllocationIndex {
    fn default() -> Self {
        // A default `TableAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        TableAllocationIndex(u32::MAX)
    }
}

impl TableAllocationIndex {
    /// Get the underlying index of this `TableAllocationIndex`.
    #[cfg(feature = "pooling-allocator")]
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// The index of a GC heap allocation within an `InstanceAllocator`.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
pub struct GcHeapAllocationIndex(u32);

impl Default for GcHeapAllocationIndex {
    fn default() -> Self {
        // A default `GcHeapAllocationIndex` that can be used with
        // `InstanceAllocator`s that don't actually need indices.
        GcHeapAllocationIndex(u32::MAX)
    }
}

impl GcHeapAllocationIndex {
    /// Get the underlying index of this `GcHeapAllocationIndex`.
    pub fn index(&self) -> usize {
        self.0 as usize
    }
}

/// Trait that represents the hooks needed to implement an instance allocator.
///
/// Implement this trait when implementing new instance allocators, but don't
/// use this trait when you need an instance allocator. Instead use the
/// `InstanceAllocator` trait for that, which has additional helper methods and
/// a blanket implementation for all types that implement this trait.
///
/// # Safety
///
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime
/// internals to implement correctly.
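///
/// # Example
///
/// A sketch of the intended layering; `CustomAllocator` is hypothetical and
/// all hook implementations are elided:
///
/// ```ignore
/// struct CustomAllocator { /* ... */ }
///
/// unsafe impl InstanceAllocatorImpl for CustomAllocator {
///     // ... validation, memory/table (de)allocation, counters, etc. ...
/// }
///
/// // The blanket impl at the bottom of this module then provides
/// // `InstanceAllocator` for free:
/// fn _takes_allocator(a: &CustomAllocator) {
///     let _: &dyn InstanceAllocator = a;
/// }
/// ```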
pub unsafe trait InstanceAllocatorImpl {
    /// Validate whether a component (including all of its contained core
    /// modules) is allocatable by this instance allocator.
    #[cfg(feature = "component-model")]
    fn validate_component_impl<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()>;

    /// Validate whether a module is allocatable by this instance allocator.
    fn validate_module_impl(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()>;

    /// Increment the count of concurrent component instances that are currently
    /// allocated, if applicable.
    ///
    /// Not all instance allocators will have limits for the maximum number of
    /// concurrent component instances that can be live at the same time, and
    /// these allocators may implement this method with a no-op.
    //
    // Note: It would be nice to have an associated type that on construction
    // does the increment and on drop does the decrement but there are two
    // problems with this:
    //
    // 1. This trait's implementations are always used as trait objects, and
    //    associated types are not object safe.
    //
    // 2. We would want a parameterized `Drop` implementation so that we could
    //    pass in the `InstanceAllocatorImpl` on drop, but this doesn't exist in
    //    Rust. Therefore, we would be forced to add reference counting and
    //    stuff like that to keep a handle on the instance allocator from this
    //    theoretical type. That's a bummer.
    fn increment_component_instance_count(&self) -> Result<()>;

    /// The dual of `increment_component_instance_count`.
    fn decrement_component_instance_count(&self);

    /// Increment the count of concurrent core module instances that are
    /// currently allocated, if applicable.
    ///
    /// Not all instance allocators will have limits for the maximum number of
    /// concurrent core module instances that can be live at the same time, and
    /// these allocators may implement this method with a no-op.
    fn increment_core_instance_count(&self) -> Result<()>;

    /// The dual of `increment_core_instance_count`.
    fn decrement_core_instance_count(&self);

    /// Allocate a memory for an instance.
    ///
    /// # Unsafety
    ///
    /// The memory and its associated module must have already been validated by
    /// `Self::validate_module` and passed that validation.
    unsafe fn allocate_memory(
        &self,
        request: &mut InstanceAllocationRequest,
        ty: &wasmtime_environ::Memory,
        tunables: &Tunables,
        memory_index: DefinedMemoryIndex,
    ) -> Result<(MemoryAllocationIndex, Memory)>;

    /// Deallocate an instance's previously allocated memory.
    ///
    /// # Unsafety
    ///
    /// The memory must have previously been allocated by
    /// `Self::allocate_memory`, be at the given index, and must currently be
    /// allocated. It must never be used again.
    unsafe fn deallocate_memory(
        &self,
        memory_index: DefinedMemoryIndex,
        allocation_index: MemoryAllocationIndex,
        memory: Memory,
    );

    /// Allocate a table for an instance.
    ///
    /// # Unsafety
    ///
    /// The table and its associated module must have already been validated by
    /// `Self::validate_module` and passed that validation.
    unsafe fn allocate_table(
        &self,
        req: &mut InstanceAllocationRequest,
        table: &wasmtime_environ::Table,
        tunables: &Tunables,
        table_index: DefinedTableIndex,
    ) -> Result<(TableAllocationIndex, Table)>;

    /// Deallocate an instance's previously allocated table.
    ///
    /// # Unsafety
    ///
    /// The table must have previously been allocated by `Self::allocate_table`,
    /// be at the given index, and must currently be allocated. It must never be
    /// used again.
    unsafe fn deallocate_table(
        &self,
        table_index: DefinedTableIndex,
        allocation_index: TableAllocationIndex,
        table: Table,
    );

    /// Allocates a fiber stack for calling async functions on.
    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack>;

    /// Deallocates a fiber stack that was previously allocated with
    /// `allocate_fiber_stack`.
    ///
    /// # Safety
    ///
    /// The provided stack is required to have been allocated with
    /// `allocate_fiber_stack`.
    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, stack: wasmtime_fiber::FiberStack);

    /// Allocate a GC heap for allocating Wasm GC objects within.
    #[cfg(feature = "gc")]
    fn allocate_gc_heap(
        &self,
        gc_runtime: &dyn GcRuntime,
    ) -> Result<(GcHeapAllocationIndex, Box<dyn GcHeap>)>;

    /// Deallocate a GC heap that was previously allocated with
    /// `allocate_gc_heap`.
    #[cfg(feature = "gc")]
    fn deallocate_gc_heap(&self, allocation_index: GcHeapAllocationIndex, gc_heap: Box<dyn GcHeap>);

    /// Purges all lingering resources related to `module` from within this
    /// allocator.
    ///
    /// Primarily present for the pooling allocator to remove mappings of
    /// this module from slots in linear memory.
    fn purge_module(&self, module: CompiledModuleId);

    /// Use the next available protection key.
    ///
    /// The pooling allocator can use memory protection keys (MPK) for
    /// compressing the guard regions protecting against OOB. Each
    /// pool-allocated store needs its own key.
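    ///
    /// A sketch of the intended flow, with a hypothetical `allocator`:
    ///
    /// ```ignore
    /// if let Some(pkey) = allocator.next_available_pkey() {
    ///     // Restrict this thread to the store's own key before running wasm:
    ///     allocator.restrict_to_pkey(pkey);
    /// }
    /// // Later, lift the restriction again:
    /// allocator.allow_all_pkeys();
    /// ```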
    fn next_available_pkey(&self) -> Option<ProtectionKey>;

    /// Restrict access to memory regions protected by `pkey`.
    ///
    /// This is useful for the pooling allocator, which can use memory
    /// protection keys (MPK). Note: this may still allow access to other
    /// protection keys, such as the default kernel key; see implementations of
    /// this.
    fn restrict_to_pkey(&self, pkey: ProtectionKey);

    /// Allow access to memory regions protected by any protection key.
    fn allow_all_pkeys(&self);
}

/// A thing that can allocate instances.
///
/// Don't implement this trait directly; instead implement
/// `InstanceAllocatorImpl` and you'll get this trait for free via a blanket
/// impl.
pub trait InstanceAllocator: InstanceAllocatorImpl {
    /// Validate whether a component (including all of its contained core
    /// modules) is allocatable with this instance allocator.
    #[cfg(feature = "component-model")]
    fn validate_component<'a>(
        &self,
        component: &Component,
        offsets: &VMComponentOffsets<HostPtr>,
        get_module: &'a dyn Fn(StaticModuleIndex) -> &'a Module,
    ) -> Result<()> {
        InstanceAllocatorImpl::validate_component_impl(self, component, offsets, get_module)
    }

    /// Validate whether a core module is allocatable with this instance
    /// allocator.
    fn validate_module(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        InstanceAllocatorImpl::validate_module_impl(self, module, offsets)
    }

    /// Allocates a fresh `InstanceHandle` for the `request` given.
    ///
    /// This will allocate memories and tables internally from this allocator
    /// and weave them all together into a final and complete `InstanceHandle`
    /// ready to be registered with a store.
    ///
    /// Note that the returned instance must still have `.initialize(..)` called
    /// on it to complete the instantiation process.
    ///
    /// # Unsafety
    ///
    /// The request's associated module, memories, tables, and vmctx must
    /// already have been validated by `Self::validate_module`.
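    ///
    /// # Example
    ///
    /// A sketch of the expected call sequence; `allocator`, `module`,
    /// `offsets`, and `request` are hypothetical values from the caller:
    ///
    /// ```ignore
    /// allocator.validate_module(module, offsets)?;
    /// let mut handle = unsafe { allocator.allocate_module(request)? };
    /// // ... initialize and use the instance ...
    /// unsafe { allocator.deallocate_module(&mut handle) };
    /// ```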
    unsafe fn allocate_module(
        &self,
        mut request: InstanceAllocationRequest,
    ) -> Result<InstanceHandle> {
        let module = request.runtime_info.env_module();

        #[cfg(debug_assertions)]
        InstanceAllocatorImpl::validate_module_impl(self, module, request.runtime_info.offsets())
            .expect("module should have already been validated before allocation");

        self.increment_core_instance_count()?;

        let num_defined_memories = module.num_defined_memories();
        let mut memories = PrimaryMap::with_capacity(num_defined_memories);

        let num_defined_tables = module.num_defined_tables();
        let mut tables = PrimaryMap::with_capacity(num_defined_tables);

        match (|| {
            self.allocate_memories(&mut request, &mut memories)?;
            self.allocate_tables(&mut request, &mut tables)?;
            Ok(())
        })() {
            Ok(_) => Ok(Instance::new(request, memories, tables, &module.memories)),
            Err(e) => {
                self.deallocate_memories(&mut memories);
                self.deallocate_tables(&mut tables);
                self.decrement_core_instance_count();
                Err(e)
            }
        }
    }

    /// Deallocates the provided instance.
    ///
    /// This will null-out the pointer within `handle` and otherwise reclaim
    /// resources such as tables, memories, and the instance memory itself.
    ///
    /// # Unsafety
    ///
    /// The instance must have previously been allocated by
    /// `Self::allocate_module`.
    unsafe fn deallocate_module(&self, handle: &mut InstanceHandle) {
        self.deallocate_memories(&mut handle.instance_mut().memories);
        self.deallocate_tables(&mut handle.instance_mut().tables);

        let layout = Instance::alloc_layout(handle.instance().offsets());
        let ptr = handle.instance.take().unwrap();
        ptr::drop_in_place(ptr.as_ptr());
        alloc::alloc::dealloc(ptr.as_ptr().cast(), layout);

        self.decrement_core_instance_count();
    }

    /// Allocate the memories for the given instance allocation request, pushing
    /// them into `memories`.
    ///
    /// # Unsafety
    ///
    /// The request's associated module and memories must have previously been
    /// validated by `Self::validate_module`.
    unsafe fn allocate_memories(
        &self,
        request: &mut InstanceAllocationRequest,
        memories: &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
    ) -> Result<()> {
        let module = request.runtime_info.env_module();

        #[cfg(debug_assertions)]
        InstanceAllocatorImpl::validate_module_impl(self, module, request.runtime_info.offsets())
            .expect("module should have already been validated before allocation");

        for (memory_index, ty) in module.memories.iter().skip(module.num_imported_memories) {
            let memory_index = module
                .defined_memory_index(memory_index)
                .expect("should be a defined memory since we skipped imported ones");

            memories.push(self.allocate_memory(request, ty, request.tunables, memory_index)?);
        }

        Ok(())
    }

    /// Deallocate all the memories in the given primary map.
    ///
    /// # Unsafety
    ///
    /// The memories must have previously been allocated by
    /// `Self::allocate_memories`.
    unsafe fn deallocate_memories(
        &self,
        memories: &mut PrimaryMap<DefinedMemoryIndex, (MemoryAllocationIndex, Memory)>,
    ) {
        for (memory_index, (allocation_index, memory)) in mem::take(memories) {
            // Because deallocating memory is infallible, we don't need to worry
            // about leaking subsequent memories if the first memory failed to
            // deallocate. If deallocating memory ever becomes fallible, we will
            // need to be careful here!
            self.deallocate_memory(memory_index, allocation_index, memory);
        }
    }

    /// Allocate tables for the given instance allocation request, pushing them
    /// into `tables`.
    ///
    /// # Unsafety
    ///
    /// The request's associated module and tables must have previously been
    /// validated by `Self::validate_module`.
    unsafe fn allocate_tables(
        &self,
        request: &mut InstanceAllocationRequest,
        tables: &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) -> Result<()> {
        let module = request.runtime_info.env_module();

        #[cfg(debug_assertions)]
        InstanceAllocatorImpl::validate_module_impl(self, module, request.runtime_info.offsets())
            .expect("module should have already been validated before allocation");

        for (index, table) in module.tables.iter().skip(module.num_imported_tables) {
            let def_index = module
                .defined_table_index(index)
                .expect("should be a defined table since we skipped imported ones");

            tables.push(self.allocate_table(request, table, request.tunables, def_index)?);
        }

        Ok(())
    }

    /// Deallocate all the tables in the given primary map.
    ///
    /// # Unsafety
    ///
    /// The tables must have previously been allocated by
    /// `Self::allocate_tables`.
    unsafe fn deallocate_tables(
        &self,
        tables: &mut PrimaryMap<DefinedTableIndex, (TableAllocationIndex, Table)>,
    ) {
        for (table_index, (allocation_index, table)) in mem::take(tables) {
            self.deallocate_table(table_index, allocation_index, table);
        }
    }
}

// Every `InstanceAllocatorImpl` is an `InstanceAllocator` when used
// correctly. Also, no one is allowed to override this trait's methods, they
// must use the defaults. This blanket impl provides both of those things.
impl<T: InstanceAllocatorImpl> InstanceAllocator for T {}

fn check_table_init_bounds(
    store: &mut StoreOpaque,
    instance: &mut Instance,
    module: &Module,
) -> Result<()> {
    let mut const_evaluator = ConstExprEvaluator::default();

    for segment in module.table_initialization.segments.iter() {
        let table = unsafe { &*instance.get_table(segment.table_index) };
        let mut context = ConstEvalContext::new(instance);
        let start = unsafe {
            const_evaluator
                .eval(store, &mut context, &segment.offset)
                .expect("const expression should be valid")
        };
        let start = usize::try_from(start.get_u32()).unwrap();
        let end = start.checked_add(usize::try_from(segment.elements.len()).unwrap());

        match end {
            Some(end) if end <= table.size() => {
                // Initializer is in bounds
            }
            _ => {
                bail!("table out of bounds: elements segment does not fit")
            }
        }
    }

    Ok(())
}

fn initialize_tables(
    store: &mut StoreOpaque,
    context: &mut ConstEvalContext<'_>,
    const_evaluator: &mut ConstExprEvaluator,
    module: &Module,
) -> Result<()> {
    for (table, init) in module.table_initialization.initial_values.iter() {
        match init {
            // Tables are always initially null-initialized at this time
            TableInitialValue::Null { precomputed: _ } => {}

            TableInitialValue::Expr(expr) => {
                let raw = unsafe {
                    const_evaluator
                        .eval(store, context, expr)
                        .expect("const expression should be valid")
                };
                let idx = module.table_index(table);
                let table = unsafe { context.instance.get_defined_table(table).as_mut().unwrap() };
                match module.tables[idx].ref_type.heap_type.top() {
                    WasmHeapTopType::Extern => {
                        let gc_ref = VMGcRef::from_raw_u32(raw.get_externref());
                        let gc_store = store.gc_store_mut()?;
                        let items = (0..table.size())
                            .map(|_| gc_ref.as_ref().map(|r| gc_store.clone_gc_ref(r)));
                        table.init_gc_refs(0, items)?;
                    }

                    WasmHeapTopType::Any => {
                        let gc_ref = VMGcRef::from_raw_u32(raw.get_anyref());
                        let gc_store = store.gc_store_mut()?;
                        let items = (0..table.size())
                            .map(|_| gc_ref.as_ref().map(|r| gc_store.clone_gc_ref(r)));
                        table.init_gc_refs(0, items)?;
                    }

                    WasmHeapTopType::Func => {
                        let funcref = NonNull::new(raw.get_funcref().cast::<VMFuncRef>());
                        let items = (0..table.size()).map(|_| funcref);
                        table.init_func(0, items)?;
                    }

                    WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
                }
            }
        }
    }

    // Note: if the module's table initializer state is in
    // FuncTable mode, we will lazily initialize tables based on
    // any statically-precomputed image of FuncIndexes, but there
    // may still be "leftover segments" that could not be
    // incorporated. So we have a unified handler here that
    // iterates over all segments (Segments mode) or leftover
    // segments (FuncTable mode) to initialize.
    for segment in module.table_initialization.segments.iter() {
        let start = unsafe {
            const_evaluator
                .eval(store, context, &segment.offset)
                .expect("const expression should be valid")
        };
        context.instance.table_init_segment(
            store,
            const_evaluator,
            segment.table_index,
            &segment.elements,
            start.get_u64(),
            0,
            segment.elements.len(),
        )?;
    }

    Ok(())
}

fn get_memory_init_start(
    store: &mut StoreOpaque,
    init: &MemoryInitializer,
    instance: &mut Instance,
) -> Result<u64> {
    let mut context = ConstEvalContext::new(instance);
    let mut const_evaluator = ConstExprEvaluator::default();
    unsafe { const_evaluator.eval(store, &mut context, &init.offset) }.map(|v| {
        match instance.env_module().memories[init.memory_index].idx_type {
            wasmtime_environ::IndexType::I32 => v.get_u32().into(),
            wasmtime_environ::IndexType::I64 => v.get_u64(),
        }
    })
}

fn check_memory_init_bounds(
    store: &mut StoreOpaque,
    instance: &mut Instance,
    initializers: &[MemoryInitializer],
) -> Result<()> {
    for init in initializers {
        let memory = instance.get_memory(init.memory_index);
        let start = get_memory_init_start(store, init, instance)?;
        let end = usize::try_from(start)
            .ok()
            .and_then(|start| start.checked_add(init.data.len()));

        match end {
            Some(end) if end <= memory.current_length() => {
                // Initializer is in bounds
            }
            _ => {
                bail!("memory out of bounds: data segment does not fit")
            }
        }
    }

    Ok(())
}

fn initialize_memories(
    store: &mut StoreOpaque,
    context: &mut ConstEvalContext<'_>,
    const_evaluator: &mut ConstExprEvaluator,
    module: &Module,
) -> Result<()> {
    // Delegates to the `init_memory` method which is sort of a duplicate of
    // `instance.memory_init_segment` but is used at compile-time in other
    // contexts so is shared here to have only one method of memory
    // initialization.
    //
    // This call to `init_memory` notably implements all the bells and whistles
    // so errors only happen if an out-of-bounds segment is found, in which case
    // a trap is returned.

    struct InitMemoryAtInstantiation<'a, 'b> {
        module: &'a Module,
        store: &'a mut StoreOpaque,
        context: &'a mut ConstEvalContext<'b>,
        const_evaluator: &'a mut ConstExprEvaluator,
    }

    impl InitMemory for InitMemoryAtInstantiation<'_, '_> {
        fn memory_size_in_bytes(
            &mut self,
            memory: wasmtime_environ::MemoryIndex,
        ) -> Result<u64, SizeOverflow> {
            let len = self.context.instance.get_memory(memory).current_length();
            let len = u64::try_from(len).unwrap();
            Ok(len)
        }

        fn eval_offset(
            &mut self,
            memory: wasmtime_environ::MemoryIndex,
            expr: &wasmtime_environ::ConstExpr,
        ) -> Option<u64> {
            let val = unsafe { self.const_evaluator.eval(self.store, self.context, expr) }
                .expect("const expression should be valid");
            Some(
                match self.context.instance.env_module().memories[memory].idx_type {
                    wasmtime_environ::IndexType::I32 => val.get_u32().into(),
                    wasmtime_environ::IndexType::I64 => val.get_u64(),
                },
            )
        }

        fn write(
            &mut self,
            memory_index: wasmtime_environ::MemoryIndex,
            init: &wasmtime_environ::StaticMemoryInitializer,
        ) -> bool {
            // If this initializer applies to a defined memory but that memory
            // doesn't need initialization, due to something like copy-on-write
            // pre-initializing it via mmap magic, then this initializer can be
            // skipped entirely.
            if let Some(memory_index) = self.module.defined_memory_index(memory_index) {
                if !self.context.instance.memories[memory_index].1.needs_init() {
                    return true;
                }
            }
            let memory = self.context.instance.get_memory(memory_index);

            unsafe {
                let src = self.context.instance.wasm_data(init.data.clone());
                let offset = usize::try_from(init.offset).unwrap();
                let dst = memory.base.as_ptr().add(offset);

                assert!(offset + src.len() <= memory.current_length());

                // FIXME audit whether this is safe in the presence of shared
                // memory
                // (https://github.com/bytecodealliance/wasmtime/issues/4203).
                ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len())
            }
            true
        }
    }

    let ok = module
        .memory_initialization
        .init_memory(&mut InitMemoryAtInstantiation {
            module,
            store,
            context,
            const_evaluator,
        });
    if !ok {
        return Err(Trap::MemoryOutOfBounds.into());
    }

    Ok(())
}

fn check_init_bounds(
    store: &mut StoreOpaque,
    instance: &mut Instance,
    module: &Module,
) -> Result<()> {
    check_table_init_bounds(store, instance, module)?;

    match &module.memory_initialization {
        MemoryInitialization::Segmented(initializers) => {
            check_memory_init_bounds(store, instance, initializers)?;
        }
        // Statically validated already to have everything in-bounds.
        MemoryInitialization::Static { .. } => {}
    }

    Ok(())
}

fn initialize_globals(
    store: &mut StoreOpaque,
    context: &mut ConstEvalContext<'_>,
    const_evaluator: &mut ConstExprEvaluator,
    module: &Module,
) -> Result<()> {
    assert!(core::ptr::eq(&**context.instance.env_module(), module));

    let mut store = AutoAssertNoGc::new(store);

    for (index, init) in module.global_initializers.iter() {
        let raw = unsafe {
            const_evaluator
                .eval(&mut store, context, init)
                .expect("should be a valid const expr")
        };

        let to = context.instance.global_ptr(index);
        let wasm_ty = module.globals[module.global_index(index)].wasm_ty;

        #[cfg(feature = "wmemcheck")]
        if index.as_u32() == 0 && wasm_ty == wasmtime_environ::WasmValType::I32 {
            if let Some(wmemcheck) = &mut context.instance.wmemcheck_state {
                let size = usize::try_from(raw.get_i32()).unwrap();
                wmemcheck.set_stack_size(size);
            }
        }

        // This write is safe because we know we have the correct module for
        // this instance and its vmctx due to the assert above.
        unsafe {
            to.write(VMGlobalDefinition::from_val_raw(&mut store, wasm_ty, raw)?);
        };
    }
    Ok(())
}

pub(super) fn initialize_instance(
    store: &mut StoreOpaque,
    instance: &mut Instance,
    module: &Module,
    is_bulk_memory: bool,
) -> Result<()> {
    // If bulk memory is not enabled, bounds check the data and element segments
    // before making any changes. With bulk memory enabled, initializers are
    // processed in-order and side effects are observed up to the point of an
    // out-of-bounds initializer, so the early checking is not desired.
    if !is_bulk_memory {
        check_init_bounds(store, instance, module)?;
    }
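    // For example, with bulk memory a module whose first data segment is in
    // bounds and second is out of bounds will observe the first segment's
    // writes before instantiation fails; without bulk memory the up-front
    // check above rejects the module before any writes happen.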

    let mut context = ConstEvalContext::new(instance);
    let mut const_evaluator = ConstExprEvaluator::default();

    initialize_globals(store, &mut context, &mut const_evaluator, module)?;
    initialize_tables(store, &mut context, &mut const_evaluator, module)?;
    initialize_memories(store, &mut context, &mut const_evaluator, module)?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn allocator_traits_are_object_safe() {
        fn _instance_allocator(_: &dyn InstanceAllocatorImpl) {}
        fn _instance_allocator_ext(_: &dyn InstanceAllocator) {}
    }
}
865}