wasmtime_cranelift/
func_environ.rs

use crate::compiler::Compiler;
use crate::translate::{
    FuncTranslationState, GlobalVariable, Heap, HeapData, StructFieldsVec, TableData, TableSize,
    TargetEnvironment,
};
use crate::{gc, BuiltinFunctionSignatures, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
use cranelift_codegen::ir::immediates::{Imm64, Offset32};
use cranelift_codegen::ir::pcc::Fact;
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{self, types};
use cranelift_codegen::ir::{ArgumentPurpose, Function, InstBuilder, MemFlags};
use cranelift_codegen::isa::{TargetFrontendConfig, TargetIsa};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap};
use cranelift_frontend::FunctionBuilder;
use cranelift_frontend::Variable;
use smallvec::SmallVec;
use std::mem;
use wasmparser::{Operator, WasmFeatures};
use wasmtime_environ::{
    BuiltinFunctionIndex, DataIndex, ElemIndex, EngineOrModuleTypeIndex, FuncIndex, GlobalIndex,
    IndexType, Memory, MemoryIndex, Module, ModuleInternedTypeIndex, ModuleTranslation,
    ModuleTypesBuilder, PtrSize, Table, TableIndex, TripleExt, Tunables, TypeConvert, TypeIndex,
    VMOffsets, WasmCompositeInnerType, WasmFuncType, WasmHeapTopType, WasmHeapType, WasmRefType,
    WasmResult, WasmValType,
};
use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK};

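/// Whether a value should be sign- or zero-extended when it is widened.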
#[derive(Debug)]
pub(crate) enum Extension {
    Sign,
    Zero,
}

/// A struct with an `Option<ir::FuncRef>` member for every builtin
/// function, used to de-duplicate constructing and importing each builtin.
pub(crate) struct BuiltinFunctions {
    types: BuiltinFunctionSignatures,

    builtins: [Option<ir::FuncRef>; BuiltinFunctionIndex::len() as usize],
}

impl BuiltinFunctions {
    fn new(compiler: &Compiler) -> Self {
        Self {
            types: BuiltinFunctionSignatures::new(compiler),
            builtins: [None; BuiltinFunctionIndex::len() as usize],
        }
    }

    fn load_builtin(&mut self, func: &mut Function, index: BuiltinFunctionIndex) -> ir::FuncRef {
        let cache = &mut self.builtins[index.index() as usize];
        if let Some(f) = cache {
            return *f;
        }
        let signature = func.import_signature(self.types.wasm_signature(index));
        let name =
            ir::ExternalName::User(func.declare_imported_user_function(ir::UserExternalName {
                namespace: crate::NS_WASMTIME_BUILTIN,
                index: index.index(),
            }));
        let f = func.import_function(ir::ExtFuncData {
            name,
            signature,
            colocated: true,
        });
        *cache = Some(f);
        f
    }
}

// Generate helper methods on `BuiltinFunctions` above for each named builtin
// as well.
macro_rules! declare_function_signatures {
    ($(
        $( #[$attr:meta] )*
        $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
    )*) => {
        $(impl BuiltinFunctions {
            $( #[$attr] )*
            pub(crate) fn $name(&mut self, func: &mut Function) -> ir::FuncRef {
                self.load_builtin(func, BuiltinFunctionIndex::$name())
            }
        })*
    };
}
wasmtime_environ::foreach_builtin_function!(declare_function_signatures);

/// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`.
pub struct FuncEnvironment<'module_environment> {
    compiler: &'module_environment Compiler,
    isa: &'module_environment (dyn TargetIsa + 'module_environment),
    pub(crate) module: &'module_environment Module,
    pub(crate) types: &'module_environment ModuleTypesBuilder,
    wasm_func_ty: &'module_environment WasmFuncType,
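    /// Map from CLIF signature references used in this function back to the
    /// Wasm function type they were created from.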
    sig_ref_to_ty: SecondaryMap<ir::SigRef, Option<&'module_environment WasmFuncType>>,

    #[cfg(feature = "gc")]
    pub(crate) ty_to_gc_layout: std::collections::HashMap<
        wasmtime_environ::ModuleInternedTypeIndex,
        wasmtime_environ::GcLayout,
    >,

    #[cfg(feature = "wmemcheck")]
    translation: &'module_environment ModuleTranslation<'module_environment>,

    /// Heaps implementing WebAssembly linear memories.
    heaps: PrimaryMap<Heap, HeapData>,

    /// Cranelift tables we have created to implement Wasm tables.
    tables: SecondaryMap<TableIndex, Option<TableData>>,

    /// The Cranelift global holding the vmctx address.
    vmctx: Option<ir::GlobalValue>,

    /// The PCC memory type describing the vmctx layout, if we're
    /// using PCC.
    pcc_vmctx_memtype: Option<ir::MemoryType>,

    /// Caches of signatures for builtin functions.
    pub(crate) builtin_functions: BuiltinFunctions,

    /// Offsets to struct fields accessed by JIT code.
    pub(crate) offsets: VMOffsets<u8>,

    pub(crate) tunables: &'module_environment Tunables,

    /// A function-local variable which stores the cached value of the amount of
    /// fuel remaining to execute. If used, this is modified frequently, so it's
    /// stored locally as a variable instead of always being loaded from the
    /// field in `*const VMStoreContext`.
    fuel_var: cranelift_frontend::Variable,

    /// A function-local variable which caches the value of `*const
    /// VMStoreContext` for this function's vmctx argument. This pointer is stored
    /// in the vmctx itself, but never changes for the lifetime of the function,
    /// so if we load it up front we can continue to use it throughout.
    vmstore_context_ptr: ir::Value,

    /// A cached epoch deadline value, when performing epoch-based
    /// interruption. Loaded from `VMStoreContext` and reloaded after
    /// any yield.
    epoch_deadline_var: cranelift_frontend::Variable,

    /// A cached pointer to the per-Engine epoch counter, when
    /// performing epoch-based interruption. Initialized in the
    /// function prologue. We prefer to use a variable here rather
    /// than reload on each check because it's better to let the
    /// regalloc keep it in a register if able; if not, it can always
    /// spill, and this isn't any worse than reloading each time.
    epoch_ptr_var: cranelift_frontend::Variable,

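    /// Fuel consumed by the operators translated so far that has not yet been
    /// added to `fuel_var`; flushed into the variable by `fuel_increment_var`.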
    fuel_consumed: i64,

    /// A `GlobalValue` in CLIF which represents the stack limit.
    ///
    /// Typically this resides in the `stack_limit` value of `ir::Function` but
    /// that requires signal handlers on the host and when that's disabled this
    /// is here with an explicit check instead. Note that the explicit check is
    /// always present even if this is a "leaf" function, as we have to call
    /// into the host to trap when signal handlers are disabled.
    pub(crate) stack_limit_at_function_entry: Option<ir::GlobalValue>,
}

impl<'module_environment> FuncEnvironment<'module_environment> {
    pub fn new(
        compiler: &'module_environment Compiler,
        translation: &'module_environment ModuleTranslation<'module_environment>,
        types: &'module_environment ModuleTypesBuilder,
        wasm_func_ty: &'module_environment WasmFuncType,
    ) -> Self {
        let tunables = compiler.tunables();
        let builtin_functions = BuiltinFunctions::new(compiler);

        // This isn't used during translation, so squash the warning about this
        // being unused from the compiler.
        let _ = BuiltinFunctions::raise;

        Self {
            isa: compiler.isa(),
            module: &translation.module,
            compiler,
            types,
            wasm_func_ty,
            sig_ref_to_ty: SecondaryMap::default(),

            #[cfg(feature = "gc")]
            ty_to_gc_layout: std::collections::HashMap::new(),

            heaps: PrimaryMap::default(),
            tables: SecondaryMap::default(),
            vmctx: None,
            pcc_vmctx_memtype: None,
            builtin_functions,
            offsets: VMOffsets::new(compiler.isa().pointer_bytes(), &translation.module),
            tunables,
            fuel_var: Variable::new(0),
            epoch_deadline_var: Variable::new(0),
            epoch_ptr_var: Variable::new(0),
            vmstore_context_ptr: ir::Value::reserved_value(),

            // Start with at least one fuel being consumed because even empty
            // functions should consume at least some fuel.
            fuel_consumed: 1,

            #[cfg(feature = "wmemcheck")]
            translation,

            stack_limit_at_function_entry: None,
        }
    }

    pub(crate) fn pointer_type(&self) -> ir::Type {
        self.isa.pointer_type()
    }

    pub(crate) fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue {
        self.vmctx.unwrap_or_else(|| {
            let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
            if self.isa.flags().enable_pcc() {
                // Create a placeholder memtype for the vmctx; we'll
                // add fields to it as we lazily create HeapData
                // structs and global values.
                let vmctx_memtype = func.create_memory_type(ir::MemoryTypeData::Struct {
                    size: 0,
                    fields: vec![],
                });

                self.pcc_vmctx_memtype = Some(vmctx_memtype);
                func.global_value_facts[vmctx] = Some(Fact::Mem {
                    ty: vmctx_memtype,
                    min_offset: 0,
                    max_offset: 0,
                    nullable: false,
                });
            }

            self.vmctx = Some(vmctx);
            vmctx
        })
    }

    pub(crate) fn vmctx_val(&mut self, pos: &mut FuncCursor<'_>) -> ir::Value {
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(&mut pos.func);
        pos.ins().global_value(pointer_type, vmctx)
    }

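    /// Get the `table.copy` builtin along with the destination and source
    /// table indices to pass to it.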
    fn get_table_copy_func(
        &mut self,
        func: &mut Function,
        dst_table_index: TableIndex,
        src_table_index: TableIndex,
    ) -> (ir::FuncRef, usize, usize) {
        let sig = self.builtin_functions.table_copy(func);
        (
            sig,
            dst_table_index.as_u32() as usize,
            src_table_index.as_u32() as usize,
        )
    }

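    /// Get the `memory.atomic.wait32` or `memory.atomic.wait64` builtin for
    /// the given operand type, along with the memory index to pass to it.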
    #[cfg(feature = "threads")]
    fn get_memory_atomic_wait(
        &mut self,
        func: &mut Function,
        memory_index: MemoryIndex,
        ty: ir::Type,
    ) -> (ir::FuncRef, usize) {
        match ty {
            I32 => (
                self.builtin_functions.memory_atomic_wait32(func),
                memory_index.index(),
            ),
            I64 => (
                self.builtin_functions.memory_atomic_wait64(func),
                memory_index.index(),
            ),
            x => panic!("get_memory_atomic_wait unsupported type: {x:?}"),
        }
    }

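    /// Get the base global value and offset at which the given Wasm global's
    /// definition lives, resolving imported globals through the pointer stored
    /// in the vmctx.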
    fn get_global_location(
        &mut self,
        func: &mut ir::Function,
        index: GlobalIndex,
    ) -> (ir::GlobalValue, i32) {
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(func);
        if let Some(def_index) = self.module.defined_global_index(index) {
            let offset = i32::try_from(self.offsets.vmctx_vmglobal_definition(def_index)).unwrap();
            (vmctx, offset)
        } else {
            let from_offset = self.offsets.vmctx_vmglobal_import_from(index);
            let global = func.create_global_value(ir::GlobalValueData::Load {
                base: vmctx,
                offset: Offset32::new(i32::try_from(from_offset).unwrap()),
                global_type: pointer_type,
                flags: MemFlags::trusted().with_readonly().with_can_move(),
            });
            (global, 0)
        }
    }

    fn declare_vmstore_context_ptr(&mut self, builder: &mut FunctionBuilder<'_>) {
        // We load the `*const VMStoreContext` value stored within vmctx at the
        // head of the function and reuse the same value across the entire
        // function. This is possible since we know that the pointer never
        // changes for the lifetime of the function.
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(builder.func);
        let base = builder.ins().global_value(pointer_type, vmctx);
        let offset = i32::from(self.offsets.ptr.vmctx_runtime_limits());
        debug_assert!(self.vmstore_context_ptr.is_reserved_value());
        self.vmstore_context_ptr = builder.ins().load(
            pointer_type,
            ir::MemFlags::trusted().with_readonly().with_can_move(),
            base,
            offset,
        );
    }

    fn fuel_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On function entry we load the amount of fuel into a function-local
        // `self.fuel_var` to make fuel modifications fast locally. This cache
        // is then periodically flushed to the Store-defined location in
        // `VMStoreContext` later.
        builder.declare_var(self.fuel_var, ir::types::I64);
        self.fuel_load_into_var(builder);
        self.fuel_check(builder);
    }

    fn fuel_function_exit(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On exiting the function we need to be sure to save the fuel we have
        // cached locally in `self.fuel_var` back into the Store-defined
        // location.
        self.fuel_save_from_var(builder);
    }

    fn fuel_before_op(
        &mut self,
        op: &Operator<'_>,
        builder: &mut FunctionBuilder<'_>,
        reachable: bool,
    ) {
        if !reachable {
            // In unreachable code we shouldn't have any leftover fuel we
            // haven't accounted for since the reason for us to become
            // unreachable should have already added it to `self.fuel_var`.
            debug_assert_eq!(self.fuel_consumed, 0);
            return;
        }

        self.fuel_consumed += match op {
            // Nop and drop generate no code, so don't consume fuel for them.
            Operator::Nop | Operator::Drop => 0,

            // Control flow may create branches, but is generally cheap and
            // free, so don't consume fuel. Note the lack of `if` since some
            // cost is incurred with the conditional check.
            Operator::Block { .. }
            | Operator::Loop { .. }
            | Operator::Unreachable
            | Operator::Return
            | Operator::Else
            | Operator::End => 0,

            // everything else, just call it one operation.
            _ => 1,
        };

        match op {
            // Exiting a function (via a return or unreachable) or otherwise
            // entering a different function (via a call) means that we need to
            // update the fuel consumption in `VMStoreContext` because we're
            // about to move control out of this function itself and the fuel
            // may need to be read.
            //
            // Before this we need to update the fuel counter from our own cost
            // leading up to this function call, and then we can store
            // `self.fuel_var` into `VMStoreContext`.
            Operator::Unreachable
            | Operator::Return
            | Operator::CallIndirect { .. }
            | Operator::Call { .. }
            | Operator::ReturnCall { .. }
            | Operator::ReturnCallRef { .. }
            | Operator::ReturnCallIndirect { .. } => {
                self.fuel_increment_var(builder);
                self.fuel_save_from_var(builder);
            }

            // To ensure all code preceding a loop is only counted once we
            // update the fuel variable on entry.
            Operator::Loop { .. }

            // Entering into an `if` block means that the edge we take isn't
            // known until runtime, so we need to update our fuel consumption
            // before we take the branch.
            | Operator::If { .. }

            // Control-flow instructions mean that we're moving to the end/exit
            // of a block somewhere else. That means we need to update the fuel
            // counter since we're effectively terminating our basic block.
            | Operator::Br { .. }
            | Operator::BrIf { .. }
            | Operator::BrTable { .. }

            // Exiting a scope means that we need to update the fuel
            // consumption because there are multiple ways to exit a scope and
            // this is the only time we have to account for instructions
            // executed so far.
            | Operator::End

            // This is similar to `end`, except that it's only the terminator
            // for an `if` block. The same reasoning applies though in that we
            // are terminating a basic block and need to update the fuel
            // variable.
            | Operator::Else => self.fuel_increment_var(builder),

            // This is a normal instruction where the fuel is buffered to later
            // get added to `self.fuel_var`.
            //
            // Note that we generally ignore instructions which may trap and
            // therefore result in exiting a block early. Current usage of fuel
            // means that it's not too important to account for a precise amount
            // of fuel consumed but rather "close to the actual amount" is good
            // enough. For 100% precise counting, however, we'd probably need to
            // not only increment but also save the fuel amount more often
            // around trapping instructions. (see the `unreachable` instruction
            // case above)
            //
            // Note that `Block` is specifically omitted from incrementing the
            // fuel variable. Control flow entering a `block` is unconditional
            // which means it's effectively executing straight-line code. We'll
            // update the counter when exiting a block, but we shouldn't need to
            // do so upon entering a block.
            _ => {}
        }
    }

    fn fuel_after_op(&mut self, op: &Operator<'_>, builder: &mut FunctionBuilder<'_>) {
        // After a function call we need to reload our fuel value since the
        // function may have changed it.
        match op {
            Operator::Call { .. } | Operator::CallIndirect { .. } => {
                self.fuel_load_into_var(builder);
            }
            _ => {}
        }
    }

    /// Adds `self.fuel_consumed` to the `fuel_var`, zero-ing out the amount of
    /// fuel consumed at that point.
    fn fuel_increment_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let consumption = mem::replace(&mut self.fuel_consumed, 0);
        if consumption == 0 {
            return;
        }

        let fuel = builder.use_var(self.fuel_var);
        let fuel = builder.ins().iadd_imm(fuel, consumption);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Loads the fuel consumption value from `VMStoreContext` into `self.fuel_var`
    fn fuel_load_into_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset();
        let fuel = builder
            .ins()
            .load(ir::types::I64, ir::MemFlags::trusted(), addr, offset);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Stores the fuel consumption value from `self.fuel_var` into
    /// `VMStoreContext`.
    fn fuel_save_from_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset();
        let fuel_consumed = builder.use_var(self.fuel_var);
        builder
            .ins()
            .store(ir::MemFlags::trusted(), fuel_consumed, addr, offset);
    }

    /// Returns the `(address, offset)` of the fuel consumption within
    /// `VMStoreContext`, used to perform loads/stores later.
    fn fuel_addr_offset(&mut self) -> (ir::Value, ir::immediates::Offset32) {
        debug_assert!(!self.vmstore_context_ptr.is_reserved_value());
        (
            self.vmstore_context_ptr,
            i32::from(self.offsets.ptr.vmstore_context_fuel_consumed()).into(),
        )
    }

    /// Checks the amount of fuel remaining, and if we've run out of fuel we
    /// call the out-of-fuel function.
    fn fuel_check(&mut self, builder: &mut FunctionBuilder) {
        self.fuel_increment_var(builder);
        let out_of_gas_block = builder.create_block();
        let continuation_block = builder.create_block();

        // Note that our fuel is encoded as adding positive values to a
        // negative number. Whenever the negative number goes positive that
        // means we ran out of fuel.
        //
        // Compare to see if our fuel is positive, and if so we ran out of gas.
        // Otherwise we can continue on like usual.
        let zero = builder.ins().iconst(ir::types::I64, 0);
        let fuel = builder.use_var(self.fuel_var);
        let cmp = builder
            .ins()
            .icmp(IntCC::SignedGreaterThanOrEqual, fuel, zero);
        builder
            .ins()
            .brif(cmp, out_of_gas_block, &[], continuation_block, &[]);
        builder.seal_block(out_of_gas_block);

        // If we ran out of gas then we call our out-of-gas intrinsic and it
        // figures out what to do. Note that this may raise a trap, or do
        // something like yield to an async runtime. In either case we don't
        // assume what happens and handle the case the intrinsic returns.
        //
        // Note that we save/reload fuel around this since the out-of-gas
        // intrinsic may alter how much fuel is in the system.
        builder.switch_to_block(out_of_gas_block);
        self.fuel_save_from_var(builder);
        let out_of_gas = self.builtin_functions.out_of_gas(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(out_of_gas, &[vmctx]);
        self.fuel_load_into_var(builder);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }

    fn epoch_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        builder.declare_var(self.epoch_deadline_var, ir::types::I64);
        // Let epoch_check_full load the current deadline and call def_var

        builder.declare_var(self.epoch_ptr_var, self.pointer_type());
        let epoch_ptr = self.epoch_ptr(builder);
        builder.def_var(self.epoch_ptr_var, epoch_ptr);

        // We must check for an epoch change when entering a
        // function. Why? Why aren't checks at loops sufficient to
        // bound runtime to O(|static program size|)?
        //
        // The reason is that one can construct a "zip-bomb-like"
        // program with exponential-in-program-size runtime, with no
        // backedges (loops), by building a tree of function calls: f0
        // calls f1 ten times, f1 calls f2 ten times, etc. E.g., nine
        // levels of this yields a billion function calls with no
        // backedges. So we can't do checks only at backedges.
        //
        // In this "call-tree" scenario, and in fact in any program
        // that uses calls as a sort of control flow to try to evade
        // backedge checks, a check at every function entry is
        // sufficient. Then, combined with checks at every backedge
        // (loop) the longest runtime between checks is bounded by the
        // straightline length of any function body.
        let continuation_block = builder.create_block();
        let cur_epoch_value = self.epoch_load_current(builder);
        self.epoch_check_full(builder, cur_epoch_value, continuation_block);
    }

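    /// Wmemcheck hook called when a function named `malloc` returns: reports
    /// the requested size and the returned pointer to the `check_malloc`
    /// builtin.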
    #[cfg(feature = "wmemcheck")]
    fn hook_malloc_exit(&mut self, builder: &mut FunctionBuilder, retvals: &[ir::Value]) {
        let check_malloc = self.builtin_functions.check_malloc(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let func_args = builder
            .func
            .dfg
            .block_params(builder.func.layout.entry_block().unwrap());
        let len = if func_args.len() < 3 {
            return;
        } else {
            // If a function named `malloc` has at least one Wasm argument, we
            // assume the first argument (at index 2, after the two vmctx
            // parameters) is the requested allocation size.
            func_args[2]
        };
        let retval = if retvals.len() < 1 {
            return;
        } else {
            retvals[0]
        };
        builder.ins().call(check_malloc, &[vmctx, retval, len]);
    }

    #[cfg(feature = "wmemcheck")]
    fn hook_free_exit(&mut self, builder: &mut FunctionBuilder) {
        let check_free = self.builtin_functions.check_free(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let func_args = builder
            .func
            .dfg
            .block_params(builder.func.layout.entry_block().unwrap());
        let ptr = if func_args.len() < 3 {
            return;
        } else {
            // If a function named `free` has at least one Wasm argument, we
            // assume the first argument (at index 2, after the two vmctx
            // parameters) is a pointer to memory.
            func_args[2]
        };
        builder.ins().call(check_free, &[vmctx, ptr]);
    }

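    /// Load the pointer to the engine's epoch counter out of the vmctx.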
    fn epoch_ptr(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let vmctx = self.vmctx(builder.func);
        let pointer_type = self.pointer_type();
        let base = builder.ins().global_value(pointer_type, vmctx);
        let offset = i32::from(self.offsets.ptr.vmctx_epoch_ptr());
        let epoch_ptr = builder
            .ins()
            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
        epoch_ptr
    }

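    /// Load the current epoch value through the cached `epoch_ptr_var` pointer.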
    fn epoch_load_current(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let addr = builder.use_var(self.epoch_ptr_var);
        builder.ins().load(
            ir::types::I64,
            ir::MemFlags::trusted(),
            addr,
            ir::immediates::Offset32::new(0),
        )
    }

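    /// Emit an epoch check against the cached deadline, falling back to a full
    /// check (which reloads the deadline and may call the `new_epoch` builtin)
    /// when the cached deadline appears to have been reached.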
    fn epoch_check(&mut self, builder: &mut FunctionBuilder<'_>) {
        let continuation_block = builder.create_block();

        // Load new epoch and check against the cached deadline.
        let cur_epoch_value = self.epoch_load_current(builder);
        self.epoch_check_cached(builder, cur_epoch_value, continuation_block);

        // At this point we've noticed that the epoch has exceeded our
        // cached deadline. However the real deadline may have been
        // updated (within another yield) during some function that we
        // called in the meantime, so reload the cache and check again.
        self.epoch_check_full(builder, cur_epoch_value, continuation_block);
    }

    fn epoch_check_cached(
        &mut self,
        builder: &mut FunctionBuilder,
        cur_epoch_value: ir::Value,
        continuation_block: ir::Block,
    ) {
        let new_epoch_block = builder.create_block();
        builder.set_cold_block(new_epoch_block);

        let epoch_deadline = builder.use_var(self.epoch_deadline_var);
        let cmp = builder.ins().icmp(
            IntCC::UnsignedGreaterThanOrEqual,
            cur_epoch_value,
            epoch_deadline,
        );
        builder
            .ins()
            .brif(cmp, new_epoch_block, &[], continuation_block, &[]);
        builder.seal_block(new_epoch_block);

        builder.switch_to_block(new_epoch_block);
    }

    fn epoch_check_full(
        &mut self,
        builder: &mut FunctionBuilder,
        cur_epoch_value: ir::Value,
        continuation_block: ir::Block,
    ) {
        // We keep the deadline cached in a register to speed the checks
        // in the common case (between epoch ticks) but we want to do a
        // precise check here by reloading the cache first.
        let deadline = builder.ins().load(
            ir::types::I64,
            ir::MemFlags::trusted(),
            self.vmstore_context_ptr,
            ir::immediates::Offset32::new(self.offsets.ptr.vmstore_context_epoch_deadline() as i32),
        );
        builder.def_var(self.epoch_deadline_var, deadline);
        self.epoch_check_cached(builder, cur_epoch_value, continuation_block);

        let new_epoch = self.builtin_functions.new_epoch(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        // new_epoch() returns the new deadline, so we don't have to
        // reload it.
        let call = builder.ins().call(new_epoch, &[vmctx]);
        let new_deadline = *builder.func.dfg.inst_results(call).first().unwrap();
        builder.def_var(self.epoch_deadline_var, new_deadline);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }

    /// Get the Memory for the given index.
    fn memory(&self, index: MemoryIndex) -> Memory {
        self.module.memories[index]
    }

    /// Get the Table for the given index.
    fn table(&self, index: TableIndex) -> Table {
        self.module.tables[index]
    }

    /// Cast the value to I64, zero-extending it if necessary.
    ///
    /// Returns the value cast to I64.
    fn cast_index_to_i64(
        &self,
        pos: &mut FuncCursor<'_>,
        val: ir::Value,
        index_type: IndexType,
    ) -> ir::Value {
        match index_type {
            IndexType::I32 => pos.ins().uextend(I64, val),
            IndexType::I64 => val,
        }
    }

    /// Convert the target pointer-sized integer `val` into the memory/table's index type.
    ///
    /// For memory, `val` is holding a memory length (or the `-1` `memory.grow`-failed sentinel).
    /// For table, `val` is holding a table length.
    ///
    /// This might involve extending or truncating it depending on the memory/table's
    /// index type and the target's pointer type.
    fn convert_pointer_to_index_type(
        &self,
        mut pos: FuncCursor<'_>,
        val: ir::Value,
        index_type: IndexType,
        // When it is a memory and the memory is using single-byte pages,
        // we need to handle the truncation differently. See comments below.
        //
        // When it is a table, this should be set to false.
        single_byte_pages: bool,
    ) -> ir::Value {
        let desired_type = index_type_to_ir_type(index_type);
        let pointer_type = self.pointer_type();
        assert_eq!(pos.func.dfg.value_type(val), pointer_type);

        // The current length is of type `pointer_type` but we need to fit it
        // into `desired_type`. We are guaranteed that the result will always
        // fit, so we just need to do the right ireduce/sextend here.
        if pointer_type == desired_type {
            val
        } else if pointer_type.bits() > desired_type.bits() {
            pos.ins().ireduce(desired_type, val)
        } else {
            // We have a 64-bit memory/table on a 32-bit host -- this combo doesn't
            // really make a whole lot of sense to do from a user perspective
            // but that is neither here nor there. We want to logically do an
            // unsigned extend *except* when we are given the `-1` sentinel,
            // which we must preserve as `-1` in the wider type.
            match single_byte_pages {
                false => {
                    // In the case that we have default page sizes, we can
                    // always sign extend, since valid memory lengths (in pages)
                    // never have their sign bit set, and so if the sign bit is
                    // set then this must be the `-1` sentinel, which we want to
                    // preserve through the extension.
                    //
                    // For tables, `single_byte_pages` is always false, so we
                    // likewise simply do a sign extension.
                    pos.ins().sextend(desired_type, val)
                }
                true => {
                    // For single-byte pages, we have to explicitly check for
                    // `-1` and choose whether to do an unsigned extension or
                    // return a larger `-1` because there are valid memory
                    // lengths (in pages) that have the sign bit set.
                    let extended = pos.ins().uextend(desired_type, val);
                    let neg_one = pos.ins().iconst(desired_type, -1);
                    let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1);
                    pos.ins().select(is_failure, neg_one, extended)
                }
            }
        }
    }

    /// Set up the necessary preamble definitions in `func` to access the table identified
    /// by `index`.
    ///
    /// The index space covers both imported and locally declared tables.
    fn ensure_table_exists(&mut self, func: &mut ir::Function, index: TableIndex) {
        if self.tables[index].is_some() {
            return;
        }

        let pointer_type = self.pointer_type();

        let (ptr, base_offset, current_elements_offset) = {
            let vmctx = self.vmctx(func);
            if let Some(def_index) = self.module.defined_table_index(index) {
                let base_offset =
                    i32::try_from(self.offsets.vmctx_vmtable_definition_base(def_index)).unwrap();
                let current_elements_offset = i32::try_from(
                    self.offsets
                        .vmctx_vmtable_definition_current_elements(def_index),
                )
                .unwrap();
                (vmctx, base_offset, current_elements_offset)
            } else {
                let from_offset = self.offsets.vmctx_vmtable_import_from(index);
                let table = func.create_global_value(ir::GlobalValueData::Load {
                    base: vmctx,
                    offset: Offset32::new(i32::try_from(from_offset).unwrap()),
                    global_type: pointer_type,
                    flags: MemFlags::trusted().with_readonly().with_can_move(),
                });
                let base_offset = i32::from(self.offsets.vmtable_definition_base());
                let current_elements_offset =
                    i32::from(self.offsets.vmtable_definition_current_elements());
                (table, base_offset, current_elements_offset)
            }
        };

        let table = &self.module.tables[index];
        let element_size = if table.ref_type.is_vmgcref_type() {
            // For GC-managed references, tables store `Option<VMGcRef>`s.
            ir::types::I32.bytes()
        } else {
            self.reference_type(table.ref_type.heap_type).0.bytes()
        };

        let base_gv = func.create_global_value(ir::GlobalValueData::Load {
            base: ptr,
            offset: Offset32::new(base_offset),
            global_type: pointer_type,
            flags: if Some(table.limits.min) == table.limits.max {
                // A fixed-size table can't be resized so its base address won't
                // change.
                MemFlags::trusted().with_readonly().with_can_move()
            } else {
                MemFlags::trusted()
            },
        });

        let bound = if Some(table.limits.min) == table.limits.max {
            TableSize::Static {
                bound: table.limits.min,
            }
        } else {
            TableSize::Dynamic {
                bound_gv: func.create_global_value(ir::GlobalValueData::Load {
                    base: ptr,
                    offset: Offset32::new(current_elements_offset),
                    global_type: ir::Type::int(
                        u16::from(self.offsets.size_of_vmtable_definition_current_elements()) * 8,
                    )
                    .unwrap(),
                    flags: MemFlags::trusted(),
                }),
            }
        };

        self.tables[index] = Some(TableData {
            base_gv,
            bound,
            element_size,
        });
    }

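    /// Load a `funcref` from the given table element, lazily initializing the
    /// element via the `table_get_lazy_init_func_ref` builtin if lazy table
    /// initialization is enabled and the element is still null.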
    fn get_or_init_func_ref_table_elem(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        index: ir::Value,
        cold_blocks: bool,
    ) -> ir::Value {
        let pointer_type = self.pointer_type();
        self.ensure_table_exists(builder.func, table_index);
        let table_data = self.tables[table_index].clone().unwrap();

        // To support lazy initialization of table
        // contents, we check for a null entry here, and
        // if null, we take a slow-path that invokes a
        // libcall.
        let (table_entry_addr, flags) = table_data.prepare_table_addr(self, builder, index);
        let value = builder.ins().load(pointer_type, flags, table_entry_addr, 0);

        if !self.tunables.table_lazy_init {
            return value;
        }

        // Mask off the "initialized bit". See documentation on
        // FUNCREF_INIT_BIT in crates/environ/src/ref_bits.rs for more
        // details. Note that `FUNCREF_MASK` has type `usize` which may not be
        // appropriate for the target architecture. Right now its value is
        // always -2 so assert that part doesn't change and then thread through
        // -2 as the immediate.
        assert_eq!(FUNCREF_MASK as isize, -2);
        let value_masked = builder.ins().band_imm(value, Imm64::from(-2));

        let null_block = builder.create_block();
        let continuation_block = builder.create_block();
        if cold_blocks {
            builder.set_cold_block(null_block);
            builder.set_cold_block(continuation_block);
        }
        let result_param = builder.append_block_param(continuation_block, pointer_type);
        builder.set_cold_block(null_block);

        builder
            .ins()
            .brif(value, continuation_block, &[value_masked], null_block, &[]);
        builder.seal_block(null_block);

        builder.switch_to_block(null_block);
        let index_type = self.table(table_index).idx_type;
        let table_index = builder.ins().iconst(I32, table_index.index() as i64);
        let lazy_init = self
            .builtin_functions
            .table_get_lazy_init_func_ref(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let index = self.cast_index_to_i64(&mut builder.cursor(), index, index_type);
        let call_inst = builder.ins().call(lazy_init, &[vmctx, table_index, index]);
        let returned_entry = builder.func.dfg.inst_results(call_inst)[0];
        builder.ins().jump(continuation_block, &[returned_entry]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
        result_param
    }

    #[cfg(feature = "wmemcheck")]
    fn check_malloc_start(&mut self, builder: &mut FunctionBuilder) {
        let malloc_start = self.builtin_functions.malloc_start(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(malloc_start, &[vmctx]);
    }

    #[cfg(feature = "wmemcheck")]
    fn check_free_start(&mut self, builder: &mut FunctionBuilder) {
        let free_start = self.builtin_functions.free_start(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(free_start, &[vmctx]);
    }

    #[cfg(feature = "wmemcheck")]
    fn current_func_name(&self, builder: &mut FunctionBuilder) -> Option<&str> {
        let func_index = match &builder.func.name {
            ir::UserFuncName::User(user) => FuncIndex::from_u32(user.index),
            _ => {
                panic!("function name not a UserFuncName::User as expected")
            }
        };
        self.translation
            .debuginfo
            .name_section
            .func_names
            .get(&func_index)
            .copied()
    }

    /// Proof-carrying code: create a memtype describing an empty
    /// runtime struct (to be updated later).
    fn create_empty_struct_memtype(&self, func: &mut ir::Function) -> ir::MemoryType {
        func.create_memory_type(ir::MemoryTypeData::Struct {
            size: 0,
            fields: vec![],
        })
    }

    /// Proof-carrying code: add a new field to a memtype used to
    /// describe a runtime struct. A memory region of type `memtype`
    /// will have a pointer at `offset` pointing to another memory
    /// region of type `pointee`. `readonly` indicates whether the
    /// PCC-checked code is expected to update this field or not.
    fn add_field_to_memtype(
        &self,
        func: &mut ir::Function,
        memtype: ir::MemoryType,
        offset: u32,
        pointee: ir::MemoryType,
        readonly: bool,
    ) {
        let ptr_size = self.pointer_type().bytes();
        match &mut func.memory_types[memtype] {
            ir::MemoryTypeData::Struct { size, fields } => {
                *size = std::cmp::max(*size, offset.checked_add(ptr_size).unwrap().into());
                fields.push(ir::MemoryTypeField {
                    ty: self.pointer_type(),
                    offset: offset.into(),
                    readonly,
                    fact: Some(ir::Fact::Mem {
                        ty: pointee,
                        min_offset: 0,
                        max_offset: 0,
                        nullable: false,
                    }),
                });

                // Sort fields by offset -- we need to do this now
                // because we may create an arbitrary number of
                // memtypes for imported memories and we don't
                // otherwise track them.
                fields.sort_by_key(|f| f.offset);
            }
            _ => panic!("Cannot add field to non-struct memtype"),
        }
    }

    /// Add one level of indirection to a pointer-and-memtype pair:
    /// generate a load in the code at the specified offset, and if
    /// memtypes are in use, add a field to the original struct and
    /// generate a new memtype for the pointee.
    fn load_pointer_with_memtypes(
        &mut self,
        func: &mut ir::Function,
        offset: u32,
        readonly: bool,
        memtype: Option<ir::MemoryType>,
    ) -> (ir::GlobalValue, Option<ir::MemoryType>) {
        let vmctx = self.vmctx(func);
        let pointee = func.create_global_value(ir::GlobalValueData::Load {
            base: vmctx,
            offset: Offset32::new(i32::try_from(offset).unwrap()),
            global_type: self.pointer_type(),
            flags: MemFlags::trusted().with_readonly().with_can_move(),
        });

        let mt = memtype.map(|mt| {
            let pointee_mt = self.create_empty_struct_memtype(func);
            self.add_field_to_memtype(func, mt, offset, pointee_mt, readonly);
            func.global_value_facts[pointee] = Some(Fact::Mem {
                ty: pointee_mt,
                min_offset: 0,
                max_offset: 0,
                nullable: false,
            });
            pointee_mt
        });
        (pointee, mt)
    }

    /// Helper to emit a conditional trap based on `trap_cond`.
    ///
    /// This should only be used if `self.clif_instruction_traps_enabled()` is
    /// false, otherwise native CLIF instructions should be used instead.
    pub fn conditionally_trap(
        &mut self,
        builder: &mut FunctionBuilder,
        trap_cond: ir::Value,
        trap: ir::TrapCode,
    ) {
        assert!(!self.clif_instruction_traps_enabled());

        let trap_block = builder.create_block();
        builder.set_cold_block(trap_block);
        let continuation_block = builder.create_block();

        builder
            .ins()
            .brif(trap_cond, trap_block, &[], continuation_block, &[]);

        builder.seal_block(trap_block);
        builder.seal_block(continuation_block);

        builder.switch_to_block(trap_block);
        self.trap(builder, trap);
        builder.switch_to_block(continuation_block);
    }

    /// Helper used when `self.clif_instruction_traps_enabled()` is false to
    /// test whether the divisor is zero.
    fn guard_zero_divisor(&mut self, builder: &mut FunctionBuilder, rhs: ir::Value) {
        if self.clif_instruction_traps_enabled() {
            return;
        }
        self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);
    }

    /// Helper used when `self.clif_instruction_traps_enabled()` is false to
    /// test whether a signed division operation will raise a trap.
    fn guard_signed_divide(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
    ) {
        if self.clif_instruction_traps_enabled() {
            return;
        }
        self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);

        let ty = builder.func.dfg.value_type(rhs);
        let minus_one = builder.ins().iconst(ty, -1);
        let rhs_is_minus_one = builder.ins().icmp(IntCC::Equal, rhs, minus_one);
        let int_min = builder.ins().iconst(
            ty,
            match ty {
                I32 => i64::from(i32::MIN),
                I64 => i64::MIN,
                _ => unreachable!(),
            },
        );
        let lhs_is_int_min = builder.ins().icmp(IntCC::Equal, lhs, int_min);
        let is_integer_overflow = builder.ins().band(rhs_is_minus_one, lhs_is_int_min);
        self.conditionally_trap(builder, is_integer_overflow, ir::TrapCode::INTEGER_OVERFLOW);
    }

    /// Helper used when `self.clif_instruction_traps_enabled()` is false to
    /// guard the traps from float-to-int conversions.
    fn guard_fcvt_to_int(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: ir::Type,
        val: ir::Value,
        range32: (f64, f64),
        range64: (f64, f64),
    ) {
        assert!(!self.clif_instruction_traps_enabled());
        let val_ty = builder.func.dfg.value_type(val);
        let val = if val_ty == F64 {
            val
        } else {
            builder.ins().fpromote(F64, val)
        };
        let isnan = builder.ins().fcmp(FloatCC::NotEqual, val, val);
        self.trapnz(builder, isnan, ir::TrapCode::BAD_CONVERSION_TO_INTEGER);
        let val = builder.ins().trunc(val);
        let (lower_bound, upper_bound) = match ty {
            I32 => range32,
            I64 => range64,
            _ => unreachable!(),
        };
        let lower_bound = builder.ins().f64const(lower_bound);
        let too_small = builder
            .ins()
            .fcmp(FloatCC::LessThanOrEqual, val, lower_bound);
        self.trapnz(builder, too_small, ir::TrapCode::INTEGER_OVERFLOW);
        let upper_bound = builder.ins().f64const(upper_bound);
        let too_large = builder
            .ins()
            .fcmp(FloatCC::GreaterThanOrEqual, val, upper_bound);
        self.trapnz(builder, too_large, ir::TrapCode::INTEGER_OVERFLOW);
    }

    /// Get the `ir::Type` for a `VMSharedTypeIndex`.
    pub(crate) fn vmshared_type_index_ty(&self) -> Type {
        Type::int_with_byte_size(self.offsets.size_of_vmshared_type_index().into()).unwrap()
    }

    /// Given a `ModuleInternedTypeIndex`, emit code to get the corresponding
    /// `VMSharedTypeIndex` at runtime.
    pub(crate) fn module_interned_to_shared_ty(
        &mut self,
        pos: &mut FuncCursor,
        interned_ty: ModuleInternedTypeIndex,
    ) -> ir::Value {
        let vmctx = self.vmctx_val(pos);
        let pointer_type = self.pointer_type();
        let mem_flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        // Load the base pointer of the array of `VMSharedTypeIndex`es.
        let shared_indices = pos.ins().load(
            pointer_type,
            mem_flags,
            vmctx,
            i32::from(self.offsets.ptr.vmctx_type_ids_array()),
        );

        // Calculate the offset in that array for this type's entry.
        let ty = self.vmshared_type_index_ty();
        let offset = i32::try_from(interned_ty.as_u32().checked_mul(ty.bytes()).unwrap()).unwrap();

        // Load the `VMSharedTypeIndex` that this `ModuleInternedTypeIndex` is
        // associated with at runtime from the array.
        pos.ins().load(ty, mem_flags, shared_indices, offset)
    }

    /// Load the associated `VMSharedTypeIndex` from inside a `*const VMFuncRef`.
    ///
    /// Does not check for null; just assumes that the `funcref` is a valid
    /// pointer.
    pub(crate) fn load_funcref_type_index(
        &mut self,
        pos: &mut FuncCursor,
        mem_flags: ir::MemFlags,
        funcref: ir::Value,
    ) -> ir::Value {
        let ty = self.vmshared_type_index_ty();
        pos.ins().load(
            ty,
            mem_flags,
            funcref,
            i32::from(self.offsets.ptr.vm_func_ref_type_index()),
        )
    }
}

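/// Bundles the state needed to translate a single Wasm call into CLIF: the
/// function builder, the translation environment, and whether the call should
/// be emitted as a tail call.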
struct Call<'a, 'func, 'module_env> {
    builder: &'a mut FunctionBuilder<'func>,
    env: &'a mut FuncEnvironment<'module_env>,
    tail: bool,
}

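/// How the type signature check for a `call_indirect` will be performed:
/// deferred to a runtime check, statically known to match, or statically
/// known to fail (trap).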
1203enum CheckIndirectCallTypeSignature {
1204    Runtime,
1205    StaticMatch {
1206        /// Whether or not the funcref may be null or if it's statically known
1207        /// to not be null.
1208        may_be_null: bool,
1209    },
1210    StaticTrap,
1211}
1212
1213impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> {
1214    /// Create a new `Call` site that will do regular, non-tail calls.
1215    pub fn new(
1216        builder: &'a mut FunctionBuilder<'func>,
1217        env: &'a mut FuncEnvironment<'module_env>,
1218    ) -> Self {
1219        Call {
1220            builder,
1221            env,
1222            tail: false,
1223        }
1224    }
1225
1226    /// Create a new `Call` site that will perform tail calls.
1227    pub fn new_tail(
1228        builder: &'a mut FunctionBuilder<'func>,
1229        env: &'a mut FuncEnvironment<'module_env>,
1230    ) -> Self {
1231        Call {
1232            builder,
1233            env,
1234            tail: true,
1235        }
1236    }
1237
1238    /// Do a direct call to the given callee function.
1239    pub fn direct_call(
1240        mut self,
1241        callee_index: FuncIndex,
1242        callee: ir::FuncRef,
1243        call_args: &[ir::Value],
1244    ) -> WasmResult<ir::Inst> {
1245        let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
1246        let caller_vmctx = self
1247            .builder
1248            .func
1249            .special_param(ArgumentPurpose::VMContext)
1250            .unwrap();
1251
1252        // Handle direct calls to locally-defined functions.
1253        if !self.env.module.is_imported_function(callee_index) {
1254            // First append the callee vmctx address, which is the same as the caller vmctx in
1255            // this case.
1256            real_call_args.push(caller_vmctx);
1257
1258            // Then append the caller vmctx address.
1259            real_call_args.push(caller_vmctx);
1260
1261            // Then append the regular call arguments.
1262            real_call_args.extend_from_slice(call_args);
1263
1264            // Finally, make the direct call!
1265            return Ok(self.direct_call_inst(callee, &real_call_args));
1266        }
1267
1268        // Handle direct calls to imported functions. We use an indirect call
1269        // so that we don't have to patch the code at runtime.
1270        let pointer_type = self.env.pointer_type();
1271        let sig_ref = self.builder.func.dfg.ext_funcs[callee].signature;
1272        let vmctx = self.env.vmctx(self.builder.func);
1273        let base = self.builder.ins().global_value(pointer_type, vmctx);
1274
1275        let mem_flags = ir::MemFlags::trusted().with_readonly().with_can_move();
1276
1277        // Load the callee address.
1278        let body_offset = i32::try_from(
1279            self.env
1280                .offsets
1281                .vmctx_vmfunction_import_wasm_call(callee_index),
1282        )
1283        .unwrap();
1284        let func_addr = self
1285            .builder
1286            .ins()
1287            .load(pointer_type, mem_flags, base, body_offset);
1288
1289        // First append the callee vmctx address.
1290        let vmctx_offset =
1291            i32::try_from(self.env.offsets.vmctx_vmfunction_import_vmctx(callee_index)).unwrap();
1292        let vmctx = self
1293            .builder
1294            .ins()
1295            .load(pointer_type, mem_flags, base, vmctx_offset);
1296        real_call_args.push(vmctx);
1297        real_call_args.push(caller_vmctx);
1298
1299        // Then append the regular call arguments.
1300        real_call_args.extend_from_slice(call_args);
1301
1302        // Finally, make the indirect call!
1303        Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
1304    }
1305
1306    /// Do an indirect call through the given funcref table.
1307    pub fn indirect_call(
1308        mut self,
1309        features: &WasmFeatures,
1310        table_index: TableIndex,
1311        ty_index: TypeIndex,
1312        sig_ref: ir::SigRef,
1313        callee: ir::Value,
1314        call_args: &[ir::Value],
1315    ) -> WasmResult<Option<ir::Inst>> {
1316        let (code_ptr, callee_vmctx) = match self.check_and_load_code_and_callee_vmctx(
1317            features,
1318            table_index,
1319            ty_index,
1320            callee,
1321            false,
1322        )? {
1323            Some(pair) => pair,
1324            None => return Ok(None),
1325        };
1326
1327        self.unchecked_call_impl(sig_ref, code_ptr, callee_vmctx, call_args)
1328            .map(Some)
1329    }
1330
1331    fn check_and_load_code_and_callee_vmctx(
1332        &mut self,
1333        features: &WasmFeatures,
1334        table_index: TableIndex,
1335        ty_index: TypeIndex,
1336        callee: ir::Value,
1337        cold_blocks: bool,
1338    ) -> WasmResult<Option<(ir::Value, ir::Value)>> {
1339        // Get the funcref pointer from the table.
1340        let funcref_ptr = self.env.get_or_init_func_ref_table_elem(
1341            self.builder,
1342            table_index,
1343            callee,
1344            cold_blocks,
1345        );
1346
1347        // If necessary, check the signature.
1348        let check =
1349            self.check_indirect_call_type_signature(features, table_index, ty_index, funcref_ptr);
1350
1351        let trap_code = match check {
1352            // The type of `funcref_ptr` is checked at runtime to match, which
1353            // also means that if code gets this far it's guaranteed to not be
1354            // null, so nothing in `unchecked_call` can fail.
1355            CheckIndirectCallTypeSignature::Runtime => None,
1356
1357            // No type check was performed on `funcref_ptr` because it's
1358            // statically known to have the right type. Note that the function
1359            // has not necessarily been checked for null yet, since no type
1360            // information was inspected.
1361            //
1362            // If the table may hold null functions, then further loads in
1363            // `unchecked_call` may fail. If the table only holds non-null
1364            // functions, though, then there's no possibility of a trap.
1365            CheckIndirectCallTypeSignature::StaticMatch { may_be_null } => {
1366                if may_be_null {
1367                    Some(crate::TRAP_INDIRECT_CALL_TO_NULL)
1368                } else {
1369                    None
1370                }
1371            }
1372
1373            // Code has already trapped, so return nothing, indicating that
1374            // this is now unreachable code.
1375            CheckIndirectCallTypeSignature::StaticTrap => return Ok(None),
1376        };
1377
1378        Ok(Some(self.load_code_and_vmctx(funcref_ptr, trap_code)))
1379    }
1380
1381    fn check_indirect_call_type_signature(
1382        &mut self,
1383        features: &WasmFeatures,
1384        table_index: TableIndex,
1385        ty_index: TypeIndex,
1386        funcref_ptr: ir::Value,
1387    ) -> CheckIndirectCallTypeSignature {
1388        let table = &self.env.module.tables[table_index];
1389        let sig_id_size = self.env.offsets.size_of_vmshared_type_index();
1390        let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap();
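        // As a sketch: if `size_of_vmshared_type_index()` reports 4 bytes this
        // resolves to `ir::types::I32`; the width is always taken from the
        // runtime offsets rather than hard-coded here.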
1391
1392        // Test if a type check is necessary for this table. If this table is a
1393        // table of typed functions and that type matches `ty_index`, then
1394        // there's no need to perform a typecheck.
1395        match table.ref_type.heap_type {
1396            // Functions do not have a statically known type in the table, so
1397            // a typecheck is required. Fall through to below to perform the
1398            // actual typecheck.
1399            WasmHeapType::Func => {}
1400
1401            // Functions that have a statically known type are either going to
1402            // always succeed or always fail. Figure out which by inspecting the
1403            // types further.
1404            WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Module(table_ty)) => {
1405                // If `ty_index` matches `table_ty`, then this call is
1406                // statically known to have the right type, so no checks are
1407                // necessary.
1408                let specified_ty = self.env.module.types[ty_index].unwrap_module_type_index();
1409                if specified_ty == table_ty {
1410                    return CheckIndirectCallTypeSignature::StaticMatch {
1411                        may_be_null: table.ref_type.nullable,
1412                    };
1413                }
1414
1415                if features.gc() {
1416                    // If we are in the Wasm GC world, then we need to perform
1417                    // an actual subtype check at runtime. Fall through to below
1418                    // to do that.
1419                } else {
1420                    // Otherwise if the types don't match then either (a) this
1421                    // is a null pointer or (b) it's a pointer with the wrong
1422                    // type. Figure out which and trap here.
1423                    //
1424                    // If it's possible to have a null here then try to load the
1425                    // type information. If that fails due to the function being
1426                    // a null pointer, then this was a call to null. Otherwise
1427                    // if it succeeds then we know it won't match, so trap
1428                    // anyway.
1429                    if table.ref_type.nullable {
1430                        if self.env.clif_memory_traps_enabled() {
1431                            self.builder.ins().load(
1432                                sig_id_type,
1433                                ir::MemFlags::trusted()
1434                                    .with_readonly()
1435                                    .with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL)),
1436                                funcref_ptr,
1437                                i32::from(self.env.offsets.ptr.vm_func_ref_type_index()),
1438                            );
1439                        } else {
1440                            self.env.trapz(
1441                                self.builder,
1442                                funcref_ptr,
1443                                crate::TRAP_INDIRECT_CALL_TO_NULL,
1444                            );
1445                        }
1446                    }
1447                    self.env.trap(self.builder, crate::TRAP_BAD_SIGNATURE);
1448                    return CheckIndirectCallTypeSignature::StaticTrap;
1449                }
1450            }
1451
1452            // Tables of `nofunc` can only be inhabited by null, so go ahead and
1453            // trap with that.
1454            WasmHeapType::NoFunc => {
1455                assert!(table.ref_type.nullable);
1456                self.env
1457                    .trap(self.builder, crate::TRAP_INDIRECT_CALL_TO_NULL);
1458                return CheckIndirectCallTypeSignature::StaticTrap;
1459            }
1460
1461            WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(), // FIXME: #10248 stack switching support.
1462
1463            // Engine-indexed types don't show up until runtime and it's a Wasm
1464            // validation error to perform a call through a non-function table,
1465            // so these cases are dynamically not reachable.
1466            WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Engine(_))
1467            | WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::RecGroup(_))
1468            | WasmHeapType::Extern
1469            | WasmHeapType::NoExtern
1470            | WasmHeapType::Any
1471            | WasmHeapType::Eq
1472            | WasmHeapType::I31
1473            | WasmHeapType::Array
1474            | WasmHeapType::ConcreteArray(_)
1475            | WasmHeapType::Struct
1476            | WasmHeapType::ConcreteStruct(_)
1477            | WasmHeapType::None => {
1478                unreachable!()
1479            }
1480        }
1481
1482        // Load the caller's `VMSharedTypeIndex`.
1483        let interned_ty = self.env.module.types[ty_index].unwrap_module_type_index();
1484        let caller_sig_id = self
1485            .env
1486            .module_interned_to_shared_ty(&mut self.builder.cursor(), interned_ty);
1487
1488        // Load the callee's `VMSharedTypeIndex`.
1489        //
1490        // Note that the callee may be null in which case this load may
1491        // trap. If so use the `TRAP_INDIRECT_CALL_TO_NULL` trap code.
1492        let mut mem_flags = ir::MemFlags::trusted().with_readonly();
1493        if self.env.clif_memory_traps_enabled() {
1494            mem_flags = mem_flags.with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL));
1495        } else {
1496            self.env
1497                .trapz(self.builder, funcref_ptr, crate::TRAP_INDIRECT_CALL_TO_NULL);
1498        }
1499        let callee_sig_id =
1500            self.env
1501                .load_funcref_type_index(&mut self.builder.cursor(), mem_flags, funcref_ptr);
1502
1503        // Check that they match: in the case of Wasm GC, this means doing a
1504        // full subtype check. Otherwise, we do a simple equality check.
1505        let matches = if features.gc() {
1506            #[cfg(feature = "gc")]
1507            {
1508                self.env
1509                    .is_subtype(self.builder, callee_sig_id, caller_sig_id)
1510            }
1511            #[cfg(not(feature = "gc"))]
1512            {
1513                unreachable!()
1514            }
1515        } else {
1516            self.builder
1517                .ins()
1518                .icmp(IntCC::Equal, callee_sig_id, caller_sig_id)
1519        };
1520        self.env
1521            .trapz(self.builder, matches, crate::TRAP_BAD_SIGNATURE);
1522        CheckIndirectCallTypeSignature::Runtime
1523    }
1524
1525    /// Call a typed function reference.
1526    pub fn call_ref(
1527        mut self,
1528        sig_ref: ir::SigRef,
1529        callee: ir::Value,
1530        args: &[ir::Value],
1531    ) -> WasmResult<ir::Inst> {
1532        // FIXME: the wasm type system tracks enough information to know whether
1533        // `callee` is a null reference or not. In some situations it can be
1534        // statically known here that `callee` cannot be null, in which case this
1535        // can be `None` instead. This requires feeding type information from
1536        // wasmparser's validator into this function, however, which is not
1537        // easily done at this time.
1538        let callee_load_trap_code = Some(crate::TRAP_NULL_REFERENCE);
1539
1540        self.unchecked_call(sig_ref, callee, callee_load_trap_code, args)
1541    }
1542
1543    /// This calls a function by reference without checking the signature.
1544    ///
1545    /// It gets the function address, sets relevant flags, and passes the
1546    /// special callee/caller vmctxs. It is used by both call_indirect (which
1547    /// checks the signature) and call_ref (which doesn't).
1548    fn unchecked_call(
1549        &mut self,
1550        sig_ref: ir::SigRef,
1551        callee: ir::Value,
1552        callee_load_trap_code: Option<ir::TrapCode>,
1553        call_args: &[ir::Value],
1554    ) -> WasmResult<ir::Inst> {
1555        let (func_addr, callee_vmctx) = self.load_code_and_vmctx(callee, callee_load_trap_code);
1556        self.unchecked_call_impl(sig_ref, func_addr, callee_vmctx, call_args)
1557    }
1558
1559    fn load_code_and_vmctx(
1560        &mut self,
1561        callee: ir::Value,
1562        callee_load_trap_code: Option<ir::TrapCode>,
1563    ) -> (ir::Value, ir::Value) {
1564        let pointer_type = self.env.pointer_type();
1565
1566        // Dereference callee pointer to get the function address.
1567        //
1568        // Note that this may trap if `callee` hasn't previously been verified
1569        // to be non-null. For that reason this load is annotated with an
1570        // optional trap code provided by the caller of `unchecked_call`,
1571        // covering both the case where `callee` is already known to be
1572        // non-null and the case where this load may trap.
1573        let mem_flags = ir::MemFlags::trusted().with_readonly();
1574        let mut callee_flags = mem_flags;
1575        if self.env.clif_memory_traps_enabled() {
1576            callee_flags = callee_flags.with_trap_code(callee_load_trap_code);
1577        } else {
1578            if let Some(trap) = callee_load_trap_code {
1579                self.env.trapz(self.builder, callee, trap);
1580            }
1581        }
1582        let func_addr = self.builder.ins().load(
1583            pointer_type,
1584            callee_flags,
1585            callee,
1586            i32::from(self.env.offsets.ptr.vm_func_ref_wasm_call()),
1587        );
1588        let callee_vmctx = self.builder.ins().load(
1589            pointer_type,
1590            mem_flags,
1591            callee,
1592            i32::from(self.env.offsets.ptr.vm_func_ref_vmctx()),
1593        );
1594
1595        (func_addr, callee_vmctx)
1596    }
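    // A sketch of what the two loads above touch, going through the offsets in
    // `self.env.offsets.ptr` rather than any hard-coded layout:
    //
    //     vm_func_ref_wasm_call() -> code pointer for the wasm calling convention
    //     vm_func_ref_vmctx()     -> the callee's vmctx pointer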
1597
1598    /// This calls a function by reference without checking the
1599    /// signature, given the raw code pointer to the
1600    /// Wasm-calling-convention entry point and the callee vmctx.
1601    fn unchecked_call_impl(
1602        &mut self,
1603        sig_ref: ir::SigRef,
1604        func_addr: ir::Value,
1605        callee_vmctx: ir::Value,
1606        call_args: &[ir::Value],
1607    ) -> WasmResult<ir::Inst> {
1608        let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
1609        let caller_vmctx = self
1610            .builder
1611            .func
1612            .special_param(ArgumentPurpose::VMContext)
1613            .unwrap();
1614
1615        // First append the callee and caller vmctx addresses.
1616        real_call_args.push(callee_vmctx);
1617        real_call_args.push(caller_vmctx);
1618
1619        // Then append the regular call arguments.
1620        real_call_args.extend_from_slice(call_args);
1621
1622        Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
1623    }
1624
1625    fn direct_call_inst(&mut self, callee: ir::FuncRef, args: &[ir::Value]) -> ir::Inst {
1626        if self.tail {
1627            self.builder.ins().return_call(callee, args)
1628        } else {
1629            let inst = self.builder.ins().call(callee, args);
1630            let results: SmallVec<[_; 4]> = self
1631                .builder
1632                .func
1633                .dfg
1634                .inst_results(inst)
1635                .iter()
1636                .copied()
1637                .collect();
1638            for (i, val) in results.into_iter().enumerate() {
1639                if self
1640                    .env
1641                    .func_ref_result_needs_stack_map(&self.builder.func, callee, i)
1642                {
1643                    self.builder.declare_value_needs_stack_map(val);
1644                }
1645            }
1646            inst
1647        }
1648    }
1649
1650    fn indirect_call_inst(
1651        &mut self,
1652        sig_ref: ir::SigRef,
1653        func_addr: ir::Value,
1654        args: &[ir::Value],
1655    ) -> ir::Inst {
1656        if self.tail {
1657            self.builder
1658                .ins()
1659                .return_call_indirect(sig_ref, func_addr, args)
1660        } else {
1661            let inst = self.builder.ins().call_indirect(sig_ref, func_addr, args);
1662            let results: SmallVec<[_; 4]> = self
1663                .builder
1664                .func
1665                .dfg
1666                .inst_results(inst)
1667                .iter()
1668                .copied()
1669                .collect();
1670            for (i, val) in results.into_iter().enumerate() {
1671                if self.env.sig_ref_result_needs_stack_map(sig_ref, i) {
1672                    self.builder.declare_value_needs_stack_map(val);
1673                }
1674            }
1675            inst
1676        }
1677    }
1678}
1679
1680impl TypeConvert for FuncEnvironment<'_> {
1681    fn lookup_heap_type(&self, ty: wasmparser::UnpackedIndex) -> WasmHeapType {
1682        wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
1683            self.module.types[idx].unwrap_module_type_index()
1684        })
1685        .lookup_heap_type(ty)
1686    }
1687
1688    fn lookup_type_index(&self, index: wasmparser::UnpackedIndex) -> EngineOrModuleTypeIndex {
1689        wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
1690            self.module.types[idx].unwrap_module_type_index()
1691        })
1692        .lookup_type_index(index)
1693    }
1694}
1695
1696impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> {
1697    fn target_config(&self) -> TargetFrontendConfig {
1698        self.isa.frontend_config()
1699    }
1700
1701    fn reference_type(&self, wasm_ty: WasmHeapType) -> (ir::Type, bool) {
1702        let ty = crate::reference_type(wasm_ty, self.pointer_type());
1703        let needs_stack_map = match wasm_ty.top() {
1704            WasmHeapTopType::Extern | WasmHeapTopType::Any => true,
1705            WasmHeapTopType::Func => false,
1706            WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
1707        };
1708        (ty, needs_stack_map)
1709    }
1710
1711    fn heap_access_spectre_mitigation(&self) -> bool {
1712        self.isa.flags().enable_heap_access_spectre_mitigation()
1713    }
1714
1715    fn proof_carrying_code(&self) -> bool {
1716        self.isa.flags().enable_pcc()
1717    }
1718
1719    fn tunables(&self) -> &Tunables {
1720        self.compiler.tunables()
1721    }
1722}
1723
1724impl FuncEnvironment<'_> {
1725    pub fn heaps(&self) -> &PrimaryMap<Heap, HeapData> {
1726        &self.heaps
1727    }
1728
1729    pub fn is_wasm_parameter(&self, _signature: &ir::Signature, index: usize) -> bool {
1730        // The first two parameters are the vmctx and caller vmctx. The rest are
1731        // the wasm parameters.
1732        index >= 2
1733    }
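    // Put differently, the CLIF signature for a wasm function is laid out as
    // follows (a sketch):
    //
    //     param 0:   callee vmctx
    //     param 1:   caller vmctx
    //     param 2..: wasm parameters (wasm param `i` is CLIF param `i + 2`)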
1734
1735    pub fn param_needs_stack_map(&self, _signature: &ir::Signature, index: usize) -> bool {
1736        // Skip the caller and callee vmctx.
1737        if index < 2 {
1738            return false;
1739        }
1740
1741        self.wasm_func_ty.params()[index - 2].is_vmgcref_type_and_not_i31()
1742    }
1743
1744    pub fn sig_ref_result_needs_stack_map(&self, sig_ref: ir::SigRef, index: usize) -> bool {
1745        let wasm_func_ty = self.sig_ref_to_ty[sig_ref].as_ref().unwrap();
1746        wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31()
1747    }
1748
1749    pub fn func_ref_result_needs_stack_map(
1750        &self,
1751        func: &ir::Function,
1752        func_ref: ir::FuncRef,
1753        index: usize,
1754    ) -> bool {
1755        let sig_ref = func.dfg.ext_funcs[func_ref].signature;
1756        let wasm_func_ty = self.sig_ref_to_ty[sig_ref].as_ref().unwrap();
1757        wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31()
1758    }
1759
1760    pub fn after_locals(&mut self, num_locals: usize) {
1761        self.fuel_var = Variable::new(num_locals);
1762        self.epoch_deadline_var = Variable::new(num_locals + 1);
1763        self.epoch_ptr_var = Variable::new(num_locals + 2);
1764    }
1765
1766    pub fn translate_table_grow(
1767        &mut self,
1768        builder: &mut FunctionBuilder<'_>,
1769        table_index: TableIndex,
1770        delta: ir::Value,
1771        init_value: ir::Value,
1772    ) -> WasmResult<ir::Value> {
1773        let mut pos = builder.cursor();
1774        let table = self.table(table_index);
1775        let ty = table.ref_type.heap_type;
1776        let grow = if ty.is_vmgcref_type() {
1777            gc::builtins::table_grow_gc_ref(self, &mut pos.func)?
1778        } else {
1779            debug_assert_eq!(ty.top(), WasmHeapTopType::Func);
1780            self.builtin_functions.table_grow_func_ref(&mut pos.func)
1781        };
1782
1783        let vmctx = self.vmctx_val(&mut pos);
1784
1785        let index_type = table.idx_type;
1786        let delta = self.cast_index_to_i64(&mut pos, delta, index_type);
1787        let table_index_arg = pos.ins().iconst(I32, table_index.as_u32() as i64);
1788        let call_inst = pos
1789            .ins()
1790            .call(grow, &[vmctx, table_index_arg, delta, init_value]);
1791        let result = pos.func.dfg.first_result(call_inst);
1792        Ok(self.convert_pointer_to_index_type(builder.cursor(), result, index_type, false))
1793    }
1794
1795    pub fn translate_table_get(
1796        &mut self,
1797        builder: &mut FunctionBuilder,
1798        table_index: TableIndex,
1799        index: ir::Value,
1800    ) -> WasmResult<ir::Value> {
1801        let table = self.module.tables[table_index];
1802        self.ensure_table_exists(builder.func, table_index);
1803        let table_data = self.tables[table_index].clone().unwrap();
1804        let heap_ty = table.ref_type.heap_type;
1805        match heap_ty.top() {
1806            // GC-managed types.
1807            WasmHeapTopType::Any | WasmHeapTopType::Extern => {
1808                let (src, flags) = table_data.prepare_table_addr(self, builder, index);
1809                gc::gc_compiler(self)?.translate_read_gc_reference(
1810                    self,
1811                    builder,
1812                    table.ref_type,
1813                    src,
1814                    flags,
1815                )
1816            }
1817
1818            // Function types.
1819            WasmHeapTopType::Func => {
1820                Ok(self.get_or_init_func_ref_table_elem(builder, table_index, index, false))
1821            }
1822
1823            // Continuation types.
1824            WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
1825        }
1826    }
1827
1828    pub fn translate_table_set(
1829        &mut self,
1830        builder: &mut FunctionBuilder,
1831        table_index: TableIndex,
1832        value: ir::Value,
1833        index: ir::Value,
1834    ) -> WasmResult<()> {
1835        let table = self.module.tables[table_index];
1836        self.ensure_table_exists(builder.func, table_index);
1837        let table_data = self.tables[table_index].clone().unwrap();
1838        let heap_ty = table.ref_type.heap_type;
1839        match heap_ty.top() {
1840            // GC-managed types.
1841            WasmHeapTopType::Any | WasmHeapTopType::Extern => {
1842                let (dst, flags) = table_data.prepare_table_addr(self, builder, index);
1843                gc::gc_compiler(self)?.translate_write_gc_reference(
1844                    self,
1845                    builder,
1846                    table.ref_type,
1847                    dst,
1848                    value,
1849                    flags,
1850                )
1851            }
1852
1853            // Function types.
1854            WasmHeapTopType::Func => {
1855                let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
1856                // Set the "initialized bit". See doc-comment on
1857                // `FUNCREF_INIT_BIT` in
1858                // crates/environ/src/ref_bits.rs for details.
1859                let value_with_init_bit = if self.tunables.table_lazy_init {
1860                    builder
1861                        .ins()
1862                        .bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64))
1863                } else {
1864                    value
1865                };
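                // A sketch of the encoding when lazy table init is enabled: a
                // non-null funcref pointer `p` is stored as `p | FUNCREF_INIT_BIT`,
                // and readers strip the bit again via `FUNCREF_MASK` (see the
                // doc-comment referenced above for the authoritative bit layout).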
1866                builder
1867                    .ins()
1868                    .store(flags, value_with_init_bit, elem_addr, 0);
1869                Ok(())
1870            }
1871
1872            // Continuation types.
1873            WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
1874        }
1875    }
1876
1877    pub fn translate_table_fill(
1878        &mut self,
1879        builder: &mut FunctionBuilder<'_>,
1880        table_index: TableIndex,
1881        dst: ir::Value,
1882        val: ir::Value,
1883        len: ir::Value,
1884    ) -> WasmResult<()> {
1885        let mut pos = builder.cursor();
1886        let table = self.table(table_index);
1887        let index_type = table.idx_type;
1888        let dst = self.cast_index_to_i64(&mut pos, dst, index_type);
1889        let len = self.cast_index_to_i64(&mut pos, len, index_type);
1890        let ty = table.ref_type.heap_type;
1891        let libcall = if ty.is_vmgcref_type() {
1892            gc::builtins::table_fill_gc_ref(self, &mut pos.func)?
1893        } else {
1894            debug_assert_eq!(ty.top(), WasmHeapTopType::Func);
1895            self.builtin_functions.table_fill_func_ref(&mut pos.func)
1896        };
1897
1898        let vmctx = self.vmctx_val(&mut pos);
1899
1900        let table_index_arg = pos.ins().iconst(I32, table_index.as_u32() as i64);
1901        pos.ins()
1902            .call(libcall, &[vmctx, table_index_arg, dst, val, len]);
1903
1904        Ok(())
1905    }
1906
1907    pub fn translate_ref_i31(
1908        &mut self,
1909        mut pos: FuncCursor,
1910        val: ir::Value,
1911    ) -> WasmResult<ir::Value> {
1912        debug_assert_eq!(pos.func.dfg.value_type(val), ir::types::I32);
1913        let shifted = pos.ins().ishl_imm(val, 1);
1914        let tagged = pos
1915            .ins()
1916            .bor_imm(shifted, i64::from(crate::I31_REF_DISCRIMINANT));
1917        let (ref_ty, _needs_stack_map) = self.reference_type(WasmHeapType::I31);
1918        debug_assert_eq!(ref_ty, ir::types::I32);
1919        Ok(tagged)
1920    }
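    // A sketch of the i31ref encoding implied by the shifts above: a 32-bit
    // payload `x` is tagged as `(x << 1) | I31_REF_DISCRIMINANT`, and
    // `translate_i31_get_s`/`translate_i31_get_u` below recover it with an
    // arithmetic or logical shift right by one.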
1921
1922    pub fn translate_i31_get_s(
1923        &mut self,
1924        builder: &mut FunctionBuilder,
1925        i31ref: ir::Value,
1926    ) -> WasmResult<ir::Value> {
1927        // TODO: If we knew we have a `(ref i31)` here, instead of maybe a `(ref
1928        // null i31)`, we could omit the `trapz`. But plumbing that type info
1929        // from `wasmparser` and through to here is a bit funky.
1930        self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
1931        Ok(builder.ins().sshr_imm(i31ref, 1))
1932    }
1933
1934    pub fn translate_i31_get_u(
1935        &mut self,
1936        builder: &mut FunctionBuilder,
1937        i31ref: ir::Value,
1938    ) -> WasmResult<ir::Value> {
1939        // TODO: If we knew we have a `(ref i31)` here, instead of maybe a `(ref
1940        // null i31)`, we could omit the `trapz`. But plumbing that type info
1941        // from `wasmparser` and through to here is a bit funky.
1942        self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
1943        Ok(builder.ins().ushr_imm(i31ref, 1))
1944    }
1945
1946    pub fn struct_fields_len(&mut self, struct_type_index: TypeIndex) -> WasmResult<usize> {
1947        let ty = self.module.types[struct_type_index].unwrap_module_type_index();
1948        match &self.types[ty].composite_type.inner {
1949            WasmCompositeInnerType::Struct(s) => Ok(s.fields.len()),
1950            _ => unreachable!(),
1951        }
1952    }
1953
1954    pub fn translate_struct_new(
1955        &mut self,
1956        builder: &mut FunctionBuilder,
1957        struct_type_index: TypeIndex,
1958        fields: StructFieldsVec,
1959    ) -> WasmResult<ir::Value> {
1960        gc::translate_struct_new(self, builder, struct_type_index, &fields)
1961    }
1962
1963    pub fn translate_struct_new_default(
1964        &mut self,
1965        builder: &mut FunctionBuilder,
1966        struct_type_index: TypeIndex,
1967    ) -> WasmResult<ir::Value> {
1968        gc::translate_struct_new_default(self, builder, struct_type_index)
1969    }
1970
1971    pub fn translate_struct_get(
1972        &mut self,
1973        builder: &mut FunctionBuilder,
1974        struct_type_index: TypeIndex,
1975        field_index: u32,
1976        struct_ref: ir::Value,
1977        extension: Option<Extension>,
1978    ) -> WasmResult<ir::Value> {
1979        gc::translate_struct_get(
1980            self,
1981            builder,
1982            struct_type_index,
1983            field_index,
1984            struct_ref,
1985            extension,
1986        )
1987    }
1988
1989    pub fn translate_struct_set(
1990        &mut self,
1991        builder: &mut FunctionBuilder,
1992        struct_type_index: TypeIndex,
1993        field_index: u32,
1994        struct_ref: ir::Value,
1995        value: ir::Value,
1996    ) -> WasmResult<()> {
1997        gc::translate_struct_set(
1998            self,
1999            builder,
2000            struct_type_index,
2001            field_index,
2002            struct_ref,
2003            value,
2004        )
2005    }
2006
2007    pub fn translate_array_new(
2008        &mut self,
2009        builder: &mut FunctionBuilder,
2010        array_type_index: TypeIndex,
2011        elem: ir::Value,
2012        len: ir::Value,
2013    ) -> WasmResult<ir::Value> {
2014        gc::translate_array_new(self, builder, array_type_index, elem, len)
2015    }
2016
2017    pub fn translate_array_new_default(
2018        &mut self,
2019        builder: &mut FunctionBuilder,
2020        array_type_index: TypeIndex,
2021        len: ir::Value,
2022    ) -> WasmResult<ir::Value> {
2023        gc::translate_array_new_default(self, builder, array_type_index, len)
2024    }
2025
2026    pub fn translate_array_new_fixed(
2027        &mut self,
2028        builder: &mut FunctionBuilder,
2029        array_type_index: TypeIndex,
2030        elems: &[ir::Value],
2031    ) -> WasmResult<ir::Value> {
2032        gc::translate_array_new_fixed(self, builder, array_type_index, elems)
2033    }
2034
2035    pub fn translate_array_new_data(
2036        &mut self,
2037        builder: &mut FunctionBuilder,
2038        array_type_index: TypeIndex,
2039        data_index: DataIndex,
2040        data_offset: ir::Value,
2041        len: ir::Value,
2042    ) -> WasmResult<ir::Value> {
2043        let libcall = gc::builtins::array_new_data(self, builder.func)?;
2044        let vmctx = self.vmctx_val(&mut builder.cursor());
2045        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2046        let interned_type_index = builder
2047            .ins()
2048            .iconst(I32, i64::from(interned_type_index.as_u32()));
2049        let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
2050        let call_inst = builder.ins().call(
2051            libcall,
2052            &[vmctx, interned_type_index, data_index, data_offset, len],
2053        );
2054        let result = builder.func.dfg.first_result(call_inst);
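        // The builtin returns a wider-than-32-bit value, so narrow it down to
        // the 32-bit representation used for GC references here.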
2055        Ok(builder.ins().ireduce(ir::types::I32, result))
2056    }
2057
2058    pub fn translate_array_new_elem(
2059        &mut self,
2060        builder: &mut FunctionBuilder,
2061        array_type_index: TypeIndex,
2062        elem_index: ElemIndex,
2063        elem_offset: ir::Value,
2064        len: ir::Value,
2065    ) -> WasmResult<ir::Value> {
2066        let libcall = gc::builtins::array_new_elem(self, builder.func)?;
2067        let vmctx = self.vmctx_val(&mut builder.cursor());
2068        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2069        let interned_type_index = builder
2070            .ins()
2071            .iconst(I32, i64::from(interned_type_index.as_u32()));
2072        let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
2073        let call_inst = builder.ins().call(
2074            libcall,
2075            &[vmctx, interned_type_index, elem_index, elem_offset, len],
2076        );
2077        let result = builder.func.dfg.first_result(call_inst);
2078        Ok(builder.ins().ireduce(ir::types::I32, result))
2079    }
2080
2081    pub fn translate_array_copy(
2082        &mut self,
2083        builder: &mut FunctionBuilder,
2084        _dst_array_type_index: TypeIndex,
2085        dst_array: ir::Value,
2086        dst_index: ir::Value,
2087        _src_array_type_index: TypeIndex,
2088        src_array: ir::Value,
2089        src_index: ir::Value,
2090        len: ir::Value,
2091    ) -> WasmResult<()> {
2092        let libcall = gc::builtins::array_copy(self, builder.func)?;
2093        let vmctx = self.vmctx_val(&mut builder.cursor());
2094        builder.ins().call(
2095            libcall,
2096            &[vmctx, dst_array, dst_index, src_array, src_index, len],
2097        );
2098        Ok(())
2099    }
2100
2101    pub fn translate_array_fill(
2102        &mut self,
2103        builder: &mut FunctionBuilder,
2104        array_type_index: TypeIndex,
2105        array: ir::Value,
2106        index: ir::Value,
2107        value: ir::Value,
2108        len: ir::Value,
2109    ) -> WasmResult<()> {
2110        gc::translate_array_fill(self, builder, array_type_index, array, index, value, len)
2111    }
2112
2113    pub fn translate_array_init_data(
2114        &mut self,
2115        builder: &mut FunctionBuilder,
2116        array_type_index: TypeIndex,
2117        array: ir::Value,
2118        dst_index: ir::Value,
2119        data_index: DataIndex,
2120        data_offset: ir::Value,
2121        len: ir::Value,
2122    ) -> WasmResult<()> {
2123        let libcall = gc::builtins::array_init_data(self, builder.func)?;
2124        let vmctx = self.vmctx_val(&mut builder.cursor());
2125        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2126        let interned_type_index = builder
2127            .ins()
2128            .iconst(I32, i64::from(interned_type_index.as_u32()));
2129        let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
2130        builder.ins().call(
2131            libcall,
2132            &[
2133                vmctx,
2134                interned_type_index,
2135                array,
2136                dst_index,
2137                data_index,
2138                data_offset,
2139                len,
2140            ],
2141        );
2142        Ok(())
2143    }
2144
2145    pub fn translate_array_init_elem(
2146        &mut self,
2147        builder: &mut FunctionBuilder,
2148        array_type_index: TypeIndex,
2149        array: ir::Value,
2150        dst_index: ir::Value,
2151        elem_index: ElemIndex,
2152        elem_offset: ir::Value,
2153        len: ir::Value,
2154    ) -> WasmResult<()> {
2155        let libcall = gc::builtins::array_init_elem(self, builder.func)?;
2156        let vmctx = self.vmctx_val(&mut builder.cursor());
2157        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2158        let interned_type_index = builder
2159            .ins()
2160            .iconst(I32, i64::from(interned_type_index.as_u32()));
2161        let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
2162        builder.ins().call(
2163            libcall,
2164            &[
2165                vmctx,
2166                interned_type_index,
2167                array,
2168                dst_index,
2169                elem_index,
2170                elem_offset,
2171                len,
2172            ],
2173        );
2174        Ok(())
2175    }
2176
2177    pub fn translate_array_len(
2178        &mut self,
2179        builder: &mut FunctionBuilder,
2180        array: ir::Value,
2181    ) -> WasmResult<ir::Value> {
2182        gc::translate_array_len(self, builder, array)
2183    }
2184
2185    pub fn translate_array_get(
2186        &mut self,
2187        builder: &mut FunctionBuilder,
2188        array_type_index: TypeIndex,
2189        array: ir::Value,
2190        index: ir::Value,
2191        extension: Option<Extension>,
2192    ) -> WasmResult<ir::Value> {
2193        gc::translate_array_get(self, builder, array_type_index, array, index, extension)
2194    }
2195
2196    pub fn translate_array_set(
2197        &mut self,
2198        builder: &mut FunctionBuilder,
2199        array_type_index: TypeIndex,
2200        array: ir::Value,
2201        index: ir::Value,
2202        value: ir::Value,
2203    ) -> WasmResult<()> {
2204        gc::translate_array_set(self, builder, array_type_index, array, index, value)
2205    }
2206
2207    pub fn translate_ref_test(
2208        &mut self,
2209        builder: &mut FunctionBuilder<'_>,
2210        ref_ty: WasmRefType,
2211        gc_ref: ir::Value,
2212    ) -> WasmResult<ir::Value> {
2213        gc::translate_ref_test(self, builder, ref_ty, gc_ref)
2214    }
2215
2216    pub fn translate_ref_null(
2217        &mut self,
2218        mut pos: cranelift_codegen::cursor::FuncCursor,
2219        ht: WasmHeapType,
2220    ) -> WasmResult<ir::Value> {
2221        Ok(match ht.top() {
2222            WasmHeapTopType::Func => pos.ins().iconst(self.pointer_type(), 0),
2223            // NB: null GC references don't need to be in stack maps.
2224            WasmHeapTopType::Any | WasmHeapTopType::Extern => pos.ins().iconst(types::I32, 0),
2225            WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
2226        })
2227    }
2228
2229    pub fn translate_ref_is_null(
2230        &mut self,
2231        mut pos: cranelift_codegen::cursor::FuncCursor,
2232        value: ir::Value,
2233    ) -> WasmResult<ir::Value> {
2234        let byte_is_null =
2235            pos.ins()
2236                .icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, value, 0);
2237        Ok(pos.ins().uextend(ir::types::I32, byte_is_null))
2238    }
2239
2240    pub fn translate_ref_func(
2241        &mut self,
2242        mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
2243        func_index: FuncIndex,
2244    ) -> WasmResult<ir::Value> {
2245        let func_index = pos.ins().iconst(I32, func_index.as_u32() as i64);
2246        let ref_func = self.builtin_functions.ref_func(&mut pos.func);
2247        let vmctx = self.vmctx_val(&mut pos);
2248
2249        let call_inst = pos.ins().call(ref_func, &[vmctx, func_index]);
2250        Ok(pos.func.dfg.first_result(call_inst))
2251    }
2252
2253    pub fn translate_custom_global_get(
2254        &mut self,
2255        builder: &mut FunctionBuilder,
2256        index: GlobalIndex,
2257    ) -> WasmResult<ir::Value> {
2258        let global_ty = self.module.globals[index];
2259        let wasm_ty = global_ty.wasm_ty;
2260        debug_assert!(
2261            wasm_ty.is_vmgcref_type(),
2262            "We only use GlobalVariable::Custom for VMGcRef types"
2263        );
2264        let WasmValType::Ref(ref_ty) = wasm_ty else {
2265            unreachable!()
2266        };
2267
2268        let (gv, offset) = self.get_global_location(builder.func, index);
2269        let gv = builder.ins().global_value(self.pointer_type(), gv);
2270        let src = builder.ins().iadd_imm(gv, i64::from(offset));
2271
2272        gc::gc_compiler(self)?.translate_read_gc_reference(
2273            self,
2274            builder,
2275            ref_ty,
2276            src,
2277            if global_ty.mutability {
2278                ir::MemFlags::trusted()
2279            } else {
2280                ir::MemFlags::trusted().with_readonly().with_can_move()
2281            },
2282        )
2283    }
2284
2285    pub fn translate_custom_global_set(
2286        &mut self,
2287        builder: &mut FunctionBuilder,
2288        index: GlobalIndex,
2289        value: ir::Value,
2290    ) -> WasmResult<()> {
2291        let ty = self.module.globals[index].wasm_ty;
2292        debug_assert!(
2293            ty.is_vmgcref_type(),
2294            "We only use GlobalVariable::Custom for VMGcRef types"
2295        );
2296        let WasmValType::Ref(ty) = ty else {
2297            unreachable!()
2298        };
2299
2300        let (gv, offset) = self.get_global_location(builder.func, index);
2301        let gv = builder.ins().global_value(self.pointer_type(), gv);
2302        let src = builder.ins().iadd_imm(gv, i64::from(offset));
2303
2304        gc::gc_compiler(self)?.translate_write_gc_reference(
2305            self,
2306            builder,
2307            ty,
2308            src,
2309            value,
2310            ir::MemFlags::trusted(),
2311        )
2312    }
2313
2314    pub fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<Heap> {
2315        let pointer_type = self.pointer_type();
2316        let memory = self.module.memories[index];
2317        let is_shared = memory.shared;
2318
2319        let (ptr, base_offset, current_length_offset, ptr_memtype) = {
2320            let vmctx = self.vmctx(func);
2321            if let Some(def_index) = self.module.defined_memory_index(index) {
2322                if is_shared {
2323                    // As with imported memory, the `VMMemoryDefinition` for a
2324                    // shared memory is stored elsewhere. We store a `*mut
2325                    // VMMemoryDefinition` to it and dereference that when
2326                    // atomically growing it.
2327                    let from_offset = self.offsets.vmctx_vmmemory_pointer(def_index);
2328                    let (memory, def_mt) = self.load_pointer_with_memtypes(
2329                        func,
2330                        from_offset,
2331                        true,
2332                        self.pcc_vmctx_memtype,
2333                    );
2334                    let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
2335                    let current_length_offset =
2336                        i32::from(self.offsets.ptr.vmmemory_definition_current_length());
2337                    (memory, base_offset, current_length_offset, def_mt)
2338                } else {
2339                    let owned_index = self.module.owned_memory_index(def_index);
2340                    let owned_base_offset =
2341                        self.offsets.vmctx_vmmemory_definition_base(owned_index);
2342                    let owned_length_offset = self
2343                        .offsets
2344                        .vmctx_vmmemory_definition_current_length(owned_index);
2345                    let current_base_offset = i32::try_from(owned_base_offset).unwrap();
2346                    let current_length_offset = i32::try_from(owned_length_offset).unwrap();
2347                    (
2348                        vmctx,
2349                        current_base_offset,
2350                        current_length_offset,
2351                        self.pcc_vmctx_memtype,
2352                    )
2353                }
2354            } else {
2355                let from_offset = self.offsets.vmctx_vmmemory_import_from(index);
2356                let (memory, def_mt) = self.load_pointer_with_memtypes(
2357                    func,
2358                    from_offset,
2359                    true,
2360                    self.pcc_vmctx_memtype,
2361                );
2362                let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
2363                let current_length_offset =
2364                    i32::from(self.offsets.ptr.vmmemory_definition_current_length());
2365                (memory, base_offset, current_length_offset, def_mt)
2366            }
2367        };
2368
2369        let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
2370            base: ptr,
2371            offset: Offset32::new(current_length_offset),
2372            global_type: pointer_type,
2373            flags: MemFlags::trusted(),
2374        });
2375
2376        // If bounds checks can be elided for this memory, we can treat it as a
2377        // "static" heap, which is allocated up front and never moved.
2378        let host_page_size_log2 = self.target_config().page_size_align_log2;
2379        let (base_fact, memory_type) = if !memory
2380            .can_elide_bounds_check(self.tunables, host_page_size_log2)
2381        {
2382            if let Some(ptr_memtype) = ptr_memtype {
2383                // Create a memtype representing the untyped memory region.
2384                let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory {
2385                    gv: heap_bound,
2386                    size: self.tunables.memory_guard_size,
2387                });
2388                // This fact applies to any pointer to the start of the memory.
2389                let base_fact = ir::Fact::dynamic_base_ptr(data_mt);
2390                // This fact applies to the length.
2391                let length_fact = ir::Fact::global_value(
2392                    u16::try_from(self.isa.pointer_type().bits()).unwrap(),
2393                    heap_bound,
2394                );
2395                // Create a field in the vmctx for the base pointer.
2396                match &mut func.memory_types[ptr_memtype] {
2397                    ir::MemoryTypeData::Struct { size, fields } => {
2398                        let base_offset = u64::try_from(base_offset).unwrap();
2399                        fields.push(ir::MemoryTypeField {
2400                            offset: base_offset,
2401                            ty: self.isa.pointer_type(),
2402                            // Read-only field from the PoV of PCC checks:
2403                            // don't allow stores to this field. (Even if
2404                            // it is a dynamic memory whose base can
2405                            // change, that update happens inside the
2406                            // runtime, not in generated code.)
2407                            readonly: true,
2408                            fact: Some(base_fact.clone()),
2409                        });
2410                        let current_length_offset = u64::try_from(current_length_offset).unwrap();
2411                        fields.push(ir::MemoryTypeField {
2412                            offset: current_length_offset,
2413                            ty: self.isa.pointer_type(),
2414                            // As above, read-only; only the runtime modifies it.
2415                            readonly: true,
2416                            fact: Some(length_fact),
2417                        });
2418
2419                        let pointer_size = u64::from(self.isa.pointer_type().bytes());
2420                        let fields_end = std::cmp::max(
2421                            base_offset + pointer_size,
2422                            current_length_offset + pointer_size,
2423                        );
2424                        *size = std::cmp::max(*size, fields_end);
2425                    }
2426                    _ => {
2427                        panic!("Bad memtype");
2428                    }
2429                }
2430                // Apply a fact to the base pointer.
2431                (Some(base_fact), Some(data_mt))
2432            } else {
2433                (None, None)
2434            }
2435        } else {
2436            if let Some(ptr_memtype) = ptr_memtype {
2437                // Create a memtype representing the untyped memory region.
2438                let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory {
2439                    size: self
2440                        .tunables
2441                        .memory_reservation
2442                        .checked_add(self.tunables.memory_guard_size)
2443                        .expect("Memory plan has overflowing size plus guard"),
2444                });
2445                // This fact applies to any pointer to the start of the memory.
2446                let base_fact = Fact::Mem {
2447                    ty: data_mt,
2448                    min_offset: 0,
2449                    max_offset: 0,
2450                    nullable: false,
2451                };
2452                // Create a field in the vmctx for the base pointer.
2453                match &mut func.memory_types[ptr_memtype] {
2454                    ir::MemoryTypeData::Struct { size, fields } => {
2455                        let offset = u64::try_from(base_offset).unwrap();
2456                        fields.push(ir::MemoryTypeField {
2457                            offset,
2458                            ty: self.isa.pointer_type(),
2459                            // Read-only field from the PoV of PCC checks:
2460                            // don't allow stores to this field. (Even if
2461                            // it is a dynamic memory whose base can
2462                            // change, that update happens inside the
2463                            // runtime, not in generated code.)
2464                            readonly: true,
2465                            fact: Some(base_fact.clone()),
2466                        });
2467                        *size = std::cmp::max(
2468                            *size,
2469                            offset + u64::from(self.isa.pointer_type().bytes()),
2470                        );
2471                    }
2472                    _ => {
2473                        panic!("Bad memtype");
2474                    }
2475                }
2476                // Apply a fact to the base pointer.
2477                (Some(base_fact), Some(data_mt))
2478            } else {
2479                (None, None)
2480            }
2481        };
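        // Summarizing the PCC facts constructed above (only when a vmctx memory
        // type is available): memories that still need bounds checks get a
        // `DynamicMemory` memtype bounded by `heap_bound` plus the guard region,
        // while memories whose bounds checks can be elided get a plain `Memory`
        // memtype sized to the static reservation plus the guard region.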
2482
2483        let mut flags = MemFlags::trusted().with_checked().with_can_move();
2484        if !memory.memory_may_move(self.tunables) {
2485            flags.set_readonly();
2486        }
2487        let heap_base = func.create_global_value(ir::GlobalValueData::Load {
2488            base: ptr,
2489            offset: Offset32::new(base_offset),
2490            global_type: pointer_type,
2491            flags,
2492        });
2493        func.global_value_facts[heap_base] = base_fact;
2494
2495        Ok(self.heaps.push(HeapData {
2496            base: heap_base,
2497            bound: heap_bound,
2498            pcc_memory_type: memory_type,
2499            memory,
2500        }))
2501    }
2502
2503    pub fn make_global(
2504        &mut self,
2505        func: &mut ir::Function,
2506        index: GlobalIndex,
2507    ) -> WasmResult<GlobalVariable> {
2508        let ty = self.module.globals[index].wasm_ty;
2509
2510        if ty.is_vmgcref_type() {
2511            // Although reference-typed globals live at the same memory location as
2512            // any other type of global at the same index would, getting or
2513            // setting them requires ref counting barriers. Therefore, we need
2514            // to use `GlobalVariable::Custom`, as that is the only kind of
2515            // `GlobalVariable` for which custom access translation is
2516            // supported.
2517            return Ok(GlobalVariable::Custom);
2518        }
2519
2520        let (gv, offset) = self.get_global_location(func, index);
2521        Ok(GlobalVariable::Memory {
2522            gv,
2523            offset: offset.into(),
2524            ty: super::value_type(self.isa, ty),
2525        })
2526    }
2527
2528    pub fn make_indirect_sig(
2529        &mut self,
2530        func: &mut ir::Function,
2531        index: TypeIndex,
2532    ) -> WasmResult<ir::SigRef> {
2533        let interned_index = self.module.types[index].unwrap_module_type_index();
2534        let wasm_func_ty = self.types[interned_index].unwrap_func();
2535        let sig = crate::wasm_call_signature(self.isa, wasm_func_ty, &self.tunables);
2536        let sig_ref = func.import_signature(sig);
2537        self.sig_ref_to_ty[sig_ref] = Some(wasm_func_ty);
2538        Ok(sig_ref)
2539    }
2540
2541    pub fn make_direct_func(
2542        &mut self,
2543        func: &mut ir::Function,
2544        index: FuncIndex,
2545    ) -> WasmResult<ir::FuncRef> {
2546        let sig = self.module.functions[index]
2547            .signature
2548            .unwrap_module_type_index();
2549        let wasm_func_ty = self.types[sig].unwrap_func();
2550        let sig = crate::wasm_call_signature(self.isa, wasm_func_ty, &self.tunables);
2551        let signature = func.import_signature(sig);
2552        self.sig_ref_to_ty[signature] = Some(wasm_func_ty);
2553        let name =
2554            ir::ExternalName::User(func.declare_imported_user_function(ir::UserExternalName {
2555                namespace: crate::NS_WASM_FUNC,
2556                index: index.as_u32(),
2557            }));
2558        Ok(func.import_function(ir::ExtFuncData {
2559            name,
2560            signature,
2561
2562            // the value of this flag determines the codegen for calls to this
2563            // function. if this flag is `false` then absolute relocations will
2564            // be generated for references to the function, which requires
2565            // load-time relocation resolution. if this flag is set to `true`
2566            // then relative relocations are emitted which can be resolved at
2567            // object-link-time, just after all functions are compiled.
2568            //
2569            // this flag is set to `true` for functions defined in the object
2570            // we'll be defining in this compilation unit, or everything local
2571            // to the wasm module. this means that between functions in a wasm
2572            // module there are relative calls encoded. all calls external to a
2573            // wasm module (e.g. imports or libcalls) are either encoded through
2574            // the `vmcontext` as relative jumps (hence no relocations) or
2575            // they're libcalls with absolute relocations.
2576            colocated: self.module.defined_func_index(index).is_some(),
2577        }))
2578    }
2579
2580    pub fn translate_call_indirect(
2581        &mut self,
2582        builder: &mut FunctionBuilder,
2583        features: &WasmFeatures,
2584        table_index: TableIndex,
2585        ty_index: TypeIndex,
2586        sig_ref: ir::SigRef,
2587        callee: ir::Value,
2588        call_args: &[ir::Value],
2589    ) -> WasmResult<Option<ir::Inst>> {
2590        Call::new(builder, self).indirect_call(
2591            features,
2592            table_index,
2593            ty_index,
2594            sig_ref,
2595            callee,
2596            call_args,
2597        )
2598    }
2599
2600    pub fn translate_call(
2601        &mut self,
2602        builder: &mut FunctionBuilder,
2603        callee_index: FuncIndex,
2604        callee: ir::FuncRef,
2605        call_args: &[ir::Value],
2606    ) -> WasmResult<ir::Inst> {
2607        Call::new(builder, self).direct_call(callee_index, callee, call_args)
2608    }
2609
2610    pub fn translate_call_ref(
2611        &mut self,
2612        builder: &mut FunctionBuilder,
2613        sig_ref: ir::SigRef,
2614        callee: ir::Value,
2615        call_args: &[ir::Value],
2616    ) -> WasmResult<ir::Inst> {
2617        Call::new(builder, self).call_ref(sig_ref, callee, call_args)
2618    }
2619
2620    pub fn translate_return_call(
2621        &mut self,
2622        builder: &mut FunctionBuilder,
2623        callee_index: FuncIndex,
2624        callee: ir::FuncRef,
2625        call_args: &[ir::Value],
2626    ) -> WasmResult<()> {
2627        Call::new_tail(builder, self).direct_call(callee_index, callee, call_args)?;
2628        Ok(())
2629    }
2630
2631    pub fn translate_return_call_indirect(
2632        &mut self,
2633        builder: &mut FunctionBuilder,
2634        features: &WasmFeatures,
2635        table_index: TableIndex,
2636        ty_index: TypeIndex,
2637        sig_ref: ir::SigRef,
2638        callee: ir::Value,
2639        call_args: &[ir::Value],
2640    ) -> WasmResult<()> {
2641        Call::new_tail(builder, self).indirect_call(
2642            features,
2643            table_index,
2644            ty_index,
2645            sig_ref,
2646            callee,
2647            call_args,
2648        )?;
2649        Ok(())
2650    }
2651
2652    pub fn translate_return_call_ref(
2653        &mut self,
2654        builder: &mut FunctionBuilder,
2655        sig_ref: ir::SigRef,
2656        callee: ir::Value,
2657        call_args: &[ir::Value],
2658    ) -> WasmResult<()> {
2659        Call::new_tail(builder, self).call_ref(sig_ref, callee, call_args)?;
2660        Ok(())
2661    }
2662
2663    pub fn translate_memory_grow(
2664        &mut self,
2665        builder: &mut FunctionBuilder<'_>,
2666        index: MemoryIndex,
2667        _heap: Heap,
2668        val: ir::Value,
2669    ) -> WasmResult<ir::Value> {
2670        let mut pos = builder.cursor();
2671        let memory_grow = self.builtin_functions.memory32_grow(&mut pos.func);
2672        let index_arg = index.index();
2673
2674        let memory_index = pos.ins().iconst(I32, index_arg as i64);
2675        let vmctx = self.vmctx_val(&mut pos);
2676
2677        let index_type = self.memory(index).idx_type;
2678        let val = self.cast_index_to_i64(&mut pos, val, index_type);
2679        let call_inst = pos.ins().call(memory_grow, &[vmctx, val, memory_index]);
2680        let result = *pos.func.dfg.inst_results(call_inst).first().unwrap();
2681        let single_byte_pages = match self.memory(index).page_size_log2 {
2682            16 => false,
2683            0 => true,
2684            _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
2685        };
2686        Ok(self.convert_pointer_to_index_type(
2687            builder.cursor(),
2688            result,
2689            index_type,
2690            single_byte_pages,
2691        ))
2692    }
2693
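    /// Translates a `memory.size` of memory `index` by loading the memory's
    /// current byte length from the vmctx (atomically for shared memories)
    /// and converting it to a page count in the memory's index type.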
2694    pub fn translate_memory_size(
2695        &mut self,
2696        mut pos: FuncCursor<'_>,
2697        index: MemoryIndex,
2698        _heap: Heap,
2699    ) -> WasmResult<ir::Value> {
2700        let pointer_type = self.pointer_type();
2701        let vmctx = self.vmctx(&mut pos.func);
2702        let is_shared = self.module.memories[index].shared;
2703        let base = pos.ins().global_value(pointer_type, vmctx);
2704        let current_length_in_bytes = match self.module.defined_memory_index(index) {
2705            Some(def_index) => {
2706                if is_shared {
2707                    let offset =
2708                        i32::try_from(self.offsets.vmctx_vmmemory_pointer(def_index)).unwrap();
2709                    let vmmemory_ptr =
2710                        pos.ins()
2711                            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
2712                    let vmmemory_definition_offset =
2713                        i64::from(self.offsets.ptr.vmmemory_definition_current_length());
2714                    let vmmemory_definition_ptr =
2715                        pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
2716                    // This atomic access of the
2717                    // `VMMemoryDefinition::current_length` is direct; no bounds
2718                    // check is needed. This is possible because shared memory
2719                    // has a static size (the maximum is always known). Shared
2720                    // memory is thus built with a static memory plan and no
2721                    // bounds-checked version of this is implemented.
2722                    pos.ins().atomic_load(
2723                        pointer_type,
2724                        ir::MemFlags::trusted(),
2725                        vmmemory_definition_ptr,
2726                    )
2727                } else {
2728                    let owned_index = self.module.owned_memory_index(def_index);
2729                    let offset = i32::try_from(
2730                        self.offsets
2731                            .vmctx_vmmemory_definition_current_length(owned_index),
2732                    )
2733                    .unwrap();
2734                    pos.ins()
2735                        .load(pointer_type, ir::MemFlags::trusted(), base, offset)
2736                }
2737            }
2738            None => {
2739                let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
2740                let vmmemory_ptr =
2741                    pos.ins()
2742                        .load(pointer_type, ir::MemFlags::trusted(), base, offset);
2743                if is_shared {
2744                    let vmmemory_definition_offset =
2745                        i64::from(self.offsets.ptr.vmmemory_definition_current_length());
2746                    let vmmemory_definition_ptr =
2747                        pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
2748                    pos.ins().atomic_load(
2749                        pointer_type,
2750                        ir::MemFlags::trusted(),
2751                        vmmemory_definition_ptr,
2752                    )
2753                } else {
2754                    pos.ins().load(
2755                        pointer_type,
2756                        ir::MemFlags::trusted(),
2757                        vmmemory_ptr,
2758                        i32::from(self.offsets.ptr.vmmemory_definition_current_length()),
2759                    )
2760                }
2761            }
2762        };
2763
2764        let page_size_log2 = i64::from(self.module.memories[index].page_size_log2);
2765        let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2);
2766        let single_byte_pages = match page_size_log2 {
2767            16 => false,
2768            0 => true,
2769            _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
2770        };
2771        Ok(self.convert_pointer_to_index_type(
2772            pos,
2773            current_length_in_pages,
2774            self.memory(index).idx_type,
2775            single_byte_pages,
2776        ))
2777    }
2778
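    /// Translates a `memory.copy` from memory `src_index` to memory
    /// `dst_index` by calling the `memory_copy` builtin.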
2779    pub fn translate_memory_copy(
2780        &mut self,
2781        builder: &mut FunctionBuilder<'_>,
2782        src_index: MemoryIndex,
2783        _src_heap: Heap,
2784        dst_index: MemoryIndex,
2785        _dst_heap: Heap,
2786        dst: ir::Value,
2787        src: ir::Value,
2788        len: ir::Value,
2789    ) -> WasmResult<()> {
2790        let mut pos = builder.cursor();
2791        let vmctx = self.vmctx_val(&mut pos);
2792
2793        let memory_copy = self.builtin_functions.memory_copy(&mut pos.func);
2794        let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(dst_index).idx_type);
2795        let src = self.cast_index_to_i64(&mut pos, src, self.memory(src_index).idx_type);
2796        // The length is 32-bit if either memory is 32-bit, and only 64-bit
2797        // when both memories are 64-bit. The builtin takes a 64-bit length so
2798        // that it works with any combination of memories, so zero-extend the
2799        // value here when necessary (this case is special enough that there's
2800        // no generic helper, unlike for `dst`/`src` above).
2801        let len = if index_type_to_ir_type(self.memory(dst_index).idx_type) == I64
2802            && index_type_to_ir_type(self.memory(src_index).idx_type) == I64
2803        {
2804            len
2805        } else {
2806            pos.ins().uextend(I64, len)
2807        };
2808        let src_index = pos.ins().iconst(I32, i64::from(src_index.as_u32()));
2809        let dst_index = pos.ins().iconst(I32, i64::from(dst_index.as_u32()));
2810        pos.ins()
2811            .call(memory_copy, &[vmctx, dst_index, dst, src_index, src, len]);
2812
2813        Ok(())
2814    }
2815
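    /// Translates a `memory.fill` of memory `memory_index` by calling the
    /// `memory_fill` builtin.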
2816    pub fn translate_memory_fill(
2817        &mut self,
2818        builder: &mut FunctionBuilder<'_>,
2819        memory_index: MemoryIndex,
2820        _heap: Heap,
2821        dst: ir::Value,
2822        val: ir::Value,
2823        len: ir::Value,
2824    ) -> WasmResult<()> {
2825        let mut pos = builder.cursor();
2826        let memory_fill = self.builtin_functions.memory_fill(&mut pos.func);
2827        let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);
2828        let len = self.cast_index_to_i64(&mut pos, len, self.memory(memory_index).idx_type);
2829        let memory_index_arg = pos.ins().iconst(I32, i64::from(memory_index.as_u32()));
2830
2831        let vmctx = self.vmctx_val(&mut pos);
2832
2833        pos.ins()
2834            .call(memory_fill, &[vmctx, memory_index_arg, dst, val, len]);
2835
2836        Ok(())
2837    }
2838
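    /// Translates a `memory.init` from the passive data segment `seg_index`
    /// into memory `memory_index` by calling the `memory_init` builtin.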
2839    pub fn translate_memory_init(
2840        &mut self,
2841        builder: &mut FunctionBuilder<'_>,
2842        memory_index: MemoryIndex,
2843        _heap: Heap,
2844        seg_index: u32,
2845        dst: ir::Value,
2846        src: ir::Value,
2847        len: ir::Value,
2848    ) -> WasmResult<()> {
2849        let mut pos = builder.cursor();
2850        let memory_init = self.builtin_functions.memory_init(&mut pos.func);
2851
2852        let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
2853        let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
2854
2855        let vmctx = self.vmctx_val(&mut pos);
2856
2857        let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);
2858
2859        pos.ins().call(
2860            memory_init,
2861            &[vmctx, memory_index_arg, seg_index_arg, dst, src, len],
2862        );
2863
2864        Ok(())
2865    }
2866
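    /// Translates a `data.drop` of the passive data segment `seg_index` by
    /// calling the `data_drop` builtin.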
2867    pub fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> {
2868        let data_drop = self.builtin_functions.data_drop(&mut pos.func);
2869        let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
2870        let vmctx = self.vmctx_val(&mut pos);
2871        pos.ins().call(data_drop, &[vmctx, seg_index_arg]);
2872        Ok(())
2873    }
2874
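    /// Translates a `table.size` of `table_index` by reading the table's
    /// current bound.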
2875    pub fn translate_table_size(
2876        &mut self,
2877        pos: FuncCursor,
2878        table_index: TableIndex,
2879    ) -> WasmResult<ir::Value> {
2880        self.ensure_table_exists(pos.func, table_index);
2881        let table_data = self.tables[table_index].as_ref().unwrap();
2882        let index_type = index_type_to_ir_type(self.table(table_index).idx_type);
2883        Ok(table_data.bound.bound(&*self.isa, pos, index_type))
2884    }
2885
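    /// Translates a `table.copy` from `src_table_index` to `dst_table_index`
    /// by calling the `table_copy` builtin.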
2886    pub fn translate_table_copy(
2887        &mut self,
2888        builder: &mut FunctionBuilder<'_>,
2889        dst_table_index: TableIndex,
2890        src_table_index: TableIndex,
2891        dst: ir::Value,
2892        src: ir::Value,
2893        len: ir::Value,
2894    ) -> WasmResult<()> {
2895        let (table_copy, dst_table_index_arg, src_table_index_arg) =
2896            self.get_table_copy_func(&mut builder.func, dst_table_index, src_table_index);
2897
2898        let mut pos = builder.cursor();
2899        let dst = self.cast_index_to_i64(&mut pos, dst, self.table(dst_table_index).idx_type);
2900        let src = self.cast_index_to_i64(&mut pos, src, self.table(src_table_index).idx_type);
2901        let len = if index_type_to_ir_type(self.table(dst_table_index).idx_type) == I64
2902            && index_type_to_ir_type(self.table(src_table_index).idx_type) == I64
2903        {
2904            len
2905        } else {
2906            pos.ins().uextend(I64, len)
2907        };
2908        let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64);
2909        let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64);
2910        let vmctx = self.vmctx_val(&mut pos);
2911        pos.ins().call(
2912            table_copy,
2913            &[
2914                vmctx,
2915                dst_table_index_arg,
2916                src_table_index_arg,
2917                dst,
2918                src,
2919                len,
2920            ],
2921        );
2922
2923        Ok(())
2924    }
2925
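    /// Translates a `table.init` from the passive element segment `seg_index`
    /// into `table_index` by calling the `table_init` builtin.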
2926    pub fn translate_table_init(
2927        &mut self,
2928        builder: &mut FunctionBuilder<'_>,
2929        seg_index: u32,
2930        table_index: TableIndex,
2931        dst: ir::Value,
2932        src: ir::Value,
2933        len: ir::Value,
2934    ) -> WasmResult<()> {
2935        let mut pos = builder.cursor();
2936        let table_init = self.builtin_functions.table_init(&mut pos.func);
2937        let table_index_arg = pos.ins().iconst(I32, i64::from(table_index.as_u32()));
2938        let seg_index_arg = pos.ins().iconst(I32, i64::from(seg_index));
2939        let vmctx = self.vmctx_val(&mut pos);
2940        let index_type = self.table(table_index).idx_type;
2941        let dst = self.cast_index_to_i64(&mut pos, dst, index_type);
2942        let src = pos.ins().uextend(I64, src);
2943        let len = pos.ins().uextend(I64, len);
2944
2945        pos.ins().call(
2946            table_init,
2947            &[vmctx, table_index_arg, seg_index_arg, dst, src, len],
2948        );
2949
2950        Ok(())
2951    }
2952
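    /// Translates an `elem.drop` of the passive element segment `elem_index`
    /// by calling the `elem_drop` builtin.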
2953    pub fn translate_elem_drop(&mut self, mut pos: FuncCursor, elem_index: u32) -> WasmResult<()> {
2954        let elem_drop = self.builtin_functions.elem_drop(&mut pos.func);
2955        let elem_index_arg = pos.ins().iconst(I32, elem_index as i64);
2956        let vmctx = self.vmctx_val(&mut pos);
2957        pos.ins().call(elem_drop, &[vmctx, elem_index_arg]);
2958        Ok(())
2959    }
2960
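    /// Translates a `memory.atomic.wait32`/`wait64` by calling the matching
    /// builtin, or returns an unsupported error if the `threads` feature was
    /// disabled at compile time.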
2961    pub fn translate_atomic_wait(
2962        &mut self,
2963        builder: &mut FunctionBuilder<'_>,
2964        memory_index: MemoryIndex,
2965        _heap: Heap,
2966        addr: ir::Value,
2967        expected: ir::Value,
2968        timeout: ir::Value,
2969    ) -> WasmResult<ir::Value> {
2970        #[cfg(feature = "threads")]
2971        {
2972            let mut pos = builder.cursor();
2973            let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
2974            let implied_ty = pos.func.dfg.value_type(expected);
2975            let (wait_func, memory_index) =
2976                self.get_memory_atomic_wait(&mut pos.func, memory_index, implied_ty);
2977
2978            let memory_index_arg = pos.ins().iconst(I32, memory_index as i64);
2979
2980            let vmctx = self.vmctx_val(&mut pos);
2981
2982            let call_inst = pos.ins().call(
2983                wait_func,
2984                &[vmctx, memory_index_arg, addr, expected, timeout],
2985            );
2986            let ret = pos.func.dfg.inst_results(call_inst)[0];
2987            Ok(builder.ins().ireduce(ir::types::I32, ret))
2988        }
2989        #[cfg(not(feature = "threads"))]
2990        {
2991            let _ = (builder, memory_index, addr, expected, timeout);
2992            Err(wasmtime_environ::WasmError::Unsupported(
2993                "threads support disabled at compile time".to_string(),
2994            ))
2995        }
2996    }
2997
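    /// Translates a `memory.atomic.notify` by calling the
    /// `memory_atomic_notify` builtin, or returns an unsupported error if the
    /// `threads` feature was disabled at compile time.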
2998    pub fn translate_atomic_notify(
2999        &mut self,
3000        builder: &mut FunctionBuilder<'_>,
3001        memory_index: MemoryIndex,
3002        _heap: Heap,
3003        addr: ir::Value,
3004        count: ir::Value,
3005    ) -> WasmResult<ir::Value> {
3006        #[cfg(feature = "threads")]
3007        {
3008            let mut pos = builder.cursor();
3009            let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
3010            let atomic_notify = self.builtin_functions.memory_atomic_notify(&mut pos.func);
3011
3012            let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
3013            let vmctx = self.vmctx_val(&mut pos);
3014            let call_inst = pos
3015                .ins()
3016                .call(atomic_notify, &[vmctx, memory_index_arg, addr, count]);
3017            let ret = pos.func.dfg.inst_results(call_inst)[0];
3018            Ok(builder.ins().ireduce(ir::types::I32, ret))
3019        }
3020        #[cfg(not(feature = "threads"))]
3021        {
3022            let _ = (builder, memory_index, addr, count);
3023            Err(wasmtime_environ::WasmError::Unsupported(
3024                "threads support disabled at compile time".to_string(),
3025            ))
3026        }
3027    }
3028
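    /// Called when translating a loop header to emit fuel and
    /// epoch-interruption checks, if those features are enabled.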
3029    pub fn translate_loop_header(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
3030        // If fuel consumption is enabled, check how much fuel we have
3031        // remaining to see if we've run out by this point.
3032        if self.tunables.consume_fuel {
3033            self.fuel_check(builder);
3034        }
3035
3036        // If we are performing epoch-based interruption, check to see
3037        // if the epoch counter has changed.
3038        if self.tunables.epoch_interruption {
3039            self.epoch_check(builder);
3040        }
3041
3042        Ok(())
3043    }
3044
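    /// Hook invoked before each Wasm operator is translated; used for fuel
    /// accounting.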
3045    pub fn before_translate_operator(
3046        &mut self,
3047        op: &Operator,
3048        builder: &mut FunctionBuilder,
3049        state: &FuncTranslationState,
3050    ) -> WasmResult<()> {
3051        if self.tunables.consume_fuel {
3052            self.fuel_before_op(op, builder, state.reachable());
3053        }
3054        Ok(())
3055    }
3056
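    /// Hook invoked after each Wasm operator is translated; used for fuel
    /// accounting.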
3057    pub fn after_translate_operator(
3058        &mut self,
3059        op: &Operator,
3060        builder: &mut FunctionBuilder,
3061        state: &FuncTranslationState,
3062    ) -> WasmResult<()> {
3063        if self.tunables.consume_fuel && state.reachable() {
3064            self.fuel_after_op(op, builder);
3065        }
3066        Ok(())
3067    }
3068
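    /// Hook invoked before emitting a memory access that is statically known
    /// to trap; if fuel consumption is enabled, the current fuel count is
    /// saved back so that it is accurate when the trap is raised.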
3069    pub fn before_unconditionally_trapping_memory_access(
3070        &mut self,
3071        builder: &mut FunctionBuilder,
3072    ) -> WasmResult<()> {
3073        if self.tunables.consume_fuel {
3074            self.fuel_increment_var(builder);
3075            self.fuel_save_from_var(builder);
3076        }
3077        Ok(())
3078    }
3079
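    /// Hook invoked before translating a function body: emits the optional
    /// stack-limit check and initializes fuel, epoch, and wmemcheck state as
    /// configured.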
3080    pub fn before_translate_function(
3081        &mut self,
3082        builder: &mut FunctionBuilder,
3083        _state: &FuncTranslationState,
3084    ) -> WasmResult<()> {
3085        // If an explicit stack limit is requested, emit a check against it
3086        // here at the start of the function.
3087        if let Some(gv) = self.stack_limit_at_function_entry {
3088            let limit = builder.ins().global_value(self.pointer_type(), gv);
3089            let sp = builder.ins().get_stack_pointer(self.pointer_type());
3090            let overflow = builder.ins().icmp(IntCC::UnsignedLessThan, sp, limit);
3091            self.conditionally_trap(builder, overflow, ir::TrapCode::STACK_OVERFLOW);
3092        }
3093
3094        // If the `vmstore_context_ptr` variable will get used then we
3095        // initialize it here.
3096        if self.tunables.consume_fuel || self.tunables.epoch_interruption {
3097            self.declare_vmstore_context_ptr(builder);
3098        }
3099        // Additionally we initialize `fuel_var` if it will get used.
3100        if self.tunables.consume_fuel {
3101            self.fuel_function_entry(builder);
3102        }
3103        // Initialize `epoch_var` with the current epoch.
3104        if self.tunables.epoch_interruption {
3105            self.epoch_function_entry(builder);
3106        }
3107
3108        #[cfg(feature = "wmemcheck")]
3109        if self.compiler.wmemcheck {
3110            let func_name = self.current_func_name(builder);
3111            if func_name == Some("malloc") {
3112                self.check_malloc_start(builder);
3113            } else if func_name == Some("free") {
3114                self.check_free_start(builder);
3115            }
3116        }
3117
3118        Ok(())
3119    }
3120
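    /// Hook invoked after translating a function body; saves the remaining
    /// fuel count if the function's exit is reachable.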
3121    pub fn after_translate_function(
3122        &mut self,
3123        builder: &mut FunctionBuilder,
3124        state: &FuncTranslationState,
3125    ) -> WasmResult<()> {
3126        if self.tunables.consume_fuel && state.reachable() {
3127            self.fuel_function_exit(builder);
3128        }
3129        Ok(())
3130    }
3131
3132    pub fn relaxed_simd_deterministic(&self) -> bool {
3133        self.tunables.relaxed_simd_deterministic
3134    }
3135
3136    pub fn has_native_fma(&self) -> bool {
3137        self.isa.has_native_fma()
3138    }
3139
3140    pub fn is_x86(&self) -> bool {
3141        self.isa.triple().architecture == target_lexicon::Architecture::X86_64
3142    }
3143
3144    pub fn use_x86_blendv_for_relaxed_laneselect(&self, ty: Type) -> bool {
3145        self.isa.has_x86_blendv_lowering(ty)
3146    }
3147
3148    pub fn use_x86_pshufb_for_relaxed_swizzle(&self) -> bool {
3149        self.isa.has_x86_pshufb_lowering()
3150    }
3151
3152    pub fn use_x86_pmulhrsw_for_relaxed_q15mul(&self) -> bool {
3153        self.isa.has_x86_pmulhrsw_lowering()
3154    }
3155
3156    pub fn use_x86_pmaddubsw_for_dot(&self) -> bool {
3157        self.isa.has_x86_pmaddubsw_lowering()
3158    }
3159
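    /// wmemcheck hook invoked just before a return, used to instrument the
    /// exits of `malloc` and `free`.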
3160    pub fn handle_before_return(&mut self, retvals: &[ir::Value], builder: &mut FunctionBuilder) {
3161        #[cfg(feature = "wmemcheck")]
3162        if self.compiler.wmemcheck {
3163            let func_name = self.current_func_name(builder);
3164            if func_name == Some("malloc") {
3165                self.hook_malloc_exit(builder, retvals);
3166            } else if func_name == Some("free") {
3167                self.hook_free_exit(builder);
3168            }
3169        }
3170        #[cfg(not(feature = "wmemcheck"))]
3171        let _ = (retvals, builder);
3172    }
3173
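    /// wmemcheck hook invoked before a load; when enabled, calls the
    /// `check_load` builtin to validate the access.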
3174    pub fn before_load(
3175        &mut self,
3176        builder: &mut FunctionBuilder,
3177        val_size: u8,
3178        addr: ir::Value,
3179        offset: u64,
3180    ) {
3181        #[cfg(feature = "wmemcheck")]
3182        if self.compiler.wmemcheck {
3183            let check_load = self.builtin_functions.check_load(builder.func);
3184            let vmctx = self.vmctx_val(&mut builder.cursor());
3185            let num_bytes = builder.ins().iconst(I32, val_size as i64);
3186            let offset_val = builder.ins().iconst(I64, offset as i64);
3187            builder
3188                .ins()
3189                .call(check_load, &[vmctx, num_bytes, addr, offset_val]);
3190        }
3191        #[cfg(not(feature = "wmemcheck"))]
3192        let _ = (builder, val_size, addr, offset);
3193    }
3194
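    /// wmemcheck hook invoked before a store; when enabled, calls the
    /// `check_store` builtin to validate the access.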
3195    pub fn before_store(
3196        &mut self,
3197        builder: &mut FunctionBuilder,
3198        val_size: u8,
3199        addr: ir::Value,
3200        offset: u64,
3201    ) {
3202        #[cfg(feature = "wmemcheck")]
3203        if self.compiler.wmemcheck {
3204            let check_store = self.builtin_functions.check_store(builder.func);
3205            let vmctx = self.vmctx_val(&mut builder.cursor());
3206            let num_bytes = builder.ins().iconst(I32, val_size as i64);
3207            let offset_val = builder.ins().iconst(I64, offset as i64);
3208            builder
3209                .ins()
3210                .call(check_store, &[vmctx, num_bytes, addr, offset_val]);
3211        }
3212        #[cfg(not(feature = "wmemcheck"))]
3213        let _ = (builder, val_size, addr, offset);
3214    }
3215
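    /// wmemcheck hook invoked when a global is set; updates the checker's
    /// auxiliary stack pointer when global 0 (assumed to be that stack
    /// pointer) is written.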
3216    pub fn update_global(
3217        &mut self,
3218        builder: &mut FunctionBuilder,
3219        global_index: u32,
3220        value: ir::Value,
3221    ) {
3222        #[cfg(feature = "wmemcheck")]
3223        if self.compiler.wmemcheck {
3224            if global_index == 0 {
3225                // We are making the assumption that global 0 is the auxiliary stack pointer.
3226                let update_stack_pointer =
3227                    self.builtin_functions.update_stack_pointer(builder.func);
3228                let vmctx = self.vmctx_val(&mut builder.cursor());
3229                builder.ins().call(update_stack_pointer, &[vmctx, value]);
3230            }
3231        }
3232        #[cfg(not(feature = "wmemcheck"))]
3233        let _ = (builder, global_index, value);
3234    }
3235
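    /// wmemcheck hook invoked before a `memory.grow`; updates the checker's
    /// view of memory 0's size.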
3236    pub fn before_memory_grow(
3237        &mut self,
3238        builder: &mut FunctionBuilder,
3239        num_pages: ir::Value,
3240        mem_index: MemoryIndex,
3241    ) {
3242        #[cfg(feature = "wmemcheck")]
3243        if self.compiler.wmemcheck && mem_index.as_u32() == 0 {
3244            let update_mem_size = self.builtin_functions.update_mem_size(builder.func);
3245            let vmctx = self.vmctx_val(&mut builder.cursor());
3246            builder.ins().call(update_mem_size, &[vmctx, num_pages]);
3247        }
3248        #[cfg(not(feature = "wmemcheck"))]
3249        let _ = (builder, num_pages, mem_index);
3250    }
3251
3252    pub fn isa(&self) -> &dyn TargetIsa {
3253        &*self.isa
3254    }
3255
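    /// Emits an unconditional trap with code `trap`, either as a native CLIF
    /// `trap` instruction or, when native traps can't be used, via the
    /// trap-raising libcalls.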
3256    pub fn trap(&mut self, builder: &mut FunctionBuilder, trap: ir::TrapCode) {
3257        match (
3258            self.clif_instruction_traps_enabled(),
3259            crate::clif_trap_to_env_trap(trap),
3260        ) {
3261            // If libcall traps are disabled or there's no wasmtime-defined trap
3262            // code for this, then emit a native trap instruction.
3263            (true, _) | (_, None) => {
3264                builder.ins().trap(trap);
3265            }
3266            // ... otherwise, with libcall traps explicitly enabled and a
3267            // wasmtime-defined trap code available, invoke the libcalls to
3268            // record our trap code and raise the trap. Leave a debug
3269            // `unreachable` in place afterwards as a defense-in-depth measure.
3270            (false, Some(trap)) => {
3271                let libcall = self.builtin_functions.trap(&mut builder.func);
3272                let vmctx = self.vmctx_val(&mut builder.cursor());
3273                let trap_code = builder.ins().iconst(I8, i64::from(trap as u8));
3274                builder.ins().call(libcall, &[vmctx, trap_code]);
3275                let raise = self.builtin_functions.raise(&mut builder.func);
3276                builder.ins().call(raise, &[vmctx]);
3277                builder.ins().trap(TRAP_INTERNAL_ASSERT);
3278            }
3279        }
3280    }
3281
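    /// Emits a trap with code `trap` if `value` is zero, respecting whether
    /// native CLIF trap instructions are enabled.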
3282    pub fn trapz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
3283        if self.clif_instruction_traps_enabled() {
3284            builder.ins().trapz(value, trap);
3285        } else {
3286            let ty = builder.func.dfg.value_type(value);
3287            let zero = builder.ins().iconst(ty, 0);
3288            let cmp = builder.ins().icmp(IntCC::Equal, value, zero);
3289            self.conditionally_trap(builder, cmp, trap);
3290        }
3291    }
3292
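    /// Emits a trap with code `trap` if `value` is non-zero, respecting
    /// whether native CLIF trap instructions are enabled.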
3293    pub fn trapnz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
3294        if self.clif_instruction_traps_enabled() {
3295            builder.ins().trapnz(value, trap);
3296        } else {
3297            let ty = builder.func.dfg.value_type(value);
3298            let zero = builder.ins().iconst(ty, 0);
3299            let cmp = builder.ins().icmp(IntCC::NotEqual, value, zero);
3300            self.conditionally_trap(builder, cmp, trap);
3301        }
3302    }
3303
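    /// Returns `lhs + rhs`, trapping on unsigned overflow, respecting whether
    /// native CLIF trap instructions are enabled.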
3304    pub fn uadd_overflow_trap(
3305        &mut self,
3306        builder: &mut FunctionBuilder,
3307        lhs: ir::Value,
3308        rhs: ir::Value,
3309        trap: ir::TrapCode,
3310    ) -> ir::Value {
3311        if self.clif_instruction_traps_enabled() {
3312            builder.ins().uadd_overflow_trap(lhs, rhs, trap)
3313        } else {
3314            let (ret, overflow) = builder.ins().uadd_overflow(lhs, rhs);
3315            self.conditionally_trap(builder, overflow, trap);
3316            ret
3317        }
3318    }
3319
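    /// Translates a signed integer division, emitting any guards needed for
    /// its trapping cases.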
3320    pub fn translate_sdiv(
3321        &mut self,
3322        builder: &mut FunctionBuilder,
3323        lhs: ir::Value,
3324        rhs: ir::Value,
3325    ) -> ir::Value {
3326        self.guard_signed_divide(builder, lhs, rhs);
3327        builder.ins().sdiv(lhs, rhs)
3328    }
3329
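    /// Translates an unsigned integer division, emitting any guard needed for
    /// a zero divisor.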
3330    pub fn translate_udiv(
3331        &mut self,
3332        builder: &mut FunctionBuilder,
3333        lhs: ir::Value,
3334        rhs: ir::Value,
3335    ) -> ir::Value {
3336        self.guard_zero_divisor(builder, rhs);
3337        builder.ins().udiv(lhs, rhs)
3338    }
3339
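    /// Translates a signed integer remainder, emitting any guard needed for a
    /// zero divisor.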
3340    pub fn translate_srem(
3341        &mut self,
3342        builder: &mut FunctionBuilder,
3343        lhs: ir::Value,
3344        rhs: ir::Value,
3345    ) -> ir::Value {
3346        self.guard_zero_divisor(builder, rhs);
3347        builder.ins().srem(lhs, rhs)
3348    }
3349
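    /// Translates an unsigned integer remainder, emitting any guard needed
    /// for a zero divisor.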
3350    pub fn translate_urem(
3351        &mut self,
3352        builder: &mut FunctionBuilder,
3353        lhs: ir::Value,
3354        rhs: ir::Value,
3355    ) -> ir::Value {
3356        self.guard_zero_divisor(builder, rhs);
3357        builder.ins().urem(lhs, rhs)
3358    }
3359
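    /// Translates a trapping float-to-signed-integer conversion, emitting
    /// explicit guards for inputs that would trap when native CLIF traps are
    /// disabled.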
3360    pub fn translate_fcvt_to_sint(
3361        &mut self,
3362        builder: &mut FunctionBuilder,
3363        ty: ir::Type,
3364        val: ir::Value,
3365    ) -> ir::Value {
3366        // NB: for now avoid translating this entire instruction to CLIF and
3367        // just do it in a libcall.
3368        if !self.clif_instruction_traps_enabled() {
3369            self.guard_fcvt_to_int(
3370                builder,
3371                ty,
3372                val,
3373                (-2147483649.0, 2147483648.0),
3374                (-9223372036854777856.0, 9223372036854775808.0),
3375            );
3376        }
3377        builder.ins().fcvt_to_sint(ty, val)
3378    }
3379
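    /// Translates a trapping float-to-unsigned-integer conversion, emitting
    /// explicit guards for inputs that would trap when native CLIF traps are
    /// disabled.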
3380    pub fn translate_fcvt_to_uint(
3381        &mut self,
3382        builder: &mut FunctionBuilder,
3383        ty: ir::Type,
3384        val: ir::Value,
3385    ) -> ir::Value {
3386        if !self.clif_instruction_traps_enabled() {
3387            self.guard_fcvt_to_int(
3388                builder,
3389                ty,
3390                val,
3391                (-1.0, 4294967296.0),
3392                (-1.0, 18446744073709551616.0),
3393            );
3394        }
3395        builder.ins().fcvt_to_uint(ty, val)
3396    }
3397
3398    /// Returns whether it's acceptable to rely on traps in CLIF memory-related
3399    /// instructions (e.g. loads and stores).
3400    ///
3401    /// This is enabled if `signals_based_traps` is `true` since signal handlers
3402    /// are available, but this is additionally forcibly disabled if Pulley is
3403    /// being targeted since the Pulley runtime doesn't catch segfaults for
3404    /// itself.
3405    pub fn clif_memory_traps_enabled(&self) -> bool {
3406        self.tunables.signals_based_traps && !self.is_pulley()
3407    }
3408
3409    /// Returns whether it's acceptable to have CLIF instructions natively trap,
3410    /// such as division-by-zero.
3411    ///
3412    /// This is enabled if `signals_based_traps` is `true` or on Pulley
3413    /// unconditionally since Pulley doesn't use hardware-based traps in its
3414    /// runtime.
3415    pub fn clif_instruction_traps_enabled(&self) -> bool {
3416        self.tunables.signals_based_traps || self.is_pulley()
3417    }
3418
3419    /// Returns whether a load from the null address may be used as the
3420    /// mechanism for signaling a trap.
3421    pub fn load_from_zero_allowed(&self) -> bool {
3422        // Pulley allows loads-from-zero and otherwise this is only allowed with
3423        // traps + spectre mitigations.
3424        self.is_pulley()
3425            || (self.clif_memory_traps_enabled() && self.heap_access_spectre_mitigation())
3426    }
3427
3428    /// Returns whether translation is happening for Pulley bytecode.
3429    pub fn is_pulley(&self) -> bool {
3430        self.isa.triple().is_pulley()
3431    }
3432}
3433
3434// Helper function to convert an `IndexType` to an `ir::Type`.
3435//
3436// Implementing a `From`/`Into` trait for `IndexType` or `ir::Type` would
3437// introduce an extra dependency between `wasmtime_types` and `cranelift_codegen`.
3438fn index_type_to_ir_type(index_type: IndexType) -> ir::Type {
3439    match index_type {
3440        IndexType::I32 => I32,
3441        IndexType::I64 => I64,
3442    }
3443}