wasmtime_cranelift/func_environ/gc/enabled.rs

use super::{ArrayInit, GcCompiler};
use crate::func_environ::{Extension, FuncEnvironment};
use crate::translate::{StructFieldsVec, TargetEnvironment};
use crate::TRAP_INTERNAL_ASSERT;
use cranelift_codegen::{
    cursor::FuncCursor,
    ir::{self, condcodes::IntCC, InstBuilder},
};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_frontend::FunctionBuilder;
use smallvec::SmallVec;
use wasmtime_environ::{
    wasm_unsupported, Collector, GcArrayLayout, GcLayout, GcStructLayout, ModuleInternedTypeIndex,
    PtrSize, TypeIndex, VMGcKind, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult,
    WasmStorageType, WasmValType, I31_DISCRIMINANT,
};

#[cfg(feature = "gc-drc")]
mod drc;
#[cfg(feature = "gc-null")]
mod null;

/// Get the default GC compiler.
pub fn gc_compiler(func_env: &mut FuncEnvironment<'_>) -> WasmResult<Box<dyn GcCompiler>> {
    // Needing a GC compiler is not too bad of an over-approximation of
    // needing a GC heap.
    func_env.needs_gc_heap = true;

    match func_env.tunables.collector {
        #[cfg(feature = "gc-drc")]
        Some(Collector::DeferredReferenceCounting) => Ok(Box::new(drc::DrcCompiler::default())),
        #[cfg(not(feature = "gc-drc"))]
        Some(Collector::DeferredReferenceCounting) => Err(wasm_unsupported!(
            "the DRC collector is unavailable because the `gc-drc` feature \
             was disabled at compile time",
        )),

        #[cfg(feature = "gc-null")]
        Some(Collector::Null) => Ok(Box::new(null::NullCompiler::default())),
        #[cfg(not(feature = "gc-null"))]
        Some(Collector::Null) => Err(wasm_unsupported!(
            "the null collector is unavailable because the `gc-null` feature \
             was disabled at compile time",
        )),

        #[cfg(any(feature = "gc-drc", feature = "gc-null"))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled at configuration time"
        )),
        #[cfg(not(any(feature = "gc-drc", feature = "gc-null")))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled because no collector implementation \
             was selected at compile time; enable one of the `gc-drc` or \
             `gc-null` features",
        )),
    }
}

#[cfg_attr(not(feature = "gc-drc"), allow(dead_code))]
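/// Load a GC reference of type `ty` from `*ptr_to_gc_ref` without emitting
/// any collector read barrier.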
fn unbarriered_load_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    ptr_to_gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<ir::Value> {
    debug_assert!(ty.is_vmgcref_type());
    let gc_ref = builder.ins().load(ir::types::I32, flags, ptr_to_gc_ref, 0);
    if ty != WasmHeapType::I31 {
        builder.declare_value_needs_stack_map(gc_ref);
    }
    Ok(gc_ref)
}

#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
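/// Store the GC reference `gc_ref` of type `ty` into `*dst` without emitting
/// any collector write barrier.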
fn unbarriered_store_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    dst: ir::Value,
    gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<()> {
    debug_assert!(ty.is_vmgcref_type());
    builder.ins().store(flags, gc_ref, dst, 0);
    Ok(())
}

/// Emit code to read a struct field or array element from its raw address in
/// the GC heap.
///
/// The given address MUST have already been bounds-checked via
/// `prepare_gc_ref_access`.
fn read_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ty: WasmStorageType,
    addr: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    assert_eq!(extension.is_none(), matches!(ty, WasmStorageType::Val(_)));
    assert_eq!(
        extension.is_some(),
        matches!(ty, WasmStorageType::I8 | WasmStorageType::I16)
    );

    // Data inside GC objects is always little endian.
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    let value = match ty {
        WasmStorageType::I8 => builder.ins().load(ir::types::I8, flags, addr, 0),
        WasmStorageType::I16 => builder.ins().load(ir::types::I16, flags, addr, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => builder.ins().load(ir::types::I32, flags, addr, 0),
            WasmValType::I64 => builder.ins().load(ir::types::I64, flags, addr, 0),
            WasmValType::F32 => builder.ins().load(ir::types::F32, flags, addr, 0),
            WasmValType::F64 => builder.ins().load(ir::types::F64, flags, addr, 0),
            WasmValType::V128 => builder.ins().load(ir::types::I8X16, flags, addr, 0),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Any | WasmHeapTopType::Extern => gc_compiler(func_env)?
                    .translate_read_gc_reference(func_env, builder, r, addr, flags)?,
                WasmHeapTopType::Func => {
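                    // Function references are stored in the GC heap as 32-bit
                    // `FuncRefTableId`s rather than as raw `VMFuncRef`
                    // pointers, so we load the id and then call into the
                    // runtime to get the actual (type-checked) funcref
                    // pointer back out.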
                    let expected_ty = match r.heap_type {
                        WasmHeapType::Func => ModuleInternedTypeIndex::reserved_value(),
                        WasmHeapType::ConcreteFunc(ty) => ty.unwrap_module_type_index(),
                        WasmHeapType::NoFunc => {
                            let null = builder.ins().iconst(func_env.pointer_type(), 0);
                            if !r.nullable {
                                // Because `nofunc` is uninhabited, and this
                                // reference is non-null, this is unreachable
                                // code. Unconditionally trap via conditional
                                // trap instructions to avoid inserting block
                                // terminators in the middle of this block.
                                builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
                            }
                            return Ok(null);
                        }
                        _ => unreachable!("not a function heap type"),
                    };
                    let expected_ty = builder
                        .ins()
                        .iconst(ir::types::I32, i64::from(expected_ty.as_bits()));

                    let vmctx = func_env.vmctx_val(&mut builder.cursor());

                    let func_ref_id = builder.ins().load(ir::types::I32, flags, addr, 0);
                    let get_interned_func_ref = func_env
                        .builtin_functions
                        .get_interned_func_ref(builder.func);

                    let call_inst = builder
                        .ins()
                        .call(get_interned_func_ref, &[vmctx, func_ref_id, expected_ty]);
                    builder.func.dfg.first_result(call_inst)
                }
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        },
    };

    let value = match extension {
        Some(Extension::Sign) => builder.ins().sextend(ir::types::I32, value),
        Some(Extension::Zero) => builder.ins().uextend(ir::types::I32, value),
        None => value,
    };

    Ok(value)
}

fn write_func_ref_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ref_type: WasmRefType,
    flags: ir::MemFlags,
    field_addr: ir::Value,
    func_ref: ir::Value,
) -> WasmResult<()> {
    assert_eq!(ref_type.heap_type.top(), WasmHeapTopType::Func);

    let vmctx = func_env.vmctx_val(&mut builder.cursor());

    let intern_func_ref_for_gc_heap = func_env
        .builtin_functions
        .intern_func_ref_for_gc_heap(builder.func);

    let func_ref = if ref_type.heap_type == WasmHeapType::NoFunc {
        let null = builder.ins().iconst(func_env.pointer_type(), 0);
        if !ref_type.nullable {
            // Because `nofunc` is uninhabited, and this reference is
            // non-null, this is unreachable code. Unconditionally trap
            // via conditional trap instructions to avoid inserting
            // block terminators in the middle of this block.
            builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
        }
        null
    } else {
        func_ref
    };

    // Convert the raw `funcref` into a `FuncRefTableId` for use in the
    // GC heap.
    let call_inst = builder
        .ins()
        .call(intern_func_ref_for_gc_heap, &[vmctx, func_ref]);
    let func_ref_id = builder.func.dfg.first_result(call_inst);
    let func_ref_id = builder.ins().ireduce(ir::types::I32, func_ref_id);

    // Store the id in the field.
    builder.ins().store(flags, func_ref_id, field_addr, 0);

    Ok(())
}

fn write_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    field_ty: WasmStorageType,
    field_addr: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
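    // As with `read_field_at_addr`, the given `field_addr` MUST have already
    // been bounds-checked via `prepare_gc_ref_access`.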
    // Data inside GC objects is always little endian.
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    match field_ty {
        WasmStorageType::I8 => {
            builder.ins().istore8(flags, new_val, field_addr, 0);
        }
        WasmStorageType::I16 => {
            builder.ins().istore16(flags, new_val, field_addr, 0);
        }
        WasmStorageType::Val(WasmValType::Ref(r)) if r.heap_type.top() == WasmHeapTopType::Func => {
            write_func_ref_at_addr(func_env, builder, r, flags, field_addr, new_val)?;
        }
        WasmStorageType::Val(WasmValType::Ref(r)) => {
            gc_compiler(func_env)?
                .translate_write_gc_reference(func_env, builder, r, field_addr, new_val, flags)?;
        }
        WasmStorageType::Val(_) => {
            assert_eq!(
                builder.func.dfg.value_type(new_val).bytes(),
                wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty)
            );
            builder.ins().store(flags, new_val, field_addr, 0);
        }
    }
    Ok(())
}

pub fn translate_struct_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    fields: &[ir::Value],
) -> WasmResult<ir::Value> {
    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, fields)
}

fn default_value(
    cursor: &mut FuncCursor,
    func_env: &FuncEnvironment<'_>,
    ty: &WasmStorageType,
) -> ir::Value {
    match ty {
        WasmStorageType::I8 | WasmStorageType::I16 => cursor.ins().iconst(ir::types::I32, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => cursor.ins().iconst(ir::types::I32, 0),
            WasmValType::I64 => cursor.ins().iconst(ir::types::I64, 0),
            WasmValType::F32 => cursor.ins().f32const(0.0),
            WasmValType::F64 => cursor.ins().f64const(0.0),
            WasmValType::V128 => {
                let c = cursor.func.dfg.constants.insert(vec![0; 16].into());
                cursor.ins().vconst(ir::types::I8X16, c)
            }
            WasmValType::Ref(r) => {
                assert!(r.nullable);
                let (ty, needs_stack_map) = func_env.reference_type(r.heap_type);

                // NB: The collector doesn't need to know about null references.
                let _ = needs_stack_map;

                cursor.ins().iconst(ty, 0)
            }
        },
    }
}

pub fn translate_struct_new_default(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
) -> WasmResult<ir::Value> {
    let interned_ty = func_env.module.types[struct_type_index].unwrap_module_type_index();
    let struct_ty = func_env.types.unwrap_struct(interned_ty)?;
    let fields = struct_ty
        .fields
        .iter()
        .map(|f| default_value(&mut builder.cursor(), func_env, &f.element_type))
        .collect::<StructFieldsVec>();
    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
}

pub fn translate_struct_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!("translate_struct_get({struct_type_index:?}, {field_index:?}, {struct_ref:?}, {extension:?})");

    // TODO: If we know we have a `(ref $my_struct)` here, instead of maybe a
    // `(ref null $my_struct)`, we could omit the `trapz`. But plumbing that
    // type info from `wasmparser` and through to here is a bit funky.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_layout(interned_type_index);
    let struct_size = struct_layout.size;
    let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));

    let field_offset = struct_layout.fields[field_index].offset;
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    assert!(field_offset + field_size <= struct_size);

    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        Offset::Static(field_offset),
        BoundsCheck::Object(struct_size_val),
    );

    let result = read_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        extension,
    );
    log::trace!("translate_struct_get(..) -> {result:?}");
    result
}

pub fn translate_struct_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_struct_set({struct_type_index:?}, {field_index:?}, struct_ref: {struct_ref:?}, new_val: {new_val:?})"
    );

    // TODO: See comment in `translate_struct_get` about the `trapz`.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_layout(interned_type_index);
    let struct_size = struct_layout.size;
    let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));

    let field_offset = struct_layout.fields[field_index].offset;
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    assert!(field_offset + field_size <= struct_size);

    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        Offset::Static(field_offset),
        BoundsCheck::Object(struct_size_val),
    );

    write_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        new_val,
    )?;

    log::trace!("translate_struct_set: finished");
    Ok(())
}

pub fn translate_array_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elem: ir::Value,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new({array_type_index:?}, {elem:?}, {len:?})");
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Fill { elem, len },
    )?;
    log::trace!("translate_array_new(..) -> {result:?}");
    Ok(result)
}

pub fn translate_array_new_default(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_default({array_type_index:?}, {len:?})");

    let interned_ty = func_env.module.types[array_type_index].unwrap_module_type_index();
    let array_ty = func_env.types.unwrap_array(interned_ty)?;
    let elem = default_value(&mut builder.cursor(), func_env, &array_ty.0.element_type);
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Fill { elem, len },
    )?;
    log::trace!("translate_array_new_default(..) -> {result:?}");
    Ok(result)
}

pub fn translate_array_new_fixed(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elems: &[ir::Value],
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_fixed({array_type_index:?}, {elems:?})");
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Elems(elems),
    )?;
    log::trace!("translate_array_new_fixed(..) -> {result:?}");
    Ok(result)
}

impl ArrayInit<'_> {
    /// Get the length (as an `i32`-typed `ir::Value`) of these array elements.
    #[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
    fn len(self, pos: &mut FuncCursor) -> ir::Value {
        match self {
            ArrayInit::Fill { len, .. } => len,
            ArrayInit::Elems(e) => {
                let len = u32::try_from(e.len()).unwrap();
                pos.ins().iconst(ir::types::I32, i64::from(len))
            }
        }
    }

    /// Initialize a newly-allocated array's elements.
    #[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
    fn initialize(
        self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        interned_type_index: ModuleInternedTypeIndex,
        base_size: u32,
        size: ir::Value,
        elems_addr: ir::Value,
        mut init_field: impl FnMut(
            &mut FuncEnvironment<'_>,
            &mut FunctionBuilder<'_>,
            WasmStorageType,
            ir::Value,
            ir::Value,
        ) -> WasmResult<()>,
    ) -> WasmResult<()> {
        log::trace!(
            "initialize_array({interned_type_index:?}, {base_size:?}, {size:?}, {elems_addr:?})"
        );

        assert!(!func_env.types[interned_type_index].composite_type.shared);
        let array_ty = func_env.types[interned_type_index]
            .composite_type
            .inner
            .unwrap_array();
        let elem_ty = array_ty.0.element_type;
        let elem_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&elem_ty);
        let pointer_type = func_env.pointer_type();
        let elem_size = builder.ins().iconst(pointer_type, i64::from(elem_size));
        match self {
            ArrayInit::Elems(elems) => {
                let mut elem_addr = elems_addr;
                for val in elems {
                    init_field(func_env, builder, elem_ty, elem_addr, *val)?;
                    elem_addr = builder.ins().iadd(elem_addr, elem_size);
                }
            }
            ArrayInit::Fill { elem, len: _ } => {
                // Compute the end address of the elements.
                let base_size = builder.ins().iconst(pointer_type, i64::from(base_size));
                let array_addr = builder.ins().isub(elems_addr, base_size);
                let size = uextend_i32_to_pointer_type(builder, pointer_type, size);
                let elems_end = builder.ins().iadd(array_addr, size);

                emit_array_fill_impl(
                    func_env,
                    builder,
                    elems_addr,
                    elem_size,
                    elems_end,
                    |func_env, builder, elem_addr| {
                        init_field(func_env, builder, elem_ty, elem_addr, elem)
                    },
                )?;
            }
        }
        log::trace!("initialize_array: finished");
        Ok(())
    }
}

fn emit_array_fill_impl(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    elem_addr: ir::Value,
    elem_size: ir::Value,
    fill_end: ir::Value,
    mut emit_elem_write: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    log::trace!("emit_array_fill_impl(elem_addr: {elem_addr:?}, elem_size: {elem_size:?}, fill_end: {fill_end:?})");

    let pointer_ty = func_env.pointer_type();

    assert_eq!(builder.func.dfg.value_type(elem_addr), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(elem_size), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(fill_end), pointer_ty);

    // Loop to fill the elements, emitting the equivalent of the following
    // pseudo-CLIF:
    //
    // current_block:
    //     ...
    //     jump loop_header_block(elem_addr)
    //
    // loop_header_block(elem_addr: pointer_ty):
    //     done = icmp eq elem_addr, fill_end
    //     brif done, continue_block, loop_body_block
    //
    // loop_body_block:
    //     emit_elem_write()
    //     next_elem_addr = iadd elem_addr, elem_size
    //     jump loop_header_block(next_elem_addr)
    //
    // continue_block:
    //     ...

    let current_block = builder.current_block().unwrap();
    let loop_header_block = builder.create_block();
    let loop_body_block = builder.create_block();
    let continue_block = builder.create_block();

    builder.ensure_inserted_block();
    builder.insert_block_after(loop_header_block, current_block);
    builder.insert_block_after(loop_body_block, loop_header_block);
    builder.insert_block_after(continue_block, loop_body_block);

    // Current block: jump to the loop header block with the first element's
    // address.
    builder.ins().jump(loop_header_block, &[elem_addr]);

    // Loop header block: check if we're done, then jump to either the continue
    // block or the loop body block.
    builder.switch_to_block(loop_header_block);
    builder.append_block_param(loop_header_block, pointer_ty);
    log::trace!("emit_array_fill_impl: loop header");
    let elem_addr = builder.block_params(loop_header_block)[0];
    let done = builder.ins().icmp(IntCC::Equal, elem_addr, fill_end);
    builder
        .ins()
        .brif(done, continue_block, &[], loop_body_block, &[]);

    // Loop body block: write the value to the current element, compute the next
    // element's address, and then jump back to the loop header block.
    builder.switch_to_block(loop_body_block);
    log::trace!("emit_array_fill_impl: loop body");
    emit_elem_write(func_env, builder, elem_addr)?;
    let next_elem_addr = builder.ins().iadd(elem_addr, elem_size);
    builder.ins().jump(loop_header_block, &[next_elem_addr]);

    // Continue...
    builder.switch_to_block(continue_block);
    log::trace!("emit_array_fill_impl: finished");
    builder.seal_block(loop_header_block);
    builder.seal_block(loop_body_block);
    builder.seal_block(continue_block);
    Ok(())
}

pub fn translate_array_fill(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
    n: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_array_fill({array_type_index:?}, {array_ref:?}, {index:?}, {value:?}, {n:?})"
    );

    let len = translate_array_len(func_env, builder, array_ref)?;

    // Check that the full range of elements we want to fill is within bounds.
    let end_index = func_env.uadd_overflow_trap(builder, index, n, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
    let out_of_bounds = builder
        .ins()
        .icmp(IntCC::UnsignedGreaterThan, end_index, len);
    func_env.trapnz(builder, out_of_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    // Get the address of the first element we want to fill.
    let interned_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, interned_type_index, len);
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let obj_offset = builder.ins().iadd(base_size, offset_in_elems);
    let elem_addr = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        Offset::Dynamic(obj_offset),
        BoundsCheck::Object(obj_size),
    );

    // Calculate the end address, just after the filled region.
    let fill_size = builder.ins().imul(n, one_elem_size);
    let fill_size = uextend_i32_to_pointer_type(builder, func_env.pointer_type(), fill_size);
    let fill_end = builder.ins().iadd(elem_addr, fill_size);

    let one_elem_size =
        uextend_i32_to_pointer_type(builder, func_env.pointer_type(), one_elem_size);

    let result = emit_array_fill_impl(
        func_env,
        builder,
        elem_addr,
        one_elem_size,
        fill_end,
        |func_env, builder, elem_addr| {
            let elem_ty = func_env
                .types
                .unwrap_array(interned_type_index)?
                .0
                .element_type;
            write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)
        },
    );
    log::trace!("translate_array_fill(..) -> {result:?}");
    result
}

pub fn translate_array_len(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_ref: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_len({array_ref:?})");

    func_env.trapz(builder, array_ref, crate::TRAP_NULL_REFERENCE);

    let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
    let len_field = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        Offset::Static(len_offset),
        // Note: We can't bounds check the whole array object's size because we
        // don't know its length yet. Chicken and egg problem.
        BoundsCheck::Access(ir::types::I32.bytes()),
    );
    let result = builder.ins().load(
        ir::types::I32,
        ir::MemFlags::trusted().with_readonly(),
        len_field,
        0,
    );
    log::trace!("translate_array_len(..) -> {result:?}");
    Ok(result)
}

struct ArraySizeInfo {
    /// The `i32` size of the whole array object, in bytes.
    obj_size: ir::Value,

    /// The `i32` size of each one of the array's elements, in bytes.
    one_elem_size: ir::Value,

    /// The `i32` size of the array's base object, in bytes. This is also the
    /// offset from the start of the array object to its elements.
    base_size: ir::Value,
}

/// Emit code to get the dynamic size (in bytes) of a whole array object, along
/// with some other related bits.
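///
/// Unlike `emit_array_size`, overflow here traps with `TRAP_INTERNAL_ASSERT`
/// rather than a Wasm-visible allocation-size trap: the length was loaded from
/// an already-allocated object in the GC heap, so an overflow would indicate an
/// implementation bug rather than an over-large Wasm allocation request.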
fn emit_array_size_info(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    // `i32` value containing the array's length.
    array_len: ir::Value,
) -> ArraySizeInfo {
    let array_layout = func_env.array_layout(array_type_index);

    // Note that we check for overflow below because we can't trust the array's
    // length: it came from inside the GC heap.
    //
    // We check for 32-bit multiplication overflow by performing a 64-bit
    // multiplication and testing the high bits.
    let one_elem_size = builder
        .ins()
        .iconst(ir::types::I64, i64::from(array_layout.elem_size));
    let array_len = builder.ins().uextend(ir::types::I64, array_len);
    let all_elems_size = builder.ins().imul(one_elem_size, array_len);

    let high_bits = builder.ins().ushr_imm(all_elems_size, 32);
    builder.ins().trapnz(high_bits, TRAP_INTERNAL_ASSERT);

    let all_elems_size = builder.ins().ireduce(ir::types::I32, all_elems_size);
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));
    let obj_size =
        builder
            .ins()
            .uadd_overflow_trap(all_elems_size, base_size, TRAP_INTERNAL_ASSERT);

    let one_elem_size = builder.ins().ireduce(ir::types::I32, one_elem_size);

    ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    }
}

/// Get the bounds-checked address of an element in an array.
///
/// The emitted code will trap if `index >= array.length`.
///
/// Returns the `ir::Value` containing the address of the `index`th element in
/// the array. You may read or write a value of the array's element type at this
/// address. You may not use it for any other kind of access, nor reuse this
/// value across GC safepoints.
fn array_elem_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
) -> ir::Value {
    // First, assert that `index < array.length`.
    //
    // This check is visible at the Wasm-semantics level.
    //
    // TODO: We should emit spectre-safe bounds checks for array accesses (if
    // configured) but we don't currently have a great way to do that here. The
    // proper solution is to use linear memories to back GC heaps and reuse the
    // code in `bounds_check.rs` to implement these bounds checks. That is all
    // planned, but not yet implemented.

    let len = translate_array_len(func_env, builder, array_ref).unwrap();

    let in_bounds = builder.ins().icmp(IntCC::UnsignedLessThan, index, len);
    func_env.trapz(builder, in_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    // Compute the size (in bytes) of the whole array object.
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, array_type_index, len);

    // Compute the offset of the `index`th element within the array object.
    //
    // NB: no need to check for overflow here, since at this point we know that
    // `len * elem_size + base_size` did not overflow and `i < len`.
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let offset_in_array = builder.ins().iadd(offset_in_elems, base_size);

    // Finally, use the object size and element offset we just computed to
    // perform our implementation-internal bounds checks.
    //
    // Checking the whole object's size, rather than the `index`th element's
    // size, allows these bounds checks to be deduplicated across repeated
    // accesses to the same array at different indices.
    //
    // This check should not be visible to Wasm, and serves to protect us from
    // our own implementation bugs. The goal is to keep any potential gadgets
    // confined within the GC heap, and turn what would otherwise be a security
    // vulnerability into a simple bug.
    //
    // TODO: Ideally we should fold the first Wasm-visible bounds check into
    // this internal bounds check, so that we aren't performing multiple,
    // redundant bounds checks. But we should figure out how to do this in a way
    // that doesn't defeat the object-size bounds checking's deduplication
    // mentioned above.
    func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        Offset::Dynamic(offset_in_array),
        BoundsCheck::Object(obj_size),
    )
}

pub fn translate_array_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_get({array_type_index:?}, {array_ref:?}, {index:?})");

    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);

    let array_ty = func_env.types.unwrap_array(array_type_index)?;
    let elem_ty = array_ty.0.element_type;

    let result = read_field_at_addr(func_env, builder, elem_ty, elem_addr, extension)?;
    log::trace!("translate_array_get(..) -> {result:?}");
    Ok(result)
}

pub fn translate_array_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
) -> WasmResult<()> {
    log::trace!("translate_array_set({array_type_index:?}, {array_ref:?}, {index:?}, {value:?})");

    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);

    let array_ty = func_env.types.unwrap_array(array_type_index)?;
    let elem_ty = array_ty.0.element_type;

    write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)?;

    log::trace!("translate_array_set: finished");
    Ok(())
}

pub fn translate_ref_test(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ref_ty: WasmRefType,
    val: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_ref_test({ref_ty:?}, {val:?})");

    // First special case: testing for references to bottom types.
    if ref_ty.heap_type.is_bottom() {
        let result = if ref_ty.nullable {
            // All null references (within the same type hierarchy) match null
            // references to the bottom type.
            func_env.translate_ref_is_null(builder.cursor(), val)?
        } else {
            // `ref.test` is always false for non-nullable bottom types, as the
            // bottom types are uninhabited.
            builder.ins().iconst(ir::types::I32, 0)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // And because `ref.test heap_ty` is only valid on operands whose type is in
    // the same type hierarchy as `heap_ty`, if `heap_ty` is its hierarchy's top
    // type, we only need to worry about whether we are testing for nullability
    // or not.
    if ref_ty.heap_type.is_top() {
        let result = if ref_ty.nullable {
            builder.ins().iconst(ir::types::I32, 1)
        } else {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
            let zero = builder.ins().iconst(ir::types::I32, 0);
            let one = builder.ins().iconst(ir::types::I32, 1);
            builder.ins().select(is_null, zero, one)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // `i31ref`s are a little interesting because they don't point to GC
    // objects; we test the bit pattern of the reference itself.
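    // (In the GC reference encoding, an `i31ref` is distinguished by its low
    // discriminant bit being set, so `val & I31_DISCRIMINANT` is non-zero
    // exactly when `val` is an `i31ref`.)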
    if ref_ty.heap_type == WasmHeapType::I31 {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        let result = if ref_ty.nullable {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
            builder.ins().bor(is_null, is_i31)
        } else {
            is_i31
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // Otherwise, in the general case, we need to inspect our given object's
    // actual type, which also requires null-checking and i31-checking it.
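    //
    // The emitted control flow looks roughly like the following pseudo-CLIF
    // (the i31 check is only emitted for the `any` hierarchy):
    //
    // current_block:
    //     brif (val is null), continue_block(ref_ty.nullable), non_null_block
    //
    // non_null_block:
    //     brif (val & I31_DISCRIMINANT), continue_block(i31s pass?), non_null_non_i31_block
    //
    // non_null_non_i31_block:
    //     result = <check `VMGcKind` or `VMSharedTypeIndex` in the object's header>
    //     jump continue_block(result)
    //
    // continue_block(result: i32):
    //     ...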

    let is_any_hierarchy = ref_ty.heap_type.top() == WasmHeapTopType::Any;

    let non_null_block = builder.create_block();
    let non_null_non_i31_block = builder.create_block();
    let continue_block = builder.create_block();

    // Current block: check if the reference is null and branch appropriately.
    let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
    let result_when_is_null = builder.ins().iconst(ir::types::I32, ref_ty.nullable as i64);
    builder.ins().brif(
        is_null,
        continue_block,
        &[result_when_is_null],
        non_null_block,
        &[],
    );

    // Non-null block: We know the GC ref is non-null, but we need to also check
    // for `i31` references that don't point to GC objects.
    builder.switch_to_block(non_null_block);
    log::trace!("translate_ref_test: non-null ref block");
    if is_any_hierarchy {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        // If it is an `i31`, then create the result value based on whether we
        // want `i31`s to pass the test or not.
        let result_when_is_i31 = builder.ins().iconst(
            ir::types::I32,
            matches!(
                ref_ty.heap_type,
                WasmHeapType::Any | WasmHeapType::Eq | WasmHeapType::I31
            ) as i64,
        );
        builder.ins().brif(
            is_i31,
            continue_block,
            &[result_when_is_i31],
            non_null_non_i31_block,
            &[],
        );
    } else {
        // If we aren't testing the `any` hierarchy, the reference cannot be an
        // `i31ref`. Jump directly to the non-null and non-i31 block; rely on
        // branch folding during lowering to clean this up.
        builder.ins().jump(non_null_non_i31_block, &[]);
    }

    // Non-null and non-i31 block: Read the actual `VMGcKind` or
    // `VMSharedTypeIndex` out of the object's header and check whether it
    // matches the expected type.
    builder.switch_to_block(non_null_non_i31_block);
    log::trace!("translate_ref_test: non-null and non-i31 ref block");
    let check_header_kind = |func_env: &mut FuncEnvironment<'_>,
                             builder: &mut FunctionBuilder,
                             val: ir::Value,
                             expected_kind: VMGcKind|
     -> ir::Value {
        let header_size = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::VM_GC_HEADER_SIZE),
        );
        let kind_addr = func_env.prepare_gc_ref_access(
            builder,
            val,
            Offset::Static(wasmtime_environ::VM_GC_HEADER_KIND_OFFSET),
            BoundsCheck::Object(header_size),
        );
        let actual_kind = builder.ins().load(
            ir::types::I32,
            ir::MemFlags::trusted().with_readonly(),
            kind_addr,
            0,
        );
        let expected_kind = builder
            .ins()
            .iconst(ir::types::I32, i64::from(expected_kind.as_u32()));
        // Inline version of `VMGcKind::matches`.
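        // `actual` matches `expected` iff every bit set in `expected` is also
        // set in `actual`, i.e. `actual & expected == expected`. For example,
        // a struct object's kind includes the `VMGcKind::EqRef` bits, so
        // struct objects pass a `(ref eq)` test.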
        let and = builder.ins().band(actual_kind, expected_kind);
        let kind_matches = builder
            .ins()
            .icmp(ir::condcodes::IntCC::Equal, and, expected_kind);
        builder.ins().uextend(ir::types::I32, kind_matches)
    };
    let result = match ref_ty.heap_type {
        WasmHeapType::Any
        | WasmHeapType::None
        | WasmHeapType::Extern
        | WasmHeapType::NoExtern
        | WasmHeapType::Func
        | WasmHeapType::NoFunc
        | WasmHeapType::I31 => unreachable!("handled top, bottom, and i31 types above"),

        // For these abstract but non-top and non-bottom types, we check the
        // `VMGcKind` that is in the object's header.
        WasmHeapType::Eq => check_header_kind(func_env, builder, val, VMGcKind::EqRef),
        WasmHeapType::Struct => check_header_kind(func_env, builder, val, VMGcKind::StructRef),
        WasmHeapType::Array => check_header_kind(func_env, builder, val, VMGcKind::ArrayRef),

        // For concrete types, we need to do a full subtype check between the
        // `VMSharedTypeIndex` in the object's header and the
        // `ModuleInternedTypeIndex` we have here.
        //
        // TODO: This check should ideally be done inline, but we don't have a
        // good way to access the `TypeRegistry`'s supertypes arrays from Wasm
        // code at the moment.
        WasmHeapType::ConcreteArray(ty) | WasmHeapType::ConcreteStruct(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let ty_addr = func_env.prepare_gc_ref_access(
                builder,
                val,
                Offset::Static(wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET),
                BoundsCheck::Access(func_env.offsets.size_of_vmshared_type_index().into()),
            );
            let actual_shared_ty = builder.ins().load(
                ir::types::I32,
                ir::MemFlags::trusted().with_readonly(),
                ty_addr,
                0,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        // Same as for concrete arrays and structs except that a `VMFuncRef`
        // doesn't begin with a `VMGcHeader` and is a raw pointer rather than a
        // GC heap index.
        WasmHeapType::ConcreteFunc(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let actual_shared_ty = func_env.load_funcref_type_index(
                &mut builder.cursor(),
                ir::MemFlags::trusted().with_readonly(),
                val,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(), // FIXME: #10248 stack switching support.
    };
    builder.ins().jump(continue_block, &[result]);

    // Control flow join point with the result.
    builder.switch_to_block(continue_block);
    let result = builder.append_block_param(continue_block, ir::types::I32);
    log::trace!("translate_ref_test(..) -> {result:?}");

    builder.seal_block(non_null_block);
    builder.seal_block(non_null_non_i31_block);
    builder.seal_block(continue_block);

    Ok(result)
}

/// A static or dynamic offset from a GC reference.
#[derive(Debug)]
enum Offset {
    /// A static offset from a GC reference.
    Static(u32),

    /// A dynamic `i32` offset from a GC reference.
    Dynamic(ir::Value),
}

/// The kind of bounds check to perform when accessing a GC object's fields and
/// elements.
#[derive(Debug)]
enum BoundsCheck {
    /// Check that this whole object is inside the GC heap:
    ///
    /// ```ignore
    /// gc_ref + size <= gc_heap_bound
    /// ```
    ///
    /// The object size must be an `i32` value.
    Object(ir::Value),

    /// Check that this one access in particular is inside the GC heap:
    ///
    /// ```ignore
    /// gc_ref + offset + access_size <= gc_heap_bound
    /// ```
    ///
    /// Prefer `BoundsCheck::Object` over `BoundsCheck::Access` when possible,
    /// as that approach allows the mid-end to deduplicate bounds checks across
    /// multiple accesses to the same object.
    Access(u32),
}

fn uextend_i32_to_pointer_type(
    builder: &mut FunctionBuilder,
    pointer_type: ir::Type,
    value: ir::Value,
) -> ir::Value {
    assert_eq!(builder.func.dfg.value_type(value), ir::types::I32);
    match pointer_type {
        ir::types::I32 => value,
        ir::types::I64 => builder.ins().uextend(ir::types::I64, value),
        _ => unreachable!(),
    }
}

/// Emit CLIF to compute an array object's total size, given the dynamic length
/// in its initialization.
///
/// Traps if the size overflows.
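///
/// For example, with hypothetical layout values `base_size = 24` and
/// `elem_size = 8`, a length of 10 yields `24 + 10 * 8 = 104` bytes.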
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn emit_array_size(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_layout: &GcArrayLayout,
    len: ir::Value,
) -> ir::Value {
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));

    // `elems_size = len * elem_size`
    //
    // Check for multiplication overflow and trap if it occurs, since that
    // means Wasm is attempting to allocate an array that is larger than our
    // implementation limits. (Note: there is no standard implementation
    // limit for array length beyond `u32::MAX`.)
    //
    // We implement this check by encoding our logically-32-bit operands as
    // i64 values, doing a 64-bit multiplication, and then checking the high
    // 32 bits of the multiplication's result. If the high 32 bits are not
    // all zeros, then the multiplication overflowed.
    debug_assert_eq!(builder.func.dfg.value_type(len), ir::types::I32);
    let len = builder.ins().uextend(ir::types::I64, len);
    let elems_size_64 = builder
        .ins()
        .imul_imm(len, i64::from(array_layout.elem_size));
    let high_bits = builder.ins().ushr_imm(elems_size_64, 32);
    func_env.trapnz(builder, high_bits, crate::TRAP_ALLOCATION_TOO_LARGE);
    let elems_size = builder.ins().ireduce(ir::types::I32, elems_size_64);

    // And if adding the base size and elements size overflows, then the
    // allocation is too large.
    let size = func_env.uadd_overflow_trap(
        builder,
        base_size,
        elems_size,
        crate::TRAP_ALLOCATION_TOO_LARGE,
    );

    size
}

/// Common helper for struct-field initialization that can be reused across
/// collectors.
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn initialize_struct_fields(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_ty: ModuleInternedTypeIndex,
    raw_ptr_to_struct: ir::Value,
    field_values: &[ir::Value],
    mut init_field: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        WasmStorageType,
        ir::Value,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    let struct_layout = func_env.struct_layout(struct_ty);
    let struct_size = struct_layout.size;
    let field_offsets: SmallVec<[_; 8]> = struct_layout.fields.iter().map(|f| f.offset).collect();
    assert_eq!(field_offsets.len(), field_values.len());

    assert!(!func_env.types[struct_ty].composite_type.shared);
    let struct_ty = func_env.types[struct_ty]
        .composite_type
        .inner
        .unwrap_struct();
    let field_types: SmallVec<[_; 8]> = struct_ty.fields.iter().cloned().collect();
    assert_eq!(field_types.len(), field_values.len());

    for ((ty, val), offset) in field_types.into_iter().zip(field_values).zip(field_offsets) {
        let size_of_access = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&ty.element_type);
        assert!(offset + size_of_access <= struct_size);
        let field_addr = builder.ins().iadd_imm(raw_ptr_to_struct, i64::from(offset));
        init_field(func_env, builder, ty.element_type, field_addr, *val)?;
    }

    Ok(())
}

impl FuncEnvironment<'_> {
    fn gc_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcLayout {
        // Lazily compute and cache the layout.
        if !self.ty_to_gc_layout.contains_key(&type_index) {
            let ty = &self.types[type_index].composite_type;
            let layout = gc_compiler(self)
                .unwrap()
                .layouts()
                .gc_layout(ty)
                .expect("should only call `FuncEnvironment::gc_layout` for GC types");
            self.ty_to_gc_layout.insert(type_index, layout);
        }

        self.ty_to_gc_layout.get(&type_index).unwrap()
    }

    /// Get the `GcArrayLayout` for the array type at the given `type_index`.
    fn array_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcArrayLayout {
        self.gc_layout(type_index).unwrap_array()
    }

    /// Get the `GcStructLayout` for the struct type at the given `type_index`.
    fn struct_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout {
        self.gc_layout(type_index).unwrap_struct()
    }

    /// Get the GC heap's base pointer.
    fn get_gc_heap_base(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let ptr_ty = self.pointer_type();
        let flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        let vmctx = self.vmctx(builder.func);
        let vmctx = builder.ins().global_value(ptr_ty, vmctx);

        let base_offset = self.offsets.ptr.vmctx_gc_heap_base();
        let base_offset = i32::from(base_offset);

        builder.ins().load(ptr_ty, flags, vmctx, base_offset)
    }

    /// Get the GC heap's bound.
    fn get_gc_heap_bound(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let ptr_ty = self.pointer_type();
        let flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        let vmctx = self.vmctx(builder.func);
        let vmctx = builder.ins().global_value(ptr_ty, vmctx);

        let bound_offset = self.offsets.ptr.vmctx_gc_heap_bound();
        let bound_offset = i32::from(bound_offset);

        builder.ins().load(ptr_ty, flags, vmctx, bound_offset)
    }

    /// Get the GC heap's base pointer and bound.
    fn get_gc_heap_base_bound(&mut self, builder: &mut FunctionBuilder) -> (ir::Value, ir::Value) {
        let base = self.get_gc_heap_base(builder);
        let bound = self.get_gc_heap_bound(builder);
        (base, bound)
    }

    /// Get the raw pointer of `gc_ref[offset]` bounds checked for an access of
    /// `size` bytes.
    ///
    /// The given `gc_ref` must be a non-null, non-i31 GC reference.
    ///
    /// If `check` is a `BoundsCheck::Object`, then it is the caller's
    /// responsibility to ensure that `offset + access_size <= object_size`.
    ///
    /// Returns a raw pointer to `gc_ref[offset]` -- not a raw pointer to the GC
    /// object itself (unless `offset` happens to be `0`). This raw pointer may
    /// be used to read or write up to as many bytes as described by `check`. Do
    /// NOT attempt to access bytes outside of `check`'s bound; that may lead to
    /// unchecked out-of-bounds accesses.
    ///
    /// This method is collector-agnostic.
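    ///
    /// A sketch of the emitted checks (extensions to the pointer type elided):
    ///
    /// ```ignore
    /// end = gc_ref + object_size           // or `gc_ref + offset + access_size`
    /// trap_if !(end <= gc_heap_bound);     // traps with `TRAP_INTERNAL_ASSERT`
    /// return gc_heap_base + gc_ref + offset;
    /// ```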
    fn prepare_gc_ref_access(
        &mut self,
        builder: &mut FunctionBuilder,
        gc_ref: ir::Value,
        offset: Offset,
        check: BoundsCheck,
    ) -> ir::Value {
        log::trace!("prepare_gc_ref_access({gc_ref:?}, {offset:?}, {check:?})");
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);

        let pointer_type = self.pointer_type();
        let (base, bound) = self.get_gc_heap_base_bound(builder);
        let index = uextend_i32_to_pointer_type(builder, pointer_type, gc_ref);

        let offset = match offset {
            Offset::Dynamic(offset) => uextend_i32_to_pointer_type(builder, pointer_type, offset),
            Offset::Static(offset) => builder.ins().iconst(pointer_type, i64::from(offset)),
        };

        let index_and_offset =
            builder
                .ins()
                .uadd_overflow_trap(index, offset, TRAP_INTERNAL_ASSERT);

        let end = match check {
            BoundsCheck::Object(object_size) => {
                // Check that `index + object_size` is in bounds. This can be
                // deduplicated across multiple accesses to different fields
                // within the same object.
                let object_size = uextend_i32_to_pointer_type(builder, pointer_type, object_size);
                builder
                    .ins()
                    .uadd_overflow_trap(index, object_size, TRAP_INTERNAL_ASSERT)
            }
            BoundsCheck::Access(access_size) => {
                // Check that `index + offset + access_size` is in bounds.
                let access_size = builder.ins().iconst(pointer_type, i64::from(access_size));
                builder.ins().uadd_overflow_trap(
                    index_and_offset,
                    access_size,
                    TRAP_INTERNAL_ASSERT,
                )
            }
        };

        let is_in_bounds =
            builder
                .ins()
                .icmp(ir::condcodes::IntCC::UnsignedLessThanOrEqual, end, bound);
        builder.ins().trapz(is_in_bounds, TRAP_INTERNAL_ASSERT);

        // NB: No need to check for overflow here, as that would mean that the
        // GC heap is hanging off the end of the address space, which is
        // impossible.
        let result = builder.ins().iadd(base, index_and_offset);
        log::trace!("prepare_gc_ref_access(..) -> {result:?}");
        result
    }

    /// Emit checks (if necessary) for whether the given `gc_ref` is null or is
    /// an `i31ref`.
    ///
    /// Takes advantage of static information based on `ty` as to whether the GC
    /// reference is nullable or can ever be an `i31`.
    ///
    /// Returns an `i32` `ir::Value` that will be non-zero if the GC reference
    /// is null or is an `i31ref`, and zero otherwise.
    ///
    /// This method is collector-agnostic.
    #[cfg_attr(not(feature = "gc-drc"), allow(dead_code))]
    fn gc_ref_is_null_or_i31(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: WasmRefType,
        gc_ref: ir::Value,
    ) -> ir::Value {
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
        assert!(ty.is_vmgcref_type_and_not_i31());

        let might_be_i31 = match ty.heap_type {
            // If we are definitely dealing with an i31, we shouldn't be
            // emitting dynamic checks for it, and the caller shouldn't call
            // this function. Should have been caught by the assertion at the
            // start of the function.
            WasmHeapType::I31 => unreachable!(),

            // Could potentially be an i31.
            WasmHeapType::Any | WasmHeapType::Eq => true,

            // If it is definitely a struct, array, or uninhabited type, then it
            // is definitely not an i31.
            WasmHeapType::Array
            | WasmHeapType::ConcreteArray(_)
            | WasmHeapType::Struct
            | WasmHeapType::ConcreteStruct(_)
            | WasmHeapType::None => false,

            // Despite being a different type hierarchy, this *could* be an
            // `i31` if it is the result of
            //
            //     (extern.convert_any (ref.i31 ...))
            WasmHeapType::Extern => true,

            // Can only ever be `null`.
            WasmHeapType::NoExtern => false,

            // Wrong type hierarchy, and also funcrefs are not GC-managed
            // types. Should have been caught by the assertion at the start of
            // the function.
            WasmHeapType::Func | WasmHeapType::ConcreteFunc(_) | WasmHeapType::NoFunc => {
                unreachable!()
            }

            WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(), // FIXME: #10248 stack switching support.
        };

        match (ty.nullable, might_be_i31) {
            // This GC reference statically cannot be null nor an i31. (Let
            // Cranelift's optimizer const-propagate this value and erase any
            // unnecessary control flow resulting from branching on this value.)
            (false, false) => builder.ins().iconst(ir::types::I32, 0),

            // This GC reference is always non-null, but might be an i31.
            (false, true) => builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT)),

            // This GC reference might be null, but can never be an i31.
            (true, false) => builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0),

            // Fully general case: this GC reference could be either null or an
            // i31.
            (true, true) => {
                let is_i31 = builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT));
                let is_null = builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0);
                let is_null = builder.ins().uextend(ir::types::I32, is_null);
                builder.ins().bor(is_i31, is_null)
            }
        }
    }

    /// Emit code to check whether `a <: b` for two `VMSharedTypeIndex`es.
    pub(crate) fn is_subtype(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        a: ir::Value,
        b: ir::Value,
    ) -> ir::Value {
        log::trace!("is_subtype({a:?}, {b:?})");
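        // This emits the equivalent of the following pseudo-CLIF:
        //
        // current_block:
        //     same_ty = icmp eq a, b
        //     brif same_ty, continue_block(same_ty), diff_tys_block
        //
        // diff_tys_block:
        //     result = call is_subtype(vmctx, a, b)
        //     jump continue_block(result)
        //
        // continue_block(result: i32):
        //     ...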

        let diff_tys_block = builder.create_block();
        let continue_block = builder.create_block();

        // Current block: fast path for when `a == b`.
        log::trace!("is_subtype: fast path check for exact same types");
        let same_ty = builder.ins().icmp(IntCC::Equal, a, b);
        let same_ty = builder.ins().uextend(ir::types::I32, same_ty);
        builder
            .ins()
            .brif(same_ty, continue_block, &[same_ty], diff_tys_block, &[]);

        // Different types block: fall back to the `is_subtype` libcall.
        builder.switch_to_block(diff_tys_block);
        log::trace!("is_subtype: slow path to do full `is_subtype` libcall");
        let is_subtype = self.builtin_functions.is_subtype(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let call_inst = builder.ins().call(is_subtype, &[vmctx, a, b]);
        let result = builder.func.dfg.first_result(call_inst);
        builder.ins().jump(continue_block, &[result]);

        // Continue block: join point for the result.
        builder.switch_to_block(continue_block);
        let result = builder.append_block_param(continue_block, ir::types::I32);
        log::trace!("is_subtype(..) -> {result:?}");

        builder.seal_block(diff_tys_block);
        builder.seal_block(continue_block);

        result
    }
1476}