wasmtime_cranelift/gc/
enabled.rs

1use super::GcCompiler;
2use crate::func_environ::{Extension, FuncEnvironment};
3use crate::gc::ArrayInit;
4use crate::translate::{StructFieldsVec, TargetEnvironment};
5use crate::TRAP_INTERNAL_ASSERT;
6use cranelift_codegen::{
7    cursor::FuncCursor,
8    ir::{self, condcodes::IntCC, InstBuilder},
9};
10use cranelift_entity::packed_option::ReservedValue;
11use cranelift_frontend::FunctionBuilder;
12use smallvec::SmallVec;
13use wasmtime_environ::{
14    wasm_unsupported, Collector, GcArrayLayout, GcLayout, GcStructLayout, ModuleInternedTypeIndex,
15    PtrSize, TypeIndex, VMGcKind, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult,
16    WasmStorageType, WasmValType, I31_DISCRIMINANT,
17};
18
19#[cfg(feature = "gc-drc")]
20mod drc;
21#[cfg(feature = "gc-null")]
22mod null;
23
24/// Get the default GC compiler.
/// Get the default GC compiler.
///
/// Which implementation is returned is determined by the collector selected
/// in the environment's tunables. Selecting a collector whose cargo feature
/// was disabled at compile time, or selecting no collector at all, yields a
/// `wasm_unsupported` error.
pub fn gc_compiler(func_env: &FuncEnvironment<'_>) -> WasmResult<Box<dyn GcCompiler>> {
    match func_env.tunables.collector {
        // Each collector has a pair of mutually-exclusive arms: one for when
        // its cargo feature is enabled, and one producing an "unsupported"
        // error when the implementation was compiled out.
        #[cfg(feature = "gc-drc")]
        Some(Collector::DeferredReferenceCounting) => Ok(Box::new(drc::DrcCompiler::default())),
        #[cfg(not(feature = "gc-drc"))]
        Some(Collector::DeferredReferenceCounting) => Err(wasm_unsupported!(
            "the DRC collector is unavailable because the `gc-drc` feature \
             was disabled at compile time",
        )),

        #[cfg(feature = "gc-null")]
        Some(Collector::Null) => Ok(Box::new(null::NullCompiler::default())),
        #[cfg(not(feature = "gc-null"))]
        Some(Collector::Null) => Err(wasm_unsupported!(
            "the null collector is unavailable because the `gc-null` feature \
             was disabled at compile time",
        )),

        // No collector was configured; tailor the error message to whether
        // any collector could even have been selected for this build.
        #[cfg(any(feature = "gc-drc", feature = "gc-null"))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled at configuration time"
        )),
        #[cfg(not(any(feature = "gc-drc", feature = "gc-null")))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled because no collector implementation \
             was selected at compile time; enable one of the `gc-drc` or \
             `gc-null` features",
        )),
    }
}
55
56#[cfg_attr(not(feature = "gc-drc"), allow(dead_code))]
57fn unbarriered_load_gc_ref(
58    builder: &mut FunctionBuilder,
59    ty: WasmHeapType,
60    ptr_to_gc_ref: ir::Value,
61    flags: ir::MemFlags,
62) -> WasmResult<ir::Value> {
63    debug_assert!(ty.is_vmgcref_type());
64    let gc_ref = builder.ins().load(ir::types::I32, flags, ptr_to_gc_ref, 0);
65    if ty != WasmHeapType::I31 {
66        builder.declare_value_needs_stack_map(gc_ref);
67    }
68    Ok(gc_ref)
69}
70
71#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
72fn unbarriered_store_gc_ref(
73    builder: &mut FunctionBuilder,
74    ty: WasmHeapType,
75    dst: ir::Value,
76    gc_ref: ir::Value,
77    flags: ir::MemFlags,
78) -> WasmResult<()> {
79    debug_assert!(ty.is_vmgcref_type());
80    builder.ins().store(flags, gc_ref, dst, 0);
81    Ok(())
82}
83
/// Emit code to read a struct field or array element from its raw address in
/// the GC heap.
///
/// The given address MUST have already been bounds-checked via
/// `prepare_gc_ref_access`.
///
/// `extension` must be `Some` exactly when `ty` is a packed (`i8`/`i16`)
/// storage type, and determines how the narrow load is widened to `i32`.
fn read_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ty: WasmStorageType,
    addr: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    assert_eq!(extension.is_none(), matches!(ty, WasmStorageType::Val(_)));
    assert_eq!(
        extension.is_some(),
        matches!(ty, WasmStorageType::I8 | WasmStorageType::I16)
    );

    // Data inside GC objects is always little endian.
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    let value = match ty {
        // Packed fields: load at their narrow width; widened below.
        WasmStorageType::I8 => builder.ins().load(ir::types::I8, flags, addr, 0),
        WasmStorageType::I16 => builder.ins().load(ir::types::I16, flags, addr, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => builder.ins().load(ir::types::I32, flags, addr, 0),
            WasmValType::I64 => builder.ins().load(ir::types::I64, flags, addr, 0),
            WasmValType::F32 => builder.ins().load(ir::types::F32, flags, addr, 0),
            WasmValType::F64 => builder.ins().load(ir::types::F64, flags, addr, 0),
            WasmValType::V128 => builder.ins().load(ir::types::I8X16, flags, addr, 0),
            WasmValType::Ref(r) => match r.heap_type.top() {
                // `any`/`extern` hierarchies go through the collector's read
                // barrier.
                WasmHeapTopType::Any | WasmHeapTopType::Extern => gc_compiler(func_env)?
                    .translate_read_gc_reference(func_env, builder, r, addr, flags)?,
                // Function references are stored in the GC heap as interned
                // 32-bit table ids; convert back to a raw `funcref` via a
                // builtin call that also checks the expected type.
                WasmHeapTopType::Func => {
                    let expected_ty = match r.heap_type {
                        // Untyped `funcref`: no concrete type to check.
                        WasmHeapType::Func => ModuleInternedTypeIndex::reserved_value(),
                        WasmHeapType::ConcreteFunc(ty) => ty.unwrap_module_type_index(),
                        WasmHeapType::NoFunc => {
                            let null = builder.ins().iconst(func_env.pointer_type(), 0);
                            if !r.nullable {
                                // Because `nofunc` is uninhabited, and this
                                // reference is non-null, this is unreachable
                                // code. Unconditionally trap via conditional
                                // trap instructions to avoid inserting block
                                // terminators in the middle of this block.
                                builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
                            }
                            return Ok(null);
                        }
                        _ => unreachable!("not a function heap type"),
                    };
                    let expected_ty = builder
                        .ins()
                        .iconst(ir::types::I32, i64::from(expected_ty.as_bits()));

                    let vmctx = func_env.vmctx_val(&mut builder.cursor());

                    let func_ref_id = builder.ins().load(ir::types::I32, flags, addr, 0);
                    let get_interned_func_ref = func_env
                        .builtin_functions
                        .get_interned_func_ref(builder.func);

                    let call_inst = builder
                        .ins()
                        .call(get_interned_func_ref, &[vmctx, func_ref_id, expected_ty]);
                    builder.func.dfg.first_result(call_inst)
                }
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        },
    };

    // Packed fields were loaded at their narrow width; widen to `i32` using
    // the caller-requested extension.
    let value = match extension {
        Some(Extension::Sign) => builder.ins().sextend(ir::types::I32, value),
        Some(Extension::Zero) => builder.ins().uextend(ir::types::I32, value),
        None => value,
    };

    Ok(value)
}
164
/// Emit code to write a `funcref` into a GC object's field at `field_addr`.
///
/// Raw function pointers are never stored in the GC heap directly; instead
/// the `funcref` is interned into the store's func-ref table via a builtin
/// call, and the resulting 32-bit id is what gets stored in the field.
fn write_func_ref_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ref_type: WasmRefType,
    flags: ir::MemFlags,
    field_addr: ir::Value,
    func_ref: ir::Value,
) -> WasmResult<()> {
    assert_eq!(ref_type.heap_type.top(), WasmHeapTopType::Func);

    let vmctx = func_env.vmctx_val(&mut builder.cursor());

    let intern_func_ref_for_gc_heap = func_env
        .builtin_functions
        .intern_func_ref_for_gc_heap(builder.func);

    // `nofunc` references can only ever be null, so normalize to a null
    // pointer before interning.
    let func_ref = if ref_type.heap_type == WasmHeapType::NoFunc {
        let null = builder.ins().iconst(func_env.pointer_type(), 0);
        if !ref_type.nullable {
            // Because `nofunc` is uninhabited, and this reference is
            // non-null, this is unreachable code. Unconditionally trap
            // via conditional trap instructions to avoid inserting
            // block terminators in the middle of this block.
            builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
        }
        null
    } else {
        func_ref
    };

    // Convert the raw `funcref` into a `FuncRefTableId` for use in the
    // GC heap.
    let call_inst = builder
        .ins()
        .call(intern_func_ref_for_gc_heap, &[vmctx, func_ref]);
    let func_ref_id = builder.func.dfg.first_result(call_inst);
    let func_ref_id = builder.ins().ireduce(ir::types::I32, func_ref_id);

    // Store the id in the field.
    builder.ins().store(flags, func_ref_id, field_addr, 0);

    Ok(())
}
208
209fn write_field_at_addr(
210    func_env: &mut FuncEnvironment<'_>,
211    builder: &mut FunctionBuilder<'_>,
212    field_ty: WasmStorageType,
213    field_addr: ir::Value,
214    new_val: ir::Value,
215) -> WasmResult<()> {
216    // Data inside GC objects is always little endian.
217    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);
218
219    match field_ty {
220        WasmStorageType::I8 => {
221            builder.ins().istore8(flags, new_val, field_addr, 0);
222        }
223        WasmStorageType::I16 => {
224            builder.ins().istore16(flags, new_val, field_addr, 0);
225        }
226        WasmStorageType::Val(WasmValType::Ref(r)) if r.heap_type.top() == WasmHeapTopType::Func => {
227            write_func_ref_at_addr(func_env, builder, r, flags, field_addr, new_val)?;
228        }
229        WasmStorageType::Val(WasmValType::Ref(r)) => {
230            gc_compiler(func_env)?
231                .translate_write_gc_reference(func_env, builder, r, field_addr, new_val, flags)?;
232        }
233        WasmStorageType::Val(_) => {
234            assert_eq!(
235                builder.func.dfg.value_type(new_val).bytes(),
236                wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty)
237            );
238            builder.ins().store(flags, new_val, field_addr, 0);
239        }
240    }
241    Ok(())
242}
243
244pub fn translate_struct_new(
245    func_env: &mut FuncEnvironment<'_>,
246    builder: &mut FunctionBuilder<'_>,
247    struct_type_index: TypeIndex,
248    fields: &[ir::Value],
249) -> WasmResult<ir::Value> {
250    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
251}
252
253fn default_value(
254    cursor: &mut FuncCursor,
255    func_env: &FuncEnvironment<'_>,
256    ty: &WasmStorageType,
257) -> ir::Value {
258    match ty {
259        WasmStorageType::I8 | WasmStorageType::I16 => cursor.ins().iconst(ir::types::I32, 0),
260        WasmStorageType::Val(v) => match v {
261            WasmValType::I32 => cursor.ins().iconst(ir::types::I32, 0),
262            WasmValType::I64 => cursor.ins().iconst(ir::types::I64, 0),
263            WasmValType::F32 => cursor.ins().f32const(0.0),
264            WasmValType::F64 => cursor.ins().f64const(0.0),
265            WasmValType::V128 => {
266                let c = cursor.func.dfg.constants.insert(vec![0; 16].into());
267                cursor.ins().vconst(ir::types::I8X16, c)
268            }
269            WasmValType::Ref(r) => {
270                assert!(r.nullable);
271                let (ty, needs_stack_map) = func_env.reference_type(r.heap_type);
272
273                // NB: The collector doesn't need to know about null references.
274                let _ = needs_stack_map;
275
276                cursor.ins().iconst(ty, 0)
277            }
278        },
279    }
280}
281
282pub fn translate_struct_new_default(
283    func_env: &mut FuncEnvironment<'_>,
284    builder: &mut FunctionBuilder<'_>,
285    struct_type_index: TypeIndex,
286) -> WasmResult<ir::Value> {
287    let interned_ty = func_env.module.types[struct_type_index].unwrap_module_type_index();
288    let struct_ty = func_env.types.unwrap_struct(interned_ty)?;
289    let fields = struct_ty
290        .fields
291        .iter()
292        .map(|f| default_value(&mut builder.cursor(), func_env, &f.element_type))
293        .collect::<StructFieldsVec>();
294    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
295}
296
/// Translate a `struct.get` (or `struct.get_s`/`struct.get_u`, depending on
/// `extension`) instruction.
///
/// Traps if `struct_ref` is null.
pub fn translate_struct_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!("translate_struct_get({struct_type_index:?}, {field_index:?}, {struct_ref:?}, {extension:?})");

    // TODO: If we know we have a `(ref $my_struct)` here, instead of maybe a
    // `(ref null $my_struct)`, we could omit the `trapz`. But plumbing that
    // type info from `wasmparser` and through to here is a bit funky.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_layout(interned_type_index);
    let struct_size = struct_layout.size;
    let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));

    let field_offset = struct_layout.fields[field_index];
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    // Layout sanity check: the field must lie entirely within the object.
    assert!(field_offset + field_size <= struct_size);

    // Bounds check against the whole struct's size (not just this field's)
    // so that repeated accesses to the same struct can share one check.
    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        Offset::Static(field_offset),
        BoundsCheck::Object(struct_size_val),
    );

    let result = read_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        extension,
    );
    log::trace!("translate_struct_get(..) -> {result:?}");
    result
}
341
/// Translate a `struct.set` instruction: write `new_val` into the
/// `field_index`th field of the struct referenced by `struct_ref`.
///
/// Traps if `struct_ref` is null.
pub fn translate_struct_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_struct_set({struct_type_index:?}, {field_index:?}, struct_ref: {struct_ref:?}, new_val: {new_val:?})"
    );

    // TODO: See comment in `translate_struct_get` about the `trapz`.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_layout(interned_type_index);
    let struct_size = struct_layout.size;
    let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));

    let field_offset = struct_layout.fields[field_index];
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    // Layout sanity check: the field must lie entirely within the object.
    assert!(field_offset + field_size <= struct_size);

    // Bounds check against the whole struct's size (not just this field's)
    // so that repeated accesses to the same struct can share one check.
    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        Offset::Static(field_offset),
        BoundsCheck::Object(struct_size_val),
    );

    write_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        new_val,
    )?;

    log::trace!("translate_struct_set: finished");
    Ok(())
}
387
388pub fn translate_array_new(
389    func_env: &mut FuncEnvironment<'_>,
390    builder: &mut FunctionBuilder,
391    array_type_index: TypeIndex,
392    elem: ir::Value,
393    len: ir::Value,
394) -> WasmResult<ir::Value> {
395    log::trace!("translate_array_new({array_type_index:?}, {elem:?}, {len:?})");
396    let result = gc_compiler(func_env)?.alloc_array(
397        func_env,
398        builder,
399        array_type_index,
400        ArrayInit::Fill { elem, len },
401    )?;
402    log::trace!("translate_array_new(..) -> {result:?}");
403    Ok(result)
404}
405
406pub fn translate_array_new_default(
407    func_env: &mut FuncEnvironment<'_>,
408    builder: &mut FunctionBuilder,
409    array_type_index: TypeIndex,
410    len: ir::Value,
411) -> WasmResult<ir::Value> {
412    log::trace!("translate_array_new_default({array_type_index:?}, {len:?})");
413
414    let interned_ty = func_env.module.types[array_type_index].unwrap_module_type_index();
415    let array_ty = func_env.types.unwrap_array(interned_ty)?;
416    let elem = default_value(&mut builder.cursor(), func_env, &array_ty.0.element_type);
417    let result = gc_compiler(func_env)?.alloc_array(
418        func_env,
419        builder,
420        array_type_index,
421        ArrayInit::Fill { elem, len },
422    )?;
423    log::trace!("translate_array_new_default(..) -> {result:?}");
424    Ok(result)
425}
426
427pub fn translate_array_new_fixed(
428    func_env: &mut FuncEnvironment<'_>,
429    builder: &mut FunctionBuilder,
430    array_type_index: TypeIndex,
431    elems: &[ir::Value],
432) -> WasmResult<ir::Value> {
433    log::trace!("translate_array_new_fixed({array_type_index:?}, {elems:?})");
434    let result = gc_compiler(func_env)?.alloc_array(
435        func_env,
436        builder,
437        array_type_index,
438        ArrayInit::Elems(elems),
439    )?;
440    log::trace!("translate_array_new_fixed(..) -> {result:?}");
441    Ok(result)
442}
443
impl ArrayInit<'_> {
    /// Get the length (as an `i32`-typed `ir::Value`) of these array elements.
    #[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
    fn len(self, pos: &mut FuncCursor) -> ir::Value {
        match self {
            // Fill: the length was supplied dynamically by the caller.
            ArrayInit::Fill { len, .. } => len,
            // Explicit elements: the length is a compile-time constant.
            ArrayInit::Elems(e) => {
                let len = u32::try_from(e.len()).unwrap();
                pos.ins().iconst(ir::types::I32, i64::from(len))
            }
        }
    }

    /// Initialize a newly-allocated array's elements.
    ///
    /// * `base_size` — byte size of the array object's header, i.e. the
    ///   offset from the object's start to its first element.
    /// * `size` — `i32` byte size of the whole array object.
    /// * `elems_addr` — address of the first element.
    /// * `init_field` — callback that emits the write of one value to one
    ///   element address.
    #[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
    fn initialize(
        self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        interned_type_index: ModuleInternedTypeIndex,
        base_size: u32,
        size: ir::Value,
        elems_addr: ir::Value,
        mut init_field: impl FnMut(
            &mut FuncEnvironment<'_>,
            &mut FunctionBuilder<'_>,
            WasmStorageType,
            ir::Value,
            ir::Value,
        ) -> WasmResult<()>,
    ) -> WasmResult<()> {
        log::trace!(
            "initialize_array({interned_type_index:?}, {base_size:?}, {size:?}, {elems_addr:?})"
        );

        assert!(!func_env.types[interned_type_index].composite_type.shared);
        let array_ty = func_env.types[interned_type_index]
            .composite_type
            .inner
            .unwrap_array();
        let elem_ty = array_ty.0.element_type;
        let elem_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&elem_ty);
        let pointer_type = func_env.pointer_type();
        let elem_size = builder.ins().iconst(pointer_type, i64::from(elem_size));
        match self {
            // Statically-known elements: emit one straight-line write per
            // element, bumping the address by `elem_size` each time.
            ArrayInit::Elems(elems) => {
                let mut elem_addr = elems_addr;
                for val in elems {
                    init_field(func_env, builder, elem_ty, elem_addr, *val)?;
                    elem_addr = builder.ins().iadd(elem_addr, elem_size);
                }
            }
            // One repeated value: emit a fill loop over the element region.
            ArrayInit::Fill { elem, len: _ } => {
                // Compute the end address of the elements: the object's start
                // (`elems_addr - base_size`) plus its total size.
                let base_size = builder.ins().iconst(pointer_type, i64::from(base_size));
                let array_addr = builder.ins().isub(elems_addr, base_size);
                let size = uextend_i32_to_pointer_type(builder, pointer_type, size);
                let elems_end = builder.ins().iadd(array_addr, size);

                emit_array_fill_impl(
                    func_env,
                    builder,
                    elems_addr,
                    elem_size,
                    elems_end,
                    |func_env, builder, elem_addr| {
                        init_field(func_env, builder, elem_ty, elem_addr, elem)
                    },
                )?;
            }
        }
        log::trace!("initialize_array: finished");
        Ok(())
    }
}
519
/// Emit a loop that invokes `emit_elem_write` once for every element address
/// in `[elem_addr, fill_end)`, stepping by `elem_size` bytes.
///
/// `elem_addr`, `elem_size`, and `fill_end` must all be of the target's
/// pointer type. The loop terminates when the running address equals
/// `fill_end` exactly, so `fill_end - elem_addr` must be a multiple of
/// `elem_size`.
fn emit_array_fill_impl(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    elem_addr: ir::Value,
    elem_size: ir::Value,
    fill_end: ir::Value,
    mut emit_elem_write: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    log::trace!("emit_array_fill_impl(elem_addr: {elem_addr:?}, elem_size: {elem_size:?}, fill_end: {fill_end:?})");

    let pointer_ty = func_env.pointer_type();

    assert_eq!(builder.func.dfg.value_type(elem_addr), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(elem_size), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(fill_end), pointer_ty);

    // Loop to fill the elements, emitting the equivalent of the following
    // pseudo-CLIF:
    //
    // current_block:
    //     ...
    //     jump loop_header_block(elem_addr)
    //
    // loop_header_block(elem_addr: i32):
    //     done = icmp eq elem_addr, fill_end
    //     brif done, continue_block, loop_body_block
    //
    // loop_body_block:
    //     emit_elem_write()
    //     next_elem_addr = iadd elem_addr, elem_size
    //     jump loop_header_block(next_elem_addr)
    //
    // continue_block:
    //     ...

    let current_block = builder.current_block().unwrap();
    let loop_header_block = builder.create_block();
    let loop_body_block = builder.create_block();
    let continue_block = builder.create_block();

    builder.ensure_inserted_block();
    builder.insert_block_after(loop_header_block, current_block);
    builder.insert_block_after(loop_body_block, loop_header_block);
    builder.insert_block_after(continue_block, loop_body_block);

    // Current block: jump to the loop header block with the first element's
    // address.
    builder.ins().jump(loop_header_block, &[elem_addr]);

    // Loop header block: check if we're done, then jump to either the continue
    // block or the loop body block.
    builder.switch_to_block(loop_header_block);
    builder.append_block_param(loop_header_block, pointer_ty);
    log::trace!("emit_array_fill_impl: loop header");
    // Shadow `elem_addr` with the loop-carried block parameter.
    let elem_addr = builder.block_params(loop_header_block)[0];
    let done = builder.ins().icmp(IntCC::Equal, elem_addr, fill_end);
    builder
        .ins()
        .brif(done, continue_block, &[], loop_body_block, &[]);

    // Loop body block: write the value to the current element, compute the next
    // element's address, and then jump back to the loop header block.
    builder.switch_to_block(loop_body_block);
    log::trace!("emit_array_fill_impl: loop body");
    emit_elem_write(func_env, builder, elem_addr)?;
    let next_elem_addr = builder.ins().iadd(elem_addr, elem_size);
    builder.ins().jump(loop_header_block, &[next_elem_addr]);

    // Continue... All predecessors of these blocks are now known, so they can
    // be sealed.
    builder.switch_to_block(continue_block);
    log::trace!("emit_array_fill_impl: finished");
    builder.seal_block(loop_header_block);
    builder.seal_block(loop_body_block);
    builder.seal_block(continue_block);
    Ok(())
}
600
601pub fn translate_array_fill(
602    func_env: &mut FuncEnvironment<'_>,
603    builder: &mut FunctionBuilder<'_>,
604    array_type_index: TypeIndex,
605    array_ref: ir::Value,
606    index: ir::Value,
607    value: ir::Value,
608    n: ir::Value,
609) -> WasmResult<()> {
610    log::trace!(
611        "translate_array_fill({array_type_index:?}, {array_ref:?}, {index:?}, {value:?}, {n:?})"
612    );
613
614    let len = translate_array_len(func_env, builder, array_ref)?;
615
616    // Check that the full range of elements we want to fill is within bounds.
617    let end_index = func_env.uadd_overflow_trap(builder, index, n, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
618    let out_of_bounds = builder
619        .ins()
620        .icmp(IntCC::UnsignedGreaterThan, end_index, len);
621    func_env.trapnz(builder, out_of_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
622
623    // Get the address of the first element we want to fill.
624    let interned_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
625    let ArraySizeInfo {
626        obj_size,
627        one_elem_size,
628        base_size,
629    } = emit_array_size_info(func_env, builder, interned_type_index, len);
630    let offset_in_elems = builder.ins().imul(index, one_elem_size);
631    let obj_offset = builder.ins().iadd(base_size, offset_in_elems);
632    let elem_addr = func_env.prepare_gc_ref_access(
633        builder,
634        array_ref,
635        Offset::Dynamic(obj_offset),
636        BoundsCheck::Object(obj_size),
637    );
638
639    // Calculate the end address, just after the filled region.
640    let fill_size = uextend_i32_to_pointer_type(builder, func_env.pointer_type(), offset_in_elems);
641    let fill_end = builder.ins().iadd(elem_addr, fill_size);
642
643    let one_elem_size =
644        uextend_i32_to_pointer_type(builder, func_env.pointer_type(), one_elem_size);
645
646    let result = emit_array_fill_impl(
647        func_env,
648        builder,
649        elem_addr,
650        one_elem_size,
651        fill_end,
652        |func_env, builder, elem_addr| {
653            let elem_ty = func_env
654                .types
655                .unwrap_array(interned_type_index)?
656                .0
657                .element_type;
658            write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)
659        },
660    )?;
661    log::trace!("translate_array_fill(..) -> {result:?}");
662    Ok(result)
663}
664
665pub fn translate_array_len(
666    func_env: &mut FuncEnvironment<'_>,
667    builder: &mut FunctionBuilder,
668    array_ref: ir::Value,
669) -> WasmResult<ir::Value> {
670    log::trace!("translate_array_len({array_ref:?})");
671
672    func_env.trapz(builder, array_ref, crate::TRAP_NULL_REFERENCE);
673
674    let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
675    let len_field = func_env.prepare_gc_ref_access(
676        builder,
677        array_ref,
678        Offset::Static(len_offset),
679        // Note: We can't bounds check the whole array object's size because we
680        // don't know its length yet. Chicken and egg problem.
681        BoundsCheck::Access(ir::types::I32.bytes()),
682    );
683    let result = builder.ins().load(
684        ir::types::I32,
685        ir::MemFlags::trusted().with_readonly(),
686        len_field,
687        0,
688    );
689    log::trace!("translate_array_len(..) -> {result:?}");
690    Ok(result)
691}
692
/// Dynamically-computed size information about an array object, as produced
/// by `emit_array_size_info`.
struct ArraySizeInfo {
    /// The `i32` size of the whole array object, in bytes.
    obj_size: ir::Value,

    /// The `i32` size of each one of the array's elements, in bytes.
    one_elem_size: ir::Value,

    /// The `i32` size of the array's base object, in bytes. This is also the
    /// offset from the start of the array object to its elements.
    base_size: ir::Value,
}
704
/// Emit code to get the dynamic size (in bytes) of a whole array object, along
/// with some other related bits.
///
/// Emits internal-assert traps if the computed size would overflow `i32`,
/// since `array_len` comes from the (untrusted) GC heap.
fn emit_array_size_info(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    // `i32` value containing the array's length.
    array_len: ir::Value,
) -> ArraySizeInfo {
    let array_layout = func_env.array_layout(array_type_index);

    // Note that we check for overflow below because we can't trust the array's
    // length: it came from inside the GC heap.
    //
    // We check for 32-bit multiplication overflow by performing a 64-bit
    // multiplication and testing the high bits.
    let one_elem_size = builder
        .ins()
        .iconst(ir::types::I64, i64::from(array_layout.elem_size));
    let array_len = builder.ins().uextend(ir::types::I64, array_len);
    let all_elems_size = builder.ins().imul(one_elem_size, array_len);

    // Any set bit in the upper 32 bits means `len * elem_size` overflowed.
    let high_bits = builder.ins().ushr_imm(all_elems_size, 32);
    builder.ins().trapnz(high_bits, TRAP_INTERNAL_ASSERT);

    let all_elems_size = builder.ins().ireduce(ir::types::I32, all_elems_size);
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));
    // Adding the header size can also overflow; trap on that, too.
    let obj_size =
        builder
            .ins()
            .uadd_overflow_trap(all_elems_size, base_size, TRAP_INTERNAL_ASSERT);

    // Narrow the element size back to `i32` for the caller.
    let one_elem_size = builder.ins().ireduce(ir::types::I32, one_elem_size);

    ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    }
}
747
/// Get the bounds-checked address of an element in an array.
///
/// The emitted code will trap if `index >= array.length`.
///
/// Returns the `ir::Value` containing the address of the `index`th element in
/// the array. You may read or write a value of the array's element type at this
/// address. You may not use it for any other kind of access, nor reuse this
/// value across GC safepoints.
fn array_elem_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
) -> ir::Value {
    // First, assert that `index < array.length`.
    //
    // This check is visible at the Wasm-semantics level.
    //
    // TODO: We should emit spectre-safe bounds checks for array accesses (if
    // configured) but we don't currently have a great way to do that here. The
    // proper solution is to use linear memories to back GC heaps and reuse the
    // code in `bounds_check.rs` to implement these bounds checks. That is all
    // planned, but not yet implemented.

    let len = translate_array_len(func_env, builder, array_ref).unwrap();

    let in_bounds = builder.ins().icmp(IntCC::UnsignedLessThan, index, len);
    func_env.trapz(builder, in_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    // Compute the size (in bytes) of the whole array object.
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, array_type_index, len);

    // Compute the offset of the `index`th element within the array object.
    //
    // NB: no need to check for overflow here, since at this point we know that
    // `len * elem_size + base_size` did not overflow and `i < len`.
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let offset_in_array = builder.ins().iadd(offset_in_elems, base_size);

    // Finally, use the object size and element offset we just computed to
    // perform our implementation-internal bounds checks.
    //
    // Checking the whole object's size, rather than the `index`th element's
    // size allows these bounds checks to be deduplicated across repeated
    // accesses to the same array at different indices.
    //
    // This check should not be visible to Wasm, and serve to protect us from
    // our own implementation bugs. The goal is to keep any potential widgets
    // confined within the GC heap, and turn what would otherwise be a security
    // vulnerability into a simple bug.
    //
    // TODO: Ideally we should fold the first Wasm-visible bounds check into
    // this internal bounds check, so that we aren't performing multiple,
    // redundant bounds checks. But we should figure out how to do this in a way
    // that doesn't defeat the object-size bounds checking's deduplication
    // mentioned above.
    func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        Offset::Dynamic(offset_in_array),
        BoundsCheck::Object(obj_size),
    )
}
816
817pub fn translate_array_get(
818    func_env: &mut FuncEnvironment<'_>,
819    builder: &mut FunctionBuilder,
820    array_type_index: TypeIndex,
821    array_ref: ir::Value,
822    index: ir::Value,
823    extension: Option<Extension>,
824) -> WasmResult<ir::Value> {
825    log::trace!("translate_array_get({array_type_index:?}, {array_ref:?}, {index:?})");
826
827    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
828    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);
829
830    let array_ty = func_env.types.unwrap_array(array_type_index)?;
831    let elem_ty = array_ty.0.element_type;
832
833    let result = read_field_at_addr(func_env, builder, elem_ty, elem_addr, extension)?;
834    log::trace!("translate_array_get(..) -> {result:?}");
835    Ok(result)
836}
837
838pub fn translate_array_set(
839    func_env: &mut FuncEnvironment<'_>,
840    builder: &mut FunctionBuilder,
841    array_type_index: TypeIndex,
842    array_ref: ir::Value,
843    index: ir::Value,
844    value: ir::Value,
845) -> WasmResult<()> {
846    log::trace!("translate_array_set({array_type_index:?}, {array_ref:?}, {index:?}, {value:?})");
847
848    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
849    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);
850
851    let array_ty = func_env.types.unwrap_array(array_type_index)?;
852    let elem_ty = array_ty.0.element_type;
853
854    write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)?;
855
856    log::trace!("translate_array_set: finished");
857    Ok(())
858}
859
/// Translate a `ref.test` instruction.
///
/// Produces an `i32` that is `1` when `val` is a reference matching `ref_ty`
/// (taking nullability into account) and `0` otherwise.
pub fn translate_ref_test(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ref_ty: WasmRefType,
    val: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_ref_test({ref_ty:?}, {val:?})");

    // First special case: testing for references to bottom types.
    if ref_ty.heap_type.is_bottom() {
        let result = if ref_ty.nullable {
            // All null references (within the same type hierarchy) match null
            // references to the bottom type.
            func_env.translate_ref_is_null(builder.cursor(), val)?
        } else {
            // `ref.test` is always false for non-nullable bottom types, as the
            // bottom types are uninhabited.
            builder.ins().iconst(ir::types::I32, 0)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // And because `ref.test heap_ty` is only valid on operands whose type is in
    // the same type hierarchy as `heap_ty`, if `heap_ty` is its hierarchy's top
    // type, we only need to worry about whether we are testing for nullability
    // or not.
    if ref_ty.heap_type.is_top() {
        let result = if ref_ty.nullable {
            builder.ins().iconst(ir::types::I32, 1)
        } else {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
            let zero = builder.ins().iconst(ir::types::I32, 0);
            let one = builder.ins().iconst(ir::types::I32, 1);
            builder.ins().select(is_null, zero, one)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // `i31ref`s are a little interesting because they don't point to GC
    // objects; we test the bit pattern of the reference itself.
    if ref_ty.heap_type == WasmHeapType::I31 {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        let result = if ref_ty.nullable {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
            builder.ins().bor(is_null, is_i31)
        } else {
            is_i31
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // Otherwise, in the general case, we need to inspect our given object's
    // actual type, which also requires null-checking and i31-checking it.

    let is_any_hierarchy = ref_ty.heap_type.top() == WasmHeapTopType::Any;

    let non_null_block = builder.create_block();
    let non_null_non_i31_block = builder.create_block();
    let continue_block = builder.create_block();

    // Current block: check if the reference is null and branch appropriately.
    let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
    let result_when_is_null = builder.ins().iconst(ir::types::I32, ref_ty.nullable as i64);
    builder.ins().brif(
        is_null,
        continue_block,
        &[result_when_is_null],
        non_null_block,
        &[],
    );

    // Non-null block: We know the GC ref is non-null, but we need to also check
    // for `i31` references that don't point to GC objects.
    builder.switch_to_block(non_null_block);
    log::trace!("translate_ref_test: non-null ref block");
    if is_any_hierarchy {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        // If it is an `i31`, then create the result value based on whether we
        // want `i31`s to pass the test or not.
        let result_when_is_i31 = builder.ins().iconst(
            ir::types::I32,
            matches!(
                ref_ty.heap_type,
                WasmHeapType::Any | WasmHeapType::Eq | WasmHeapType::I31
            ) as i64,
        );
        builder.ins().brif(
            is_i31,
            continue_block,
            &[result_when_is_i31],
            non_null_non_i31_block,
            &[],
        );
    } else {
        // If we aren't testing the `any` hierarchy, the reference cannot be an
        // `i31ref`. Jump directly to the non-null and non-i31 block; rely on
        // branch folding during lowering to clean this up.
        builder.ins().jump(non_null_non_i31_block, &[]);
    }

    // Non-null and non-i31 block: Read the actual `VMGcKind` or
    // `VMSharedTypeIndex` out of the object's header and check whether it
    // matches the expected type.
    builder.switch_to_block(non_null_non_i31_block);
    log::trace!("translate_ref_test: non-null and non-i31 ref block");
    let check_header_kind = |func_env: &mut FuncEnvironment<'_>,
                             builder: &mut FunctionBuilder,
                             val: ir::Value,
                             expected_kind: VMGcKind|
     -> ir::Value {
        let header_size = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::VM_GC_HEADER_SIZE),
        );
        let kind_addr = func_env.prepare_gc_ref_access(
            builder,
            val,
            Offset::Static(wasmtime_environ::VM_GC_HEADER_KIND_OFFSET),
            BoundsCheck::Object(header_size),
        );
        let actual_kind = builder.ins().load(
            ir::types::I32,
            ir::MemFlags::trusted().with_readonly(),
            kind_addr,
            0,
        );
        let expected_kind = builder
            .ins()
            .iconst(ir::types::I32, i64::from(expected_kind.as_u32()));
        // Inline version of `VMGcKind::matches`: the kind matches when all of
        // the expected kind's bits are set in the actual kind.
        let and = builder.ins().band(actual_kind, expected_kind);
        let kind_matches = builder
            .ins()
            .icmp(ir::condcodes::IntCC::Equal, and, expected_kind);
        // Widen the narrow comparison result to our `i32` result type.
        builder.ins().uextend(ir::types::I32, kind_matches)
    };
    let result = match ref_ty.heap_type {
        WasmHeapType::Any
        | WasmHeapType::None
        | WasmHeapType::Extern
        | WasmHeapType::NoExtern
        | WasmHeapType::Func
        | WasmHeapType::NoFunc
        | WasmHeapType::I31 => unreachable!("handled top, bottom, and i31 types above"),

        // For these abstract but non-top and non-bottom types, we check the
        // `VMGcKind` that is in the object's header.
        WasmHeapType::Eq => check_header_kind(func_env, builder, val, VMGcKind::EqRef),
        WasmHeapType::Struct => check_header_kind(func_env, builder, val, VMGcKind::StructRef),
        WasmHeapType::Array => check_header_kind(func_env, builder, val, VMGcKind::ArrayRef),

        // For concrete types, we need to do a full subtype check between the
        // `VMSharedTypeIndex` in the object's header and the
        // `ModuleInternedTypeIndex` we have here.
        //
        // TODO: This check should ideally be done inline, but we don't have a
        // good way to access the `TypeRegistry`'s supertypes arrays from Wasm
        // code at the moment.
        WasmHeapType::ConcreteArray(ty) | WasmHeapType::ConcreteStruct(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let ty_addr = func_env.prepare_gc_ref_access(
                builder,
                val,
                Offset::Static(wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET),
                BoundsCheck::Access(func_env.offsets.size_of_vmshared_type_index().into()),
            );
            let actual_shared_ty = builder.ins().load(
                ir::types::I32,
                ir::MemFlags::trusted().with_readonly(),
                ty_addr,
                0,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        // Same as for concrete arrays and structs except that a `VMFuncRef`
        // doesn't begin with a `VMGcHeader` and is a raw pointer rather than GC
        // heap index.
        WasmHeapType::ConcreteFunc(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let actual_shared_ty = func_env.load_funcref_type_index(
                &mut builder.cursor(),
                ir::MemFlags::trusted().with_readonly(),
                val,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(), // FIXME: #10248 stack switching support.
    };
    builder.ins().jump(continue_block, &[result]);

    // Control flow join point with the result.
    //
    // Every predecessor passes its `i32` result as a block argument, which
    // becomes this block parameter.
    builder.switch_to_block(continue_block);
    let result = builder.append_block_param(continue_block, ir::types::I32);
    log::trace!("translate_ref_test(..) -> {result:?}");

    builder.seal_block(non_null_block);
    builder.seal_block(non_null_non_i31_block);
    builder.seal_block(continue_block);

    Ok(result)
}
1082
/// A static or dynamic offset from a GC reference.
#[derive(Debug)]
enum Offset {
    /// A static offset from a GC reference.
    Static(u32),

    /// A dynamic `i32` offset from a GC reference.
    ///
    /// The value is zero-extended to the target's pointer width before being
    /// added to the reference.
    Dynamic(ir::Value),
}
1092
/// The kind of bounds check to perform when accessing a GC object's fields and
/// elements.
#[derive(Debug)]
enum BoundsCheck {
    /// Check that this whole object is inside the GC heap:
    ///
    /// ```ignore
    /// gc_ref + size <= gc_heap_bound
    /// ```
    ///
    /// The object size must be an `i32` value.
    Object(ir::Value),

    /// Check that this one access in particular is inside the GC heap:
    ///
    /// ```ignore
    /// gc_ref + offset + access_size <= gc_heap_bound
    /// ```
    ///
    /// Prefer `BoundsCheck::Object` over `BoundsCheck::Access` when possible,
    /// as that approach allows the mid-end to deduplicate bounds checks across
    /// multiple accesses to the same object.
    Access(u32),
}
1117
1118fn uextend_i32_to_pointer_type(
1119    builder: &mut FunctionBuilder,
1120    pointer_type: ir::Type,
1121    value: ir::Value,
1122) -> ir::Value {
1123    assert_eq!(builder.func.dfg.value_type(value), ir::types::I32);
1124    match pointer_type {
1125        ir::types::I32 => value,
1126        ir::types::I64 => builder.ins().uextend(ir::types::I64, value),
1127        _ => unreachable!(),
1128    }
1129}
1130
/// Emit CLIF to compute an array object's total size, given the dynamic length
/// in its initialization.
///
/// Traps if the size overflows.
///
/// Returns an `i32` value holding `base_size + len * elem_size` in bytes.
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn emit_array_size(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_layout: &GcArrayLayout,
    init: ArrayInit<'_>,
) -> ir::Value {
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));
    let len = init.len(&mut builder.cursor());

    // `elems_size = len * elem_size`
    //
    // Check for multiplication overflow and trap if it occurs, since that
    // means Wasm is attempting to allocate an array that is larger than our
    // implementation limits. (Note: there is no standard implementation
    // limit for array length beyond `u32::MAX`.)
    //
    // We implement this check by encoding our logically-32-bit operands as
    // i64 values, doing a 64-bit multiplication, and then checking the high
    // 32 bits of the multiplication's result. If the high 32 bits are not
    // all zeros, then the multiplication overflowed.
    let len = builder.ins().uextend(ir::types::I64, len);
    let elems_size_64 = builder
        .ins()
        .imul_imm(len, i64::from(array_layout.elem_size));
    let high_bits = builder.ins().ushr_imm(elems_size_64, 32);
    func_env.trapnz(builder, high_bits, crate::TRAP_ALLOCATION_TOO_LARGE);
    // The high bits are all zero, so the product fits in 32 bits.
    let elems_size = builder.ins().ireduce(ir::types::I32, elems_size_64);

    // And if adding the base size and elements size overflows, then the
    // allocation is too large.
    let size = func_env.uadd_overflow_trap(
        builder,
        base_size,
        elems_size,
        crate::TRAP_ALLOCATION_TOO_LARGE,
    );

    size
}
1177
1178/// Common helper for struct-field initialization that can be reused across
1179/// collectors.
1180#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
1181fn initialize_struct_fields(
1182    func_env: &mut FuncEnvironment<'_>,
1183    builder: &mut FunctionBuilder<'_>,
1184    struct_ty: ModuleInternedTypeIndex,
1185    raw_ptr_to_struct: ir::Value,
1186    field_values: &[ir::Value],
1187    mut init_field: impl FnMut(
1188        &mut FuncEnvironment<'_>,
1189        &mut FunctionBuilder<'_>,
1190        WasmStorageType,
1191        ir::Value,
1192        ir::Value,
1193    ) -> WasmResult<()>,
1194) -> WasmResult<()> {
1195    let struct_layout = func_env.struct_layout(struct_ty);
1196    let struct_size = struct_layout.size;
1197    let field_offsets: SmallVec<[_; 8]> = struct_layout.fields.iter().copied().collect();
1198    assert_eq!(field_offsets.len(), field_values.len());
1199
1200    assert!(!func_env.types[struct_ty].composite_type.shared);
1201    let struct_ty = func_env.types[struct_ty]
1202        .composite_type
1203        .inner
1204        .unwrap_struct();
1205    let field_types: SmallVec<[_; 8]> = struct_ty.fields.iter().cloned().collect();
1206    assert_eq!(field_types.len(), field_values.len());
1207
1208    for ((ty, val), offset) in field_types.into_iter().zip(field_values).zip(field_offsets) {
1209        let size_of_access = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&ty.element_type);
1210        assert!(offset + size_of_access <= struct_size);
1211        let field_addr = builder.ins().iadd_imm(raw_ptr_to_struct, i64::from(offset));
1212        init_field(func_env, builder, ty.element_type, field_addr, *val)?;
1213    }
1214
1215    Ok(())
1216}
1217
impl FuncEnvironment<'_> {
    /// Get the cached `GcLayout` for the type at `type_index`, computing and
    /// caching it on first use.
    ///
    /// Panics (via `expect`) if the type is not a GC struct or array type.
    fn gc_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcLayout {
        // Lazily compute and cache the layout.
        //
        // NB: the entry API isn't usable here because computing the layout
        // re-borrows `self` (via `gc_compiler` and `self.types`), so we pay
        // for a second lookup instead.
        if !self.ty_to_gc_layout.contains_key(&type_index) {
            let ty = &self.types[type_index].composite_type;
            let layout = gc_compiler(self)
                .unwrap()
                .layouts()
                .gc_layout(ty)
                .expect("should only call `FuncEnvironment::gc_layout` for GC types");
            self.ty_to_gc_layout.insert(type_index, layout);
        }

        self.ty_to_gc_layout.get(&type_index).unwrap()
    }

    /// Get the `GcArrayLayout` for the array type at the given `type_index`.
    ///
    /// Panics if the type is not an array type.
    fn array_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcArrayLayout {
        self.gc_layout(type_index).unwrap_array()
    }

    /// Get the `GcStructLayout` for the struct type at the given `type_index`.
    ///
    /// Panics if the type is not a struct type.
    fn struct_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout {
        self.gc_layout(type_index).unwrap_struct()
    }

    /// Get the GC heap's base pointer.
    ///
    /// Loaded from the `vmctx`; the load is marked `readonly` + `can_move` so
    /// that repeated loads can be deduplicated and hoisted by the mid-end.
    fn get_gc_heap_base(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let ptr_ty = self.pointer_type();
        let flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        let vmctx = self.vmctx(builder.func);
        let vmctx = builder.ins().global_value(ptr_ty, vmctx);

        let base_offset = self.offsets.ptr.vmctx_gc_heap_base();
        let base_offset = i32::from(base_offset);

        builder.ins().load(ptr_ty, flags, vmctx, base_offset)
    }

    /// Get the GC heap's bound.
    ///
    /// Loaded from the `vmctx` with the same `readonly` + `can_move` flags as
    /// `get_gc_heap_base`.
    fn get_gc_heap_bound(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let ptr_ty = self.pointer_type();
        let flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        let vmctx = self.vmctx(builder.func);
        let vmctx = builder.ins().global_value(ptr_ty, vmctx);

        let bound_offset = self.offsets.ptr.vmctx_gc_heap_bound();
        let bound_offset = i32::from(bound_offset);

        builder.ins().load(ptr_ty, flags, vmctx, bound_offset)
    }

    /// Get the GC heap's base pointer and bound.
    fn get_gc_heap_base_bound(&mut self, builder: &mut FunctionBuilder) -> (ir::Value, ir::Value) {
        let base = self.get_gc_heap_base(builder);
        let bound = self.get_gc_heap_bound(builder);
        (base, bound)
    }

    /// Get the raw pointer of `gc_ref[offset]` bounds checked for an access of
    /// `size` bytes.
    ///
    /// The given `gc_ref` must be a non-null, non-i31 GC reference.
    ///
    /// If `check` is a `BoundsCheck::Object`, then it is the callers
    /// responsibility to ensure that `offset + access_size <= object_size`.
    ///
    /// Returns a raw pointer to `gc_ref[offset]` -- not a raw pointer to the GC
    /// object itself (unless `offset` happens to be `0`). This raw pointer may
    /// be used to read or write up to as many bytes as described by `bound`. Do
    /// NOT attempt accesses bytes outside of `bound`; that may lead to
    /// unchecked out-of-bounds accesses.
    ///
    /// This method is collector-agnostic.
    fn prepare_gc_ref_access(
        &mut self,
        builder: &mut FunctionBuilder,
        gc_ref: ir::Value,
        offset: Offset,
        check: BoundsCheck,
    ) -> ir::Value {
        log::trace!("prepare_gc_ref_access({gc_ref:?}, {offset:?}, {check:?})");
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);

        let pointer_type = self.pointer_type();
        let (base, bound) = self.get_gc_heap_base_bound(builder);
        // `gc_ref` is an index into the GC heap, not a raw pointer; widen it
        // to pointer width before doing address arithmetic.
        let index = uextend_i32_to_pointer_type(builder, pointer_type, gc_ref);

        let offset = match offset {
            Offset::Dynamic(offset) => uextend_i32_to_pointer_type(builder, pointer_type, offset),
            Offset::Static(offset) => builder.ins().iconst(pointer_type, i64::from(offset)),
        };

        let index_and_offset =
            builder
                .ins()
                .uadd_overflow_trap(index, offset, TRAP_INTERNAL_ASSERT);

        let end = match check {
            BoundsCheck::Object(object_size) => {
                // Check that `index + object_size` is in bounds. This can be
                // deduplicated across multiple accesses to different fields
                // within the same object.
                let object_size = uextend_i32_to_pointer_type(builder, pointer_type, object_size);
                builder
                    .ins()
                    .uadd_overflow_trap(index, object_size, TRAP_INTERNAL_ASSERT)
            }
            BoundsCheck::Access(access_size) => {
                // Check that `index + offset + access_size` is in bounds.
                let access_size = builder.ins().iconst(pointer_type, i64::from(access_size));
                builder.ins().uadd_overflow_trap(
                    index_and_offset,
                    access_size,
                    TRAP_INTERNAL_ASSERT,
                )
            }
        };

        let is_in_bounds =
            builder
                .ins()
                .icmp(ir::condcodes::IntCC::UnsignedLessThanOrEqual, end, bound);
        builder.ins().trapz(is_in_bounds, TRAP_INTERNAL_ASSERT);

        // NB: No need to check for overflow here, as that would mean that the
        // GC heap is hanging off the end of the address space, which is
        // impossible.
        let result = builder.ins().iadd(base, index_and_offset);
        log::trace!("prepare_gc_ref_access(..) -> {result:?}");
        result
    }

    /// Emit checks (if necessary) for whether the given `gc_ref` is null or is
    /// an `i31ref`.
    ///
    /// Takes advantage of static information based on `ty` as to whether the GC
    /// reference is nullable or can ever be an `i31`.
    ///
    /// Returns an `ir::Value` that is an `i32` will be non-zero if the GC
    /// reference is null or is an `i31ref`; otherwise, it will be zero.
    ///
    /// This method is collector-agnostic.
    #[cfg_attr(not(feature = "gc-drc"), allow(dead_code))]
    fn gc_ref_is_null_or_i31(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: WasmRefType,
        gc_ref: ir::Value,
    ) -> ir::Value {
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
        assert!(ty.is_vmgcref_type_and_not_i31());

        let might_be_i31 = match ty.heap_type {
            // If we are definitely dealing with an i31, we shouldn't be
            // emitting dynamic checks for it, and the caller shouldn't call
            // this function. Should have been caught by the assertion at the
            // start of the function.
            WasmHeapType::I31 => unreachable!(),

            // Could potentially be an i31.
            WasmHeapType::Any | WasmHeapType::Eq => true,

            // If it is definitely a struct, array, or uninhabited type, then it
            // is definitely not an i31.
            WasmHeapType::Array
            | WasmHeapType::ConcreteArray(_)
            | WasmHeapType::Struct
            | WasmHeapType::ConcreteStruct(_)
            | WasmHeapType::None => false,

            // Despite being a different type hierarchy, this *could* be an
            // `i31` if it is the result of
            //
            //     (extern.convert_any (ref.i31 ...))
            WasmHeapType::Extern => true,

            // Can only ever be `null`.
            WasmHeapType::NoExtern => false,

            // Wrong type hierarchy, and also funcrefs are not GC-managed
            // types. Should have been caught by the assertion at the start of
            // the function.
            WasmHeapType::Func | WasmHeapType::ConcreteFunc(_) | WasmHeapType::NoFunc => {
                unreachable!()
            }

            WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(), // FIXME: #10248 stack switching support.
        };

        match (ty.nullable, might_be_i31) {
            // This GC reference statically cannot be null nor an i31. (Let
            // Cranelift's optimizer const-propagate this value and erase any
            // unnecessary control flow resulting from branching on this value.)
            (false, false) => builder.ins().iconst(ir::types::I32, 0),

            // This GC reference is always non-null, but might be an i31.
            (false, true) => builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT)),

            // This GC reference might be null, but can never be an i31.
            (true, false) => builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0),

            // Fully general case: this GC reference could be either null or an
            // i31.
            (true, true) => {
                let is_i31 = builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT));
                let is_null = builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0);
                let is_null = builder.ins().uextend(ir::types::I32, is_null);
                builder.ins().bor(is_i31, is_null)
            }
        }
    }

    /// Emit code to check whether `a <: b` for two `VMSharedTypeIndex`es.
    ///
    /// Returns an `i32` that is non-zero when `a` is a subtype of `b`.
    pub(crate) fn is_subtype(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        a: ir::Value,
        b: ir::Value,
    ) -> ir::Value {
        log::trace!("is_subtype({a:?}, {b:?})");

        let diff_tys_block = builder.create_block();
        let continue_block = builder.create_block();

        // Current block: fast path for when `a == b`.
        //
        // NB: when the branch is taken, `same_ty` is `1`, which is exactly the
        // result we want, so it doubles as the block argument.
        log::trace!("is_subtype: fast path check for exact same types");
        let same_ty = builder.ins().icmp(IntCC::Equal, a, b);
        let same_ty = builder.ins().uextend(ir::types::I32, same_ty);
        builder
            .ins()
            .brif(same_ty, continue_block, &[same_ty], diff_tys_block, &[]);

        // Different types block: fall back to the `is_subtype` libcall.
        builder.switch_to_block(diff_tys_block);
        log::trace!("is_subtype: slow path to do full `is_subtype` libcall");
        let is_subtype = self.builtin_functions.is_subtype(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let call_inst = builder.ins().call(is_subtype, &[vmctx, a, b]);
        let result = builder.func.dfg.first_result(call_inst);
        builder.ins().jump(continue_block, &[result]);

        // Continue block: join point for the result.
        builder.switch_to_block(continue_block);
        let result = builder.append_block_param(continue_block, ir::types::I32);
        log::trace!("is_subtype(..) -> {result:?}");

        builder.seal_block(diff_tys_block);
        builder.seal_block(continue_block);

        result
    }
}