wasmtime_cranelift/func_environ/gc/enabled/null.rs

//! Compiler for the null collector.
//!
//! Note that we don't need to mark any value as requiring inclusion in stack
//! maps inside this module, because the null collector doesn't ever collect
//! anything.

use super::*;
use crate::func_environ::FuncEnvironment;
use cranelift_codegen::ir::{self, InstBuilder};
use cranelift_frontend::FunctionBuilder;
use wasmtime_environ::VMSharedTypeIndex;
use wasmtime_environ::{
    null::NullTypeLayouts, GcTypeLayouts, ModuleInternedTypeIndex, PtrSize, TypeIndex, VMGcKind,
    WasmRefType, WasmResult,
};

#[derive(Default)]
pub struct NullCompiler {
    layouts: NullTypeLayouts,
}

impl NullCompiler {
    /// Emit code to perform an allocation inline.
    ///
    /// `kind` may be `VMGcKind::ExternRef` iff `ty` is `None`.
    ///
    /// `size` must be greater than or equal to `size_of(VMGcHeader)`.
    ///
    /// `align` must be greater than or equal to `align_of(VMGcHeader)` and a
    /// power of two.
    ///
    /// The resulting values are
    ///
    /// 1. The `VMGcRef` indexing into the GC heap.
    ///
    /// 2. The raw pointer to the start of the object inside the GC heap. This
    ///    may be used to access up to `size` bytes.
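    ///
    /// In pseudocode, ignoring the alignment and overflow checks below, the
    /// allocation is a simple bump (the names here are illustrative, not
    /// actual IR values):
    ///
    /// ```ignore
    /// let gc_ref = *next;           // current bump index into the GC heap
    /// *next = gc_ref + size;        // bump past the new object
    /// let ptr = heap_base + gc_ref; // raw pointer to the object's start
    /// ```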
    fn emit_inline_alloc(
        &mut self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder,
        kind: VMGcKind,
        ty: Option<ModuleInternedTypeIndex>,
        size: ir::Value,
        align: ir::Value,
    ) -> (ir::Value, ir::Value) {
        assert_eq!(builder.func.dfg.value_type(size), ir::types::I32);
        assert_eq!(builder.func.dfg.value_type(align), ir::types::I32);

        // Check that the size fits in the unused bits of a `VMGcKind`, since
        // the null collector stores the object's size there.
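        //
        // That is, the 32-bit header word is shared (see the `bor` that
        // builds `kind_and_size` below): the bits covered by `VMGcKind::MASK`
        // hold the kind, and the remaining bits hold the size, so a size that
        // sets any kind bit cannot be represented and must trap.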
        let mask = builder
            .ins()
            .iconst(ir::types::I32, i64::from(VMGcKind::MASK));
        let masked = builder.ins().band(size, mask);
        func_env.trapnz(builder, masked, crate::TRAP_ALLOCATION_TOO_LARGE);

        // Load the bump "pointer" (it is actually an index into the GC heap,
        // not a raw pointer).
        let pointer_type = func_env.pointer_type();
        let vmctx = func_env.vmctx_val(&mut builder.cursor());
        let ptr_to_next = builder.ins().load(
            pointer_type,
            ir::MemFlags::trusted().with_readonly(),
            vmctx,
            i32::from(func_env.offsets.ptr.vmctx_gc_heap_data()),
        );
        let next = builder
            .ins()
            .load(ir::types::I32, ir::MemFlags::trusted(), ptr_to_next, 0);

        // Round the bump "pointer" up to the requested alignment:
        //
        //     (next + (align - 1)) & !(align - 1)
        //
        // Overflow means that the alignment is too large to satisfy, so trap
        // accordingly. Note that `align - 1` can't overflow because `align` is
        // a power of two.
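        //
        // For example, with the illustrative values `next = 13` and
        // `align = 8`: `align - 1 = 7`, `next + 7 = 20`, and `20 & !7 = 16`,
        // which is the first 8-aligned offset at or after 13.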
        let minus_one = builder.ins().iconst(ir::types::I32, -1);
        let align_minus_one = builder.ins().iadd(align, minus_one);
        let next_plus_align_minus_one = func_env.uadd_overflow_trap(
            builder,
            next,
            align_minus_one,
            crate::TRAP_ALLOCATION_TOO_LARGE,
        );
        let not_align_minus_one = builder.ins().bnot(align_minus_one);
        let aligned = builder
            .ins()
            .band(next_plus_align_minus_one, not_align_minus_one);

        // Check whether the allocation fits in the heap space we have left.
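        //
        // Because `aligned` and `end_of_object` are indices relative to the
        // heap's `base` rather than raw pointers, comparing `end_of_object`
        // against `bound` (the heap's length in bytes) suffices.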
        let end_of_object =
            func_env.uadd_overflow_trap(builder, aligned, size, crate::TRAP_ALLOCATION_TOO_LARGE);
        let uext_end_of_object = uextend_i32_to_pointer_type(builder, pointer_type, end_of_object);
        let (base, bound) = func_env.get_gc_heap_base_bound(builder);
        let is_in_bounds = builder.ins().icmp(
            ir::condcodes::IntCC::UnsignedLessThanOrEqual,
            uext_end_of_object,
            bound,
        );
        func_env.trapz(builder, is_in_bounds, crate::TRAP_ALLOCATION_TOO_LARGE);

        // Write the header, update the bump "pointer", and return the newly
        // allocated object.
        //
        // TODO: Ideally we would use a single `i64` store to write both the
        // header and the type index, but that requires generating different
        // code for big-endian architectures, and I haven't bothered doing that
        // yet.
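        //
        // (The two 32-bit halves of such an `i64` would land at the kind and
        // type-index offsets in opposite orders on little- and big-endian
        // targets, which is why the single-store version needs per-endianness
        // code.)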
        let uext_aligned = uextend_i32_to_pointer_type(builder, pointer_type, aligned);
        let ptr_to_object = builder.ins().iadd(base, uext_aligned);
        let kind = builder
            .ins()
            .iconst(ir::types::I32, i64::from(kind.as_u32()));
        let kind_and_size = builder.ins().bor(kind, size);
        let ty = match ty {
            Some(ty) => func_env.module_interned_to_shared_ty(&mut builder.cursor(), ty),
            None => builder.ins().iconst(
                func_env.vmshared_type_index_ty(),
                i64::from(VMSharedTypeIndex::reserved_value().as_bits()),
            ),
        };
        builder.ins().store(
            ir::MemFlags::trusted(),
            kind_and_size,
            ptr_to_object,
            i32::try_from(wasmtime_environ::VM_GC_HEADER_KIND_OFFSET).unwrap(),
        );
        builder.ins().store(
            ir::MemFlags::trusted(),
            ty,
            ptr_to_object,
            i32::try_from(wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET).unwrap(),
        );
        builder
            .ins()
            .store(ir::MemFlags::trusted(), end_of_object, ptr_to_next, 0);

        (aligned, ptr_to_object)
    }
}

impl GcCompiler for NullCompiler {
    fn layouts(&self) -> &dyn GcTypeLayouts {
        &self.layouts
    }

    fn alloc_array(
        &mut self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        array_type_index: TypeIndex,
        init: super::ArrayInit<'_>,
    ) -> WasmResult<ir::Value> {
        let interned_type_index =
            func_env.module.types[array_type_index].unwrap_module_type_index();
        let ptr_ty = func_env.pointer_type();

        let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
        let array_layout = func_env.array_layout(interned_type_index).clone();
        let base_size = array_layout.base_size;
        let align = array_layout.align;
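        // The elements live `base_size` bytes from the start of the object,
        // so they begin `base_size - len_offset` bytes past the length field: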
        let len_to_elems_delta = base_size.checked_sub(len_offset).unwrap();

        // First, compute the array's total size from its base size, element
        // size, and length.
        let len = init.len(&mut builder.cursor());
        let size = emit_array_size(func_env, builder, &array_layout, len);
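        // (`emit_array_size` computes, roughly, `base_size + len * elem_size`;
        // the exact overflow behavior is defined by that helper.)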

        // Next, allocate the array.
        assert!(align.is_power_of_two());
        let align = builder.ins().iconst(ir::types::I32, i64::from(align));
        let (gc_ref, ptr_to_object) = self.emit_inline_alloc(
            func_env,
            builder,
            VMGcKind::ArrayRef,
            Some(interned_type_index),
            size,
            align,
        );

        // Write the array's length into its field.
        //
        // Note: we don't need to bounds-check the GC ref access here, because
        // the result of the inline allocation is trusted and we aren't reading
        // any pointers or offsets out from the (untrusted) GC heap.
        let len_addr = builder.ins().iadd_imm(ptr_to_object, i64::from(len_offset));
        let len = init.len(&mut builder.cursor());
        builder
            .ins()
            .store(ir::MemFlags::trusted(), len, len_addr, 0);

        // Finally, initialize the elements.
        let len_to_elems_delta = builder.ins().iconst(ptr_ty, i64::from(len_to_elems_delta));
        let elems_addr = builder.ins().iadd(len_addr, len_to_elems_delta);
        init.initialize(
            func_env,
            builder,
            interned_type_index,
            base_size,
            size,
            elems_addr,
            |func_env, builder, elem_ty, elem_addr, val| {
                write_field_at_addr(func_env, builder, elem_ty, elem_addr, val)
            },
        )?;

        Ok(gc_ref)
    }

    fn alloc_struct(
        &mut self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        struct_type_index: TypeIndex,
        field_vals: &[ir::Value],
    ) -> WasmResult<ir::Value> {
        let interned_type_index =
            func_env.module.types[struct_type_index].unwrap_module_type_index();
        let struct_layout = func_env.struct_layout(interned_type_index);

        // Copy the size and alignment out of the struct layout to avoid
        // borrow-checker issues.
        let struct_size = struct_layout.size;
        let struct_align = struct_layout.align;

        assert_eq!(VMGcKind::MASK & struct_size, 0);
        assert_eq!(VMGcKind::UNUSED_MASK & struct_size, struct_size);
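        // (Both assertions check up front what `emit_inline_alloc` would
        // otherwise catch at runtime: the size must fit in the header bits
        // not used by the kind.)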
        let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));

        let align = builder
            .ins()
            .iconst(ir::types::I32, i64::from(struct_align));

        let (struct_ref, raw_struct_pointer) = self.emit_inline_alloc(
            func_env,
            builder,
            VMGcKind::StructRef,
            Some(interned_type_index),
            struct_size_val,
            align,
        );

        // Initialize the struct's fields.
        //
        // Note: we don't need to bounds-check the GC ref access here, because
        // the result of the inline allocation is trusted and we aren't reading
        // any pointers or offsets out from the (untrusted) GC heap.
        initialize_struct_fields(
            func_env,
            builder,
            interned_type_index,
            raw_struct_pointer,
            field_vals,
            |func_env, builder, ty, field_addr, val| {
                write_field_at_addr(func_env, builder, ty, field_addr, val)
            },
        )?;

        Ok(struct_ref)
    }

    fn translate_read_gc_reference(
        &mut self,
        _func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder,
        _ty: WasmRefType,
        src: ir::Value,
        flags: ir::MemFlags,
    ) -> WasmResult<ir::Value> {
        // NB: Don't use `unbarriered_load_gc_ref` here because we don't need to
        // mark the value as requiring inclusion in stack maps.
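        //
        // GC references are 32-bit indices into the GC heap, hence the `I32`
        // load regardless of the target's pointer width.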
        Ok(builder.ins().load(ir::types::I32, flags, src, 0))
    }

    fn translate_write_gc_reference(
        &mut self,
        _func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder,
        ty: WasmRefType,
        dst: ir::Value,
        new_val: ir::Value,
        flags: ir::MemFlags,
    ) -> WasmResult<()> {
        unbarriered_store_gc_ref(builder, ty.heap_type, dst, new_val, flags)
    }
}