winch_codegen/codegen/context.rs

use anyhow::{bail, ensure, Result};
use wasmparser::{Ieee32, Ieee64};
use wasmtime_environ::{VMOffsets, WasmHeapType, WasmValType};

use super::ControlStackFrame;
use crate::{
    abi::{scratch, vmctx, ABIOperand, ABIResults, RetArea},
    codegen::{CodeGenError, CodeGenPhase, Emission, Prologue},
    frame::Frame,
    isa::reg::RegClass,
    masm::{
        ExtractLaneKind, MacroAssembler, OperandSize, RegImm, ReplaceLaneKind, SPOffset, ShiftKind,
        StackSlot,
    },
    reg::{writable, Reg, WritableReg},
    regalloc::RegAlloc,
    stack::{Stack, TypedReg, Val},
};

/// The code generation context.
///
/// The code generation context is made up of three essential data
/// structures:
///
/// * The register allocator, in charge of keeping the inventory of register
///   availability.
/// * The value stack, which keeps track of the state of the values
///   after each operation.
/// * The current function's frame.
///
/// These data structures normally cooperate with each other to perform
/// most of the operations needed during the code generation process. The
/// code generation context should generally be used as the single entry
/// point to access the compound functionality provided by its elements.
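///
/// # Example
///
/// A minimal sketch of the context's lifecycle, assuming that the
/// `regalloc`, `stack`, `frame`, and `vmoffsets` values have been built
/// elsewhere:
///
/// ```ignore
/// // Create the context during the prologue phase...
/// let context = CodeGenContext::new(regalloc, stack, frame, vmoffsets);
/// // ...and transition it once the function body is about to be emitted.
/// let mut context = context.for_emission();
/// ```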
pub(crate) struct CodeGenContext<'a, P: CodeGenPhase> {
    /// The register allocator.
    pub regalloc: RegAlloc,
    /// The value stack.
    pub stack: Stack,
    /// The current function's frame.
    pub frame: Frame<P>,
    /// Reachability state.
    pub reachable: bool,
    /// A reference to the VMOffsets.
    pub vmoffsets: &'a VMOffsets<u8>,
}

impl<'a> CodeGenContext<'a, Emission> {
    /// Prepares arguments for emitting an i32 shift operation.
    pub fn i32_shift<M>(&mut self, masm: &mut M, kind: ShiftKind) -> Result<()>
    where
        M: MacroAssembler,
    {
        let top = self
            .stack
            .peek()
            .ok_or_else(|| CodeGenError::missing_values_in_stack())?;

        if top.is_i32_const() {
            let val = self
                .stack
                .pop_i32_const()
                .ok_or_else(|| CodeGenError::missing_values_in_stack())?;
            let typed_reg = self.pop_to_reg(masm, None)?;
            masm.shift_ir(
                writable!(typed_reg.reg),
                val as u64,
                typed_reg.reg,
                kind,
                OperandSize::S32,
            )?;
            self.stack.push(typed_reg.into());
        } else {
            masm.shift(self, kind, OperandSize::S32)?;
        }
        Ok(())
    }

    /// Prepares arguments for emitting an i64 shift operation.
    pub fn i64_shift<M>(&mut self, masm: &mut M, kind: ShiftKind) -> Result<()>
    where
        M: MacroAssembler,
    {
        let top = self
            .stack
            .peek()
            .ok_or_else(|| CodeGenError::missing_values_in_stack())?;
        if top.is_i64_const() {
            let val = self
                .stack
                .pop_i64_const()
                .ok_or_else(|| CodeGenError::missing_values_in_stack())?;
            let typed_reg = self.pop_to_reg(masm, None)?;
            masm.shift_ir(
                writable!(typed_reg.reg),
                val as u64,
                typed_reg.reg,
                kind,
                OperandSize::S64,
            )?;
            self.stack.push(typed_reg.into());
        } else {
            masm.shift(self, kind, OperandSize::S64)?;
        }

        Ok(())
    }
}

impl<'a> CodeGenContext<'a, Prologue> {
    /// Create a new code generation context.
    pub fn new(
        regalloc: RegAlloc,
        stack: Stack,
        frame: Frame<Prologue>,
        vmoffsets: &'a VMOffsets<u8>,
    ) -> Self {
        Self {
            regalloc,
            stack,
            frame,
            reachable: true,
            vmoffsets,
        }
    }

    /// Transitions the context, including its frame, to the [`Emission`]
    /// code generation phase.
    pub fn for_emission(self) -> CodeGenContext<'a, Emission> {
        CodeGenContext {
            regalloc: self.regalloc,
            stack: self.stack,
            reachable: self.reachable,
            vmoffsets: self.vmoffsets,
            frame: self.frame.for_emission(),
        }
    }
}

impl<'a> CodeGenContext<'a, Emission> {
    /// Request a specific register from the register allocator,
    /// spilling if it is not available.
    pub fn reg<M: MacroAssembler>(&mut self, named: Reg, masm: &mut M) -> Result<Reg> {
        self.regalloc.reg(named, |regalloc| {
            Self::spill_impl(&mut self.stack, regalloc, &self.frame, masm)
        })
    }

    /// Allocate a register for the given WebAssembly type.
    pub fn reg_for_type<M: MacroAssembler>(
        &mut self,
        ty: WasmValType,
        masm: &mut M,
    ) -> Result<Reg> {
        use WasmValType::*;
        match ty {
            I32 | I64 => self.reg_for_class(RegClass::Int, masm),
            F32 | F64 => self.reg_for_class(RegClass::Float, masm),
            // All of our supported architectures use the float registers for vector operations.
            V128 => self.reg_for_class(RegClass::Float, masm),
            Ref(rt) => match rt.heap_type {
                WasmHeapType::Func | WasmHeapType::Extern => {
                    self.reg_for_class(RegClass::Int, masm)
                }
                _ => bail!(CodeGenError::unsupported_wasm_type()),
            },
        }
    }

    /// Request the register allocator to provide the next available
    /// register of the specified class.
    pub fn reg_for_class<M: MacroAssembler>(
        &mut self,
        class: RegClass,
        masm: &mut M,
    ) -> Result<Reg> {
        self.regalloc.reg_for_class(class, &mut |regalloc| {
            Self::spill_impl(&mut self.stack, regalloc, &self.frame, masm)
        })
    }

    /// Convenience wrapper around `CodeGenContext::reg_for_class`, to
    /// request the next available general purpose register.
    pub fn any_gpr<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<Reg> {
        self.reg_for_class(RegClass::Int, masm)
    }

    /// Convenience wrapper around `CodeGenContext::reg_for_class`, to
    /// request the next available floating point register.
    pub fn any_fpr<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<Reg> {
        self.reg_for_class(RegClass::Float, masm)
    }

    /// Executes the provided function, guaranteeing that the specified set of
    /// registers, if any, remain unallocatable throughout the function's
    /// execution.
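    ///
    /// # Example
    ///
    /// A sketch that reserves two hypothetical registers, `rax` and `rdx`,
    /// while emitting code that must not receive them from the allocator;
    /// the register constants and the closure body are assumptions:
    ///
    /// ```ignore
    /// context.without(&[rax, rdx], masm, |cx, masm| {
    ///     // Emit code here; `rax` and `rdx` stay reserved for its duration.
    ///     Ok::<(), anyhow::Error>(())
    /// })??;
    /// ```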
    pub fn without<'r, T, M, F>(
        &mut self,
        regs: impl IntoIterator<Item = &'r Reg> + Copy,
        masm: &mut M,
        mut f: F,
    ) -> Result<T>
    where
        M: MacroAssembler,
        F: FnMut(&mut Self, &mut M) -> T,
    {
        for r in regs {
            self.reg(*r, masm)?;
        }

        let result = f(self, masm);

        for r in regs {
            self.free_reg(*r);
        }

        Ok(result)
    }

    /// Free the given register.
    pub fn free_reg(&mut self, reg: impl Into<Reg>) {
        let reg: Reg = reg.into();
        self.regalloc.free(reg);
    }

    /// Loads the value at the top of the stack into the next available
    /// register, spilling if no registers are available; if the value is
    /// already in a register, that register is reused. Optionally, the
    /// caller may specify a particular destination register. When a named
    /// register is requested and the value is not already in it, a
    /// register-to-register move may be emitted, in which case the source
    /// register is freed.
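    ///
    /// # Example
    ///
    /// A sketch of both modes, assuming a value is known to be on the
    /// value stack and `dst` is a previously chosen register:
    ///
    /// ```ignore
    /// // Pop into any suitable register.
    /// let any = context.pop_to_reg(masm, None)?;
    /// // Pop into the specific register `dst`.
    /// let named = context.pop_to_reg(masm, Some(dst))?;
    /// ```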
    pub fn pop_to_reg<M: MacroAssembler>(
        &mut self,
        masm: &mut M,
        named: Option<Reg>,
    ) -> Result<TypedReg> {
        let typed_reg = if let Some(dst) = named {
            self.stack.pop_named_reg(dst)
        } else {
            self.stack.pop_reg()
        };

        if let Some(dst) = typed_reg {
            return Ok(dst);
        }

        let val = self.stack.pop().expect("a value at stack top");
        let reg = if let Some(r) = named {
            self.reg(r, masm)?
        } else {
            self.reg_for_type(val.ty(), masm)?
        };

        if val.is_mem() {
            let mem = val.unwrap_mem();
            let curr_offset = masm.sp_offset()?.as_u32();
            let slot_offset = mem.slot.offset.as_u32();
            ensure!(
                curr_offset == slot_offset,
                CodeGenError::invalid_sp_offset(),
            );
            masm.pop(writable!(reg), val.ty().try_into()?)?;
        } else {
            self.move_val_to_reg(&val, reg, masm)?;
            // Free the source value if it is a register.
            if val.is_reg() {
                self.free_reg(val.unwrap_reg());
            }
        }

        Ok(TypedReg::new(val.ty(), reg))
    }

    /// Pops the top of the value stack and stores it at the specified address.
    pub fn pop_to_addr<M: MacroAssembler>(&mut self, masm: &mut M, addr: M::Address) -> Result<()> {
        let val = self.stack.pop().expect("a value at stack top");
        let ty = val.ty();
        let size: OperandSize = ty.try_into()?;
        match val {
            Val::Reg(tr) => {
                masm.store(tr.reg.into(), addr, size)?;
                self.free_reg(tr.reg);
            }
            Val::I32(v) => masm.store(RegImm::i32(v), addr, size)?,
            Val::I64(v) => masm.store(RegImm::i64(v), addr, size)?,
            Val::F32(v) => masm.store(RegImm::f32(v.bits()), addr, size)?,
            Val::F64(v) => masm.store(RegImm::f64(v.bits()), addr, size)?,
            Val::V128(v) => masm.store(RegImm::v128(v), addr, size)?,
            Val::Local(local) => {
                let slot = self.frame.get_wasm_local(local.index);
                let scratch = scratch!(M);
                let local_addr = masm.local_address(&slot)?;
                masm.load(local_addr, writable!(scratch), size)?;
                masm.store(scratch.into(), addr, size)?;
            }
            Val::Memory(_) => {
                let scratch = scratch!(M, &ty);
                masm.pop(writable!(scratch), size)?;
                masm.store(scratch.into(), addr, size)?;
            }
        }

        Ok(())
    }

    /// Move a stack value to the given register.
    pub fn move_val_to_reg<M: MacroAssembler>(
        &self,
        src: &Val,
        dst: Reg,
        masm: &mut M,
    ) -> Result<()> {
        let size: OperandSize = src.ty().try_into()?;
        match src {
            Val::Reg(tr) => masm.mov(writable!(dst), RegImm::reg(tr.reg), size),
            Val::I32(imm) => masm.mov(writable!(dst), RegImm::i32(*imm), size),
            Val::I64(imm) => masm.mov(writable!(dst), RegImm::i64(*imm), size),
            Val::F32(imm) => masm.mov(writable!(dst), RegImm::f32(imm.bits()), size),
            Val::F64(imm) => masm.mov(writable!(dst), RegImm::f64(imm.bits()), size),
            Val::V128(imm) => masm.mov(writable!(dst), RegImm::v128(*imm), size),
            Val::Local(local) => {
                let slot = self.frame.get_wasm_local(local.index);
                let addr = masm.local_address(&slot)?;
                masm.load(addr, writable!(dst), size)
            }
            Val::Memory(mem) => {
                let addr = masm.address_from_sp(mem.slot.offset)?;
                masm.load(addr, writable!(dst), size)
            }
        }
    }

    /// Prepares arguments for emitting a unary operation.
    ///
    /// The `emit` function returns the `TypedReg` to put on the value stack.
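    ///
    /// # Example
    ///
    /// A sketch for a 32-bit unary operation; the `neg`-style masm method
    /// used in the closure is an assumption, not a method the trait is
    /// guaranteed to provide:
    ///
    /// ```ignore
    /// context.unop(masm, |masm, reg| {
    ///     // `neg` is a hypothetical masm helper.
    ///     masm.neg(writable!(reg), reg, OperandSize::S32)?;
    ///     Ok(TypedReg::i32(reg))
    /// })?;
    /// ```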
    pub fn unop<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        let typed_reg = self.pop_to_reg(masm, None)?;
        let dst = emit(masm, typed_reg.reg)?;
        self.stack.push(dst.into());

        Ok(())
    }

    /// Prepares arguments for emitting a binary operation.
    ///
    /// The `emit` function returns the `TypedReg` to put on the value stack.
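    ///
    /// # Example
    ///
    /// A sketch for a 64-bit binary operation; the `add`-style masm method
    /// in the closure is an assumption:
    ///
    /// ```ignore
    /// context.binop(masm, OperandSize::S64, |masm, dst, src, size| {
    ///     // `add` is a hypothetical masm helper.
    ///     masm.add(writable!(dst), dst, src.into(), size)?;
    ///     Ok(TypedReg::i64(dst))
    /// })?;
    /// ```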
    pub fn binop<F, M>(&mut self, masm: &mut M, size: OperandSize, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, OperandSize) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.pop_to_reg(masm, None)?;
        let dst = emit(masm, dst.reg, src.reg.into(), size)?;
        self.free_reg(src);
        self.stack.push(dst.into());

        Ok(())
    }

    /// Prepares arguments for emitting an f32 or f64 comparison operation.
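    ///
    /// # Example
    ///
    /// A sketch for an `f64` equality comparison; the `float_eq`-style
    /// masm helper in the closure is hypothetical:
    ///
    /// ```ignore
    /// context.float_cmp_op(masm, OperandSize::S64, |masm, dst, src1, src2, size| {
    ///     // Compare src1 and src2, materializing the boolean result in `dst`.
    ///     masm.float_eq(writable!(dst), src1, src2, size) // hypothetical helper
    /// })?;
    /// ```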
    pub fn float_cmp_op<F, M>(&mut self, masm: &mut M, size: OperandSize, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, Reg, OperandSize) -> Result<()>,
        M: MacroAssembler,
    {
        let src2 = self.pop_to_reg(masm, None)?;
        let src1 = self.pop_to_reg(masm, None)?;
        let dst = self.any_gpr(masm)?;
        emit(masm, dst, src1.reg, src2.reg, size)?;
        self.free_reg(src1);
        self.free_reg(src2);

        let dst = match size {
            // Float comparison operators are defined as
            // [f64 f64] -> i32
            // https://webassembly.github.io/spec/core/appendix/index-instructions.html
            OperandSize::S32 | OperandSize::S64 => TypedReg::i32(dst),
            OperandSize::S8 | OperandSize::S16 | OperandSize::S128 => {
                bail!(CodeGenError::unexpected_operand_size())
            }
        };
        self.stack.push(dst.into());

        Ok(())
    }

    /// Prepares arguments for emitting an i32 binary operation.
    ///
    /// The `emit` function returns the `TypedReg` to put on the value stack.
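    ///
    /// # Example
    ///
    /// A sketch showing how a constant at the top of the stack is folded
    /// into a `RegImm::i32` immediate instead of consuming a register; the
    /// `add`-style masm method is an assumption:
    ///
    /// ```ignore
    /// context.i32_binop(masm, |masm, dst, src, size| {
    ///     // `add` is a hypothetical masm helper; `src` may be an immediate.
    ///     masm.add(writable!(dst), dst, src, size)?;
    ///     Ok(TypedReg::i32(dst))
    /// })?;
    /// ```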
    pub fn i32_binop<F, M>(&mut self, masm: &mut M, mut emit: F) -> Result<()>
    where
        F: FnMut(&mut M, Reg, RegImm, OperandSize) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        match self.pop_i32_const() {
            Some(val) => {
                let typed_reg = self.pop_to_reg(masm, None)?;
                let dst = emit(masm, typed_reg.reg, RegImm::i32(val), OperandSize::S32)?;
                self.stack.push(dst.into());
            }
            None => self.binop(masm, OperandSize::S32, |masm, dst, src, size| {
                emit(masm, dst, src.into(), size)
            })?,
        }
        Ok(())
    }

    /// Prepares arguments for emitting an i64 binary operation.
    ///
    /// The `emit` function returns the `TypedReg` to put on the value stack.
    pub fn i64_binop<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, RegImm, OperandSize) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        match self.pop_i64_const() {
            Some(val) => {
                let typed_reg = self.pop_to_reg(masm, None)?;
                let dst = emit(masm, typed_reg.reg, RegImm::i64(val), OperandSize::S64)?;
                self.stack.push(dst.into());
            }
            None => self.binop(masm, OperandSize::S64, |masm, dst, src, size| {
                emit(masm, dst, src.into(), size)
            })?,
        }
        Ok(())
    }

    /// Returns the i32 const on top of the stack or `None` if there isn't one.
    pub fn pop_i32_const(&mut self) -> Option<i32> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_i32_const() {
            let val = self
                .stack
                .pop_i32_const()
                .expect("i32 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

    /// Returns the i64 const on top of the stack or `None` if there isn't one.
    pub fn pop_i64_const(&mut self) -> Option<i64> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_i64_const() {
            let val = self
                .stack
                .pop_i64_const()
                .expect("i64 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

    /// Returns the f32 const on top of the stack or `None` if there isn't one.
    pub fn pop_f32_const(&mut self) -> Option<Ieee32> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_f32_const() {
            let val = self
                .stack
                .pop_f32_const()
                .expect("f32 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

    /// Returns the f64 const on top of the stack or `None` if there isn't one.
    pub fn pop_f64_const(&mut self) -> Option<Ieee64> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_f64_const() {
            let val = self
                .stack
                .pop_f64_const()
                .expect("f64 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

    /// Prepares arguments for emitting a convert operation.
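    ///
    /// # Example
    ///
    /// A sketch for an `f64` to `f32` style conversion; the `demote`-style
    /// masm method in the closure is an assumption:
    ///
    /// ```ignore
    /// context.convert_op(masm, WasmValType::F32, |masm, dst, src, dst_size| {
    ///     // `demote` is a hypothetical masm helper.
    ///     masm.demote(writable!(dst), src, dst_size)
    /// })?;
    /// ```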
    pub fn convert_op<F, M>(&mut self, masm: &mut M, dst_ty: WasmValType, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, OperandSize) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.reg_for_type(dst_ty, masm)?;
        let dst_size = match dst_ty {
            WasmValType::I32 => OperandSize::S32,
            WasmValType::I64 => OperandSize::S64,
            WasmValType::F32 => OperandSize::S32,
            WasmValType::F64 => OperandSize::S64,
            WasmValType::V128 => bail!(CodeGenError::unsupported_wasm_type()),
            WasmValType::Ref(_) => bail!(CodeGenError::unsupported_wasm_type()),
        };

        emit(masm, dst, src.into(), dst_size)?;

        self.free_reg(src);
        self.stack.push(TypedReg::new(dst_ty, dst).into());
        Ok(())
    }

    /// Prepares arguments for emitting a convert operation with a temporary
    /// register.
    pub fn convert_op_with_tmp_reg<F, M>(
        &mut self,
        masm: &mut M,
        dst_ty: WasmValType,
        tmp_reg_class: RegClass,
        emit: F,
    ) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, Reg, OperandSize) -> Result<()>,
        M: MacroAssembler,
    {
        let tmp_gpr = self.reg_for_class(tmp_reg_class, masm)?;
        self.convert_op(masm, dst_ty, |masm, dst, src, dst_size| {
            emit(masm, dst, src, tmp_gpr, dst_size)
        })?;
        self.free_reg(tmp_gpr);
        Ok(())
    }

    /// Prepares arguments for emitting an extract lane operation.
    pub fn extract_lane_op<F, M>(
        &mut self,
        masm: &mut M,
        kind: ExtractLaneKind,
        emit: F,
    ) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, WritableReg, ExtractLaneKind) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = writable!(match kind {
            ExtractLaneKind::I8x16S
            | ExtractLaneKind::I8x16U
            | ExtractLaneKind::I16x8S
            | ExtractLaneKind::I16x8U
            | ExtractLaneKind::I32x4
            | ExtractLaneKind::I64x2 => self.any_gpr(masm)?,
            ExtractLaneKind::F32x4 | ExtractLaneKind::F64x2 => src.reg,
        });

        emit(masm, src.reg, dst, kind)?;

        match kind {
            ExtractLaneKind::I8x16S
            | ExtractLaneKind::I8x16U
            | ExtractLaneKind::I16x8S
            | ExtractLaneKind::I16x8U
            | ExtractLaneKind::I32x4
            | ExtractLaneKind::I64x2 => self.free_reg(src),
            _ => (),
        }

        let dst = dst.to_reg();
        let dst = match kind {
            ExtractLaneKind::I8x16S
            | ExtractLaneKind::I8x16U
            | ExtractLaneKind::I16x8S
            | ExtractLaneKind::I16x8U
            | ExtractLaneKind::I32x4 => TypedReg::i32(dst),
            ExtractLaneKind::I64x2 => TypedReg::i64(dst),
            ExtractLaneKind::F32x4 => TypedReg::f32(dst),
            ExtractLaneKind::F64x2 => TypedReg::f64(dst),
        };

        self.stack.push(Val::Reg(dst));
        Ok(())
    }

    /// Prepares arguments for emitting a replace lane operation.
    pub fn replace_lane_op<F, M>(
        &mut self,
        masm: &mut M,
        kind: ReplaceLaneKind,
        emit: F,
    ) -> Result<()>
    where
        F: FnOnce(&mut M, RegImm, WritableReg, ReplaceLaneKind) -> Result<()>,
        M: MacroAssembler,
    {
        let src = match kind {
            ReplaceLaneKind::I8x16 | ReplaceLaneKind::I16x8 | ReplaceLaneKind::I32x4 => {
                self.pop_i32_const().map(RegImm::i32)
            }
            ReplaceLaneKind::I64x2 => self.pop_i64_const().map(RegImm::i64),
            ReplaceLaneKind::F32x4 => self.pop_f32_const().map(|v| RegImm::f32(v.bits())),
            ReplaceLaneKind::F64x2 => self.pop_f64_const().map(|v| RegImm::f64(v.bits())),
        }
        .map_or_else(
            || Ok(RegImm::reg(self.pop_to_reg(masm, None)?.into())),
            Ok::<_, anyhow::Error>,
        )?;

        let dst = self.pop_to_reg(masm, None)?;

        emit(masm, src, writable!(dst.into()), kind)?;

        if let RegImm::Reg(reg) = src {
            self.free_reg(reg);
        }
        self.stack.push(dst.into());

        Ok(())
    }

    /// Drops the last `n` elements of the stack, calling the provided
    /// function for each of the `n` dropped values, in top-to-bottom
    /// order.
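    ///
    /// # Example
    ///
    /// A sketch that drops the two topmost values and releases any
    /// registers they hold:
    ///
    /// ```ignore
    /// context.drop_last(2, |regalloc, val| {
    ///     if let Val::Reg(tr) = val {
    ///         regalloc.free(tr.reg);
    ///     }
    ///     Ok(())
    /// })?;
    /// ```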
    pub fn drop_last<F>(&mut self, last: usize, mut f: F) -> Result<()>
    where
        F: FnMut(&mut RegAlloc, &Val) -> Result<()>,
    {
        if last > 0 {
            let len = self.stack.len();
            ensure!(last <= len, CodeGenError::unexpected_value_stack_index());
            let truncate = self.stack.len() - last;
            let stack_mut = self.stack.inner_mut();

            // Invoke the callback in top-to-bottom order.
            for v in stack_mut[truncate..].iter().rev() {
                f(&mut self.regalloc, v)?
            }
            stack_mut.truncate(truncate);
        }

        Ok(())
    }

    /// Convenience wrapper around [`Self::spill_impl`].
    ///
    /// This function exists for cases in which triggering an unconditional
    /// spill is needed, like before entering control flow.
    pub fn spill<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<()> {
        Self::spill_impl(&mut self.stack, &mut self.regalloc, &self.frame, masm)
    }

    /// Prepares the compiler to emit an unconditional jump to the given
    /// destination branch. This process involves:
    /// * Balancing the machine stack pointer and the value stack by
    ///   popping them to match the destination branch.
    /// * Updating the reachability state.
    /// * Marking the destination frame as a destination target.
    pub fn unconditional_jump<M, F>(
        &mut self,
        dest: &mut ControlStackFrame,
        masm: &mut M,
        mut f: F,
    ) -> Result<()>
    where
        M: MacroAssembler,
        F: FnMut(&mut M, &mut Self, &mut ControlStackFrame) -> Result<()>,
    {
        let state = dest.stack_state();
        let target_offset = state.target_offset;
        let base_offset = state.base_offset;
        // Invariant: the SP must be greater than or equal to the target
        // SP, given that we haven't popped any results by this point
        // yet; that may happen in the callback.
        ensure!(
            masm.sp_offset()?.as_u32() >= base_offset.as_u32(),
            CodeGenError::invalid_sp_offset()
        );
        f(masm, self, dest)?;

        // The following snippet pops the stack pointer to ensure that it
        // is correctly placed according to the expectations of the destination
        // branch.
        //
        // This is done in the context of unconditional jumps, as the machine
        // stack might be left unbalanced at the jump site due to register
        // spills. Note that in some cases the stack pointer offset might
        // already be less than or equal to the original stack pointer offset
        // registered when entering the destination control stack frame, which
        // effectively means that when reaching the jump site no extra space was
        // allocated, similar to what would happen in a fall-through, in which we
        // assume that the program has allocated and deallocated the right
        // amount of stack space.
        //
        // More generally speaking, the current stack pointer will be less than
        // the original stack pointer offset in cases in which the top value in
        // the value stack is a memory entry which needs to be popped into the
        // return location according to the ABI (a register for single-value
        // returns and a memory slot for 1+ returns). This could happen in the
        // callback invocation above if the callback invokes
        // `ControlStackFrame::pop_abi_results` (e.g. the `br` instruction).
        //
        // After an unconditional jump, the compiler will enter an
        // unreachable state; instead of immediately truncating the value stack
        // to the expected length of the destination branch, we let the
        // reachability analysis code decide what should happen with the length
        // of the value stack once reachability is actually restored. At that
        // point, the right stack pointer offset will also be restored, which
        // should match the contents of the value stack.
        masm.ensure_sp_for_jump(target_offset)?;
        dest.set_as_target();
        masm.jmp(*dest.label())?;
        self.reachable = false;
        Ok(())
    }

    /// Pushes the ABI representation of the results onto the value stack.
    pub fn push_abi_results<M, F>(
        &mut self,
        results: &ABIResults,
        masm: &mut M,
        mut calculate_ret_area: F,
    ) -> Result<()>
    where
        M: MacroAssembler,
        F: FnMut(&ABIResults, &mut CodeGenContext<Emission>, &mut M) -> Option<RetArea>,
    {
        let area = results
            .on_stack()
            .then(|| calculate_ret_area(&results, self, masm).unwrap());

        for operand in results.operands().iter() {
            match operand {
                ABIOperand::Reg { reg, ty, .. } => {
                    ensure!(
                        self.regalloc.reg_available(*reg),
                        CodeGenError::expected_register_to_be_available(),
                    );

                    let typed_reg = TypedReg::new(*ty, self.reg(*reg, masm)?);
                    self.stack.push(typed_reg.into());
                }
                ABIOperand::Stack { ty, offset, size } => match area.unwrap() {
                    RetArea::SP(sp_offset) => {
                        let slot =
                            StackSlot::new(SPOffset::from_u32(sp_offset.as_u32() - offset), *size);
                        self.stack.push(Val::mem(*ty, slot));
                    }
                    // This function is only expected to be called when dealing
                    // with control flow and when calling functions; as a
                    // callee, only [Self::pop_abi_results] is needed when
                    // finalizing the function compilation.
                    _ => bail!(CodeGenError::unexpected_function_call()),
                },
            }
        }

        Ok(())
    }

    /// Truncates the value stack to the specified target.
    /// This function is intended to be used only when restoring the code
    /// generation's reachability state, when handling an unreachable end or
    /// else.
    pub fn truncate_stack_to(&mut self, target: usize) -> Result<()> {
        if self.stack.len() > target {
            self.drop_last(self.stack.len() - target, |regalloc, val| match val {
                Val::Reg(tr) => Ok(regalloc.free(tr.reg)),
                _ => Ok(()),
            })
        } else {
            Ok(())
        }
    }

    /// Load the [VMContext] pointer into the designated pinned register.
    pub fn load_vmctx<M>(&mut self, masm: &mut M) -> Result<()>
    where
        M: MacroAssembler,
    {
        let addr = masm.local_address(&self.frame.vmctx_slot())?;
        masm.load_ptr(addr, writable!(vmctx!(M)))
    }

    /// Spill locals and registers to memory.
    // TODO: optimize the spill range;
    // At any point in the program, the stack might already contain memory
    // entries; we could effectively ignore that range, only focusing on the
    // range that contains spillable values.
    fn spill_impl<M: MacroAssembler>(
        stack: &mut Stack,
        regalloc: &mut RegAlloc,
        frame: &Frame<Emission>,
        masm: &mut M,
    ) -> Result<()> {
        for v in stack.inner_mut() {
            match v {
                Val::Reg(r) => {
                    let slot = masm.push(r.reg, r.ty.try_into()?)?;
                    regalloc.free(r.reg);
                    *v = Val::mem(r.ty, slot);
                }
                Val::Local(local) => {
                    let slot = frame.get_wasm_local(local.index);
                    let addr = masm.local_address(&slot)?;
                    let scratch = scratch!(M, &slot.ty);
                    masm.load(addr, writable!(scratch), slot.ty.try_into()?)?;
                    let stack_slot = masm.push(scratch, slot.ty.try_into()?)?;
                    *v = Val::mem(slot.ty, stack_slot);
                }
                _ => {}
            }
        }

        Ok(())
    }

    /// Prepares for emitting a binary operation where four 64-bit operands
    /// are used to produce two 64-bit results, e.g. a 128-bit binop.
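    ///
    /// # Example
    ///
    /// A sketch for a 128-bit addition built from two 64-bit halves; the
    /// `add128`-style masm method in the closure is an assumption:
    ///
    /// ```ignore
    /// context.binop128(masm, |masm, lhs_lo, lhs_hi, rhs_lo, rhs_hi| {
    ///     // `add128` is a hypothetical masm helper that adds the two
    ///     // 128-bit values and writes the result over the lhs halves.
    ///     masm.add128(
    ///         writable!(lhs_lo),
    ///         writable!(lhs_hi),
    ///         lhs_lo, lhs_hi, rhs_lo, rhs_hi,
    ///     )?;
    ///     Ok((TypedReg::i64(lhs_lo), TypedReg::i64(lhs_hi)))
    /// })?;
    /// ```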
    pub fn binop128<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, Reg, Reg) -> Result<(TypedReg, TypedReg)>,
        M: MacroAssembler,
    {
        let rhs_hi = self.pop_to_reg(masm, None)?;
        let rhs_lo = self.pop_to_reg(masm, None)?;
        let lhs_hi = self.pop_to_reg(masm, None)?;
        let lhs_lo = self.pop_to_reg(masm, None)?;
        let (lo, hi) = emit(masm, lhs_lo.reg, lhs_hi.reg, rhs_lo.reg, rhs_hi.reg)?;
        self.free_reg(rhs_hi);
        self.free_reg(rhs_lo);
        self.stack.push(lo.into());
        self.stack.push(hi.into());

        Ok(())
    }

    /// Prepares to emit a vector `all_true` operation.
    pub fn v128_all_true_op<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.any_gpr(masm)?;
        emit(masm, src.reg, dst)?;
        self.free_reg(src);
        self.stack.push(TypedReg::i32(dst).into());

        Ok(())
    }

    /// Prepares to emit a vector `bitmask` operation.
    pub fn v128_bitmask_op<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.any_gpr(masm)?;
        emit(masm, src.reg, dst)?;
        self.free_reg(src);
        self.stack.push(TypedReg::i32(dst).into());

        Ok(())
    }

    /// Pops a register from the stack and immediately frees it. Used, for
    /// example, to discard values from the last operation.
    pub fn pop_and_free<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<()> {
        let reg = self.pop_to_reg(masm, None)?;
        self.free_reg(reg.reg);
        Ok(())
    }
}