use anyhow::{bail, ensure, Result};
use wasmparser::{Ieee32, Ieee64};
use wasmtime_environ::{VMOffsets, WasmHeapType, WasmValType};

use super::ControlStackFrame;
use crate::{
    abi::{scratch, vmctx, ABIOperand, ABIResults, RetArea},
    codegen::{CodeGenError, CodeGenPhase, Emission, Prologue},
    frame::Frame,
    isa::reg::RegClass,
    masm::{
        ExtractLaneKind, MacroAssembler, OperandSize, RegImm, ReplaceLaneKind, SPOffset, ShiftKind,
        StackSlot,
    },
    reg::{writable, Reg, WritableReg},
    regalloc::RegAlloc,
    stack::{Stack, TypedReg, Val},
};

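/// The code generation context.
///
/// Brings together the data structures that cooperate during code
/// generation:
///
/// * The register allocator, which keeps the inventory of register
///   availability.
/// * The value stack, which tracks the state of each value after every
///   operation.
/// * The current function's frame.
///
/// The context is generally used as the single entry point to the compound
/// functionality provided by its elements.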
pub(crate) struct CodeGenContext<'a, P: CodeGenPhase> {
    /// The register allocator.
    pub regalloc: RegAlloc,
    /// The value stack.
    pub stack: Stack,
    /// The current function's frame.
    pub frame: Frame<P>,
    /// Reachability state.
    pub reachable: bool,
    /// A reference to the VMOffsets.
    pub vmoffsets: &'a VMOffsets<u8>,
}

impl<'a> CodeGenContext<'a, Emission> {
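    /// Emits an i32 shift. When the value at the top of the stack is an i32
    /// constant, the shift amount is emitted as an immediate.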
    pub fn i32_shift<M>(&mut self, masm: &mut M, kind: ShiftKind) -> Result<()>
    where
        M: MacroAssembler,
    {
        let top = self
            .stack
            .peek()
            .ok_or_else(|| CodeGenError::missing_values_in_stack())?;

        if top.is_i32_const() {
            let val = self
                .stack
                .pop_i32_const()
                .ok_or_else(|| CodeGenError::missing_values_in_stack())?;
            let typed_reg = self.pop_to_reg(masm, None)?;
            masm.shift_ir(
                writable!(typed_reg.reg),
                val as u64,
                typed_reg.reg,
                kind,
                OperandSize::S32,
            )?;
            self.stack.push(typed_reg.into());
        } else {
            masm.shift(self, kind, OperandSize::S32)?;
        }
        Ok(())
    }

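    /// Emits an i64 shift. When the value at the top of the stack is an i64
    /// constant, the shift amount is emitted as an immediate.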
    pub fn i64_shift<M>(&mut self, masm: &mut M, kind: ShiftKind) -> Result<()>
    where
        M: MacroAssembler,
    {
        let top = self
            .stack
            .peek()
            .ok_or_else(|| CodeGenError::missing_values_in_stack())?;
        if top.is_i64_const() {
            let val = self
                .stack
                .pop_i64_const()
                .ok_or_else(|| CodeGenError::missing_values_in_stack())?;
            let typed_reg = self.pop_to_reg(masm, None)?;
            masm.shift_ir(
                writable!(typed_reg.reg),
                val as u64,
                typed_reg.reg,
                kind,
                OperandSize::S64,
            )?;
            self.stack.push(typed_reg.into());
        } else {
            masm.shift(self, kind, OperandSize::S64)?;
        }

        Ok(())
    }
}

impl<'a> CodeGenContext<'a, Prologue> {
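    /// Create a new code generation context.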
    pub fn new(
        regalloc: RegAlloc,
        stack: Stack,
        frame: Frame<Prologue>,
        vmoffsets: &'a VMOffsets<u8>,
    ) -> Self {
        Self {
            regalloc,
            stack,
            frame,
            reachable: true,
            vmoffsets,
        }
    }

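    /// Transition the context to the emission phase.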
    pub fn for_emission(self) -> CodeGenContext<'a, Emission> {
        CodeGenContext {
            regalloc: self.regalloc,
            stack: self.stack,
            reachable: self.reachable,
            vmoffsets: self.vmoffsets,
            frame: self.frame.for_emission(),
        }
    }
}

impl<'a> CodeGenContext<'a, Emission> {
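    /// Requests the given register from the register allocator, spilling the
    /// value stack if the register is not immediately available.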
    pub fn reg<M: MacroAssembler>(&mut self, named: Reg, masm: &mut M) -> Result<Reg> {
        self.regalloc.reg(named, |regalloc| {
            Self::spill_impl(&mut self.stack, regalloc, &self.frame, masm)
        })
    }

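    /// Allocates a register suitable for the given WebAssembly type,
    /// spilling if needed.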
    pub fn reg_for_type<M: MacroAssembler>(
        &mut self,
        ty: WasmValType,
        masm: &mut M,
    ) -> Result<Reg> {
        use WasmValType::*;
        match ty {
            I32 | I64 => self.reg_for_class(RegClass::Int, masm),
            F32 | F64 => self.reg_for_class(RegClass::Float, masm),
            // Vectors are allocated in the float register class.
            V128 => self.reg_for_class(RegClass::Float, masm),
            Ref(rt) => match rt.heap_type {
                WasmHeapType::Func | WasmHeapType::Extern => {
                    self.reg_for_class(RegClass::Int, masm)
                }
                _ => bail!(CodeGenError::unsupported_wasm_type()),
            },
        }
    }

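    /// Allocates a register from the given register class, spilling if no
    /// register in that class is available.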
    pub fn reg_for_class<M: MacroAssembler>(
        &mut self,
        class: RegClass,
        masm: &mut M,
    ) -> Result<Reg> {
        self.regalloc.reg_for_class(class, &mut |regalloc| {
            Self::spill_impl(&mut self.stack, regalloc, &self.frame, masm)
        })
    }

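    /// Convenience wrapper around [`Self::reg_for_class`] to allocate any
    /// general purpose register.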
    pub fn any_gpr<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<Reg> {
        self.reg_for_class(RegClass::Int, masm)
    }

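    /// Convenience wrapper around [`Self::reg_for_class`] to allocate any
    /// floating point register.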
    pub fn any_fpr<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<Reg> {
        self.reg_for_class(RegClass::Float, masm)
    }

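    /// Executes the given closure with the provided registers temporarily
    /// reserved, so that the closure cannot allocate them; the registers are
    /// freed again once the closure returns.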
    pub fn without<'r, T, M, F>(
        &mut self,
        regs: impl IntoIterator<Item = &'r Reg> + Copy,
        masm: &mut M,
        mut f: F,
    ) -> Result<T>
    where
        M: MacroAssembler,
        F: FnMut(&mut Self, &mut M) -> T,
    {
        for r in regs {
            self.reg(*r, masm)?;
        }

        let result = f(self, masm);

        for r in regs {
            self.free_reg(*r);
        }

        Ok(result)
    }

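    /// Marks the given register as free in the register allocator.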
    pub fn free_reg(&mut self, reg: impl Into<Reg>) {
        let reg: Reg = reg.into();
        self.regalloc.free(reg);
    }

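    /// Pops the value at the top of the value stack into a register.
    ///
    /// If a named register is requested, the value is placed in that
    /// register; otherwise a register matching the value's type is
    /// allocated. Values already resident in a register are returned as-is,
    /// memory values are popped from the machine stack, and constants and
    /// locals are materialized into the destination register.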
    pub fn pop_to_reg<M: MacroAssembler>(
        &mut self,
        masm: &mut M,
        named: Option<Reg>,
    ) -> Result<TypedReg> {
        let typed_reg = if let Some(dst) = named {
            self.stack.pop_named_reg(dst)
        } else {
            self.stack.pop_reg()
        };

        if let Some(dst) = typed_reg {
            return Ok(dst);
        }

        let val = self.stack.pop().expect("a value at stack top");
        let reg = if let Some(r) = named {
            self.reg(r, masm)?
        } else {
            self.reg_for_type(val.ty(), masm)?
        };

        if val.is_mem() {
            let mem = val.unwrap_mem();
            let curr_offset = masm.sp_offset()?.as_u32();
            let slot_offset = mem.slot.offset.as_u32();
            ensure!(
                curr_offset == slot_offset,
                CodeGenError::invalid_sp_offset(),
            );
            masm.pop(writable!(reg), val.ty().try_into()?)?;
        } else {
            self.move_val_to_reg(&val, reg, masm)?;
            // Free the source register, if any, once the value has been
            // moved to its destination.
            if val.is_reg() {
                self.free_reg(val.unwrap_reg());
            }
        }

        Ok(TypedReg::new(val.ty(), reg))
    }

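    /// Pops the value at the top of the value stack and stores it at the
    /// given address.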
    pub fn pop_to_addr<M: MacroAssembler>(&mut self, masm: &mut M, addr: M::Address) -> Result<()> {
        let val = self.stack.pop().expect("a value at stack top");
        let ty = val.ty();
        let size: OperandSize = ty.try_into()?;
        match val {
            Val::Reg(tr) => {
                masm.store(tr.reg.into(), addr, size)?;
                self.free_reg(tr.reg);
            }
            Val::I32(v) => masm.store(RegImm::i32(v), addr, size)?,
            Val::I64(v) => masm.store(RegImm::i64(v), addr, size)?,
            Val::F32(v) => masm.store(RegImm::f32(v.bits()), addr, size)?,
            Val::F64(v) => masm.store(RegImm::f64(v.bits()), addr, size)?,
            Val::V128(v) => masm.store(RegImm::v128(v), addr, size)?,
            Val::Local(local) => {
                let slot = self.frame.get_wasm_local(local.index);
                let scratch = scratch!(M);
                let local_addr = masm.local_address(&slot)?;
                masm.load(local_addr, writable!(scratch), size)?;
                masm.store(scratch.into(), addr, size)?;
            }
            Val::Memory(_) => {
                let scratch = scratch!(M, &ty);
                masm.pop(writable!(scratch), size)?;
                masm.store(scratch.into(), addr, size)?;
            }
        }

        Ok(())
    }

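    /// Moves the given stack value into the destination register.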
    pub fn move_val_to_reg<M: MacroAssembler>(
        &self,
        src: &Val,
        dst: Reg,
        masm: &mut M,
    ) -> Result<()> {
        let size: OperandSize = src.ty().try_into()?;
        match src {
            Val::Reg(tr) => masm.mov(writable!(dst), RegImm::reg(tr.reg), size),
            Val::I32(imm) => masm.mov(writable!(dst), RegImm::i32(*imm), size),
            Val::I64(imm) => masm.mov(writable!(dst), RegImm::i64(*imm), size),
            Val::F32(imm) => masm.mov(writable!(dst), RegImm::f32(imm.bits()), size),
            Val::F64(imm) => masm.mov(writable!(dst), RegImm::f64(imm.bits()), size),
            Val::V128(imm) => masm.mov(writable!(dst), RegImm::v128(*imm), size),
            Val::Local(local) => {
                let slot = self.frame.get_wasm_local(local.index);
                let addr = masm.local_address(&slot)?;
                masm.load(addr, writable!(dst), size)
            }
            Val::Memory(mem) => {
                let addr = masm.address_from_sp(mem.slot.offset)?;
                masm.load(addr, writable!(dst), size)
            }
        }
    }

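    /// Prepares the stack for a unary operation: pops the operand into a
    /// register, invokes the emission closure, and pushes the result back
    /// onto the value stack.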
    pub fn unop<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        let typed_reg = self.pop_to_reg(masm, None)?;
        let dst = emit(masm, typed_reg.reg)?;
        self.stack.push(dst.into());

        Ok(())
    }

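    /// Prepares the stack for a binary operation: pops both operands into
    /// registers, invokes the emission closure, frees the source register,
    /// and pushes the result back onto the value stack.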
    pub fn binop<F, M>(&mut self, masm: &mut M, size: OperandSize, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, OperandSize) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.pop_to_reg(masm, None)?;
        let dst = emit(masm, dst.reg, src.reg.into(), size)?;
        self.free_reg(src);
        self.stack.push(dst.into());

        Ok(())
    }

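    /// Prepares the stack for a floating point comparison: pops both
    /// operands, emits the comparison into a freshly allocated general
    /// purpose register, and pushes the i32 result.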
    pub fn float_cmp_op<F, M>(&mut self, masm: &mut M, size: OperandSize, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, Reg, OperandSize) -> Result<()>,
        M: MacroAssembler,
    {
        let src2 = self.pop_to_reg(masm, None)?;
        let src1 = self.pop_to_reg(masm, None)?;
        let dst = self.any_gpr(masm)?;
        emit(masm, dst, src1.reg, src2.reg, size)?;
        self.free_reg(src1);
        self.free_reg(src2);

        // Floating point comparisons always produce an i32 result.
        let dst = match size {
            OperandSize::S32 | OperandSize::S64 => TypedReg::i32(dst),
            OperandSize::S8 | OperandSize::S16 | OperandSize::S128 => {
                bail!(CodeGenError::unexpected_operand_size())
            }
        };
        self.stack.push(dst.into());

        Ok(())
    }

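    /// Prepares the stack for an i32 binary operation, using an immediate
    /// operand when the value at the top of the stack is an i32 constant.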
    pub fn i32_binop<F, M>(&mut self, masm: &mut M, mut emit: F) -> Result<()>
    where
        F: FnMut(&mut M, Reg, RegImm, OperandSize) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        match self.pop_i32_const() {
            Some(val) => {
                let typed_reg = self.pop_to_reg(masm, None)?;
                let dst = emit(masm, typed_reg.reg, RegImm::i32(val), OperandSize::S32)?;
                self.stack.push(dst.into());
            }
            None => self.binop(masm, OperandSize::S32, |masm, dst, src, size| {
                emit(masm, dst, src.into(), size)
            })?,
        }
        Ok(())
    }

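    /// Prepares the stack for an i64 binary operation, using an immediate
    /// operand when the value at the top of the stack is an i64 constant.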
    pub fn i64_binop<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, RegImm, OperandSize) -> Result<TypedReg>,
        M: MacroAssembler,
    {
        match self.pop_i64_const() {
            Some(val) => {
                let typed_reg = self.pop_to_reg(masm, None)?;
                let dst = emit(masm, typed_reg.reg, RegImm::i64(val), OperandSize::S64)?;
                self.stack.push(dst.into());
            }
            None => self.binop(masm, OperandSize::S64, |masm, dst, src, size| {
                emit(masm, dst, src.into(), size)
            })?,
        }
        Ok(())
    }

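    /// Pops the value at the top of the stack if it is an i32 constant,
    /// returning the constant; leaves the stack untouched and returns `None`
    /// otherwise.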
    pub fn pop_i32_const(&mut self) -> Option<i32> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_i32_const() {
            let val = self
                .stack
                .pop_i32_const()
                .expect("i32 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

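    /// Pops the value at the top of the stack if it is an i64 constant,
    /// returning the constant; leaves the stack untouched and returns `None`
    /// otherwise.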
    pub fn pop_i64_const(&mut self) -> Option<i64> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_i64_const() {
            let val = self
                .stack
                .pop_i64_const()
                .expect("i64 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

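    /// Pops the value at the top of the stack if it is an f32 constant,
    /// returning the constant; leaves the stack untouched and returns `None`
    /// otherwise.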
    pub fn pop_f32_const(&mut self) -> Option<Ieee32> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_f32_const() {
            let val = self
                .stack
                .pop_f32_const()
                .expect("f32 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

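    /// Pops the value at the top of the stack if it is an f64 constant,
    /// returning the constant; leaves the stack untouched and returns `None`
    /// otherwise.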
    pub fn pop_f64_const(&mut self) -> Option<Ieee64> {
        let top = self.stack.peek().expect("value at stack top");

        if top.is_f64_const() {
            let val = self
                .stack
                .pop_f64_const()
                .expect("f64 const value at stack top");
            Some(val)
        } else {
            None
        }
    }

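    /// Prepares the stack for a conversion: pops the source operand,
    /// allocates a destination register for the target type, and invokes
    /// the emission closure with the destination's operand size.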
    pub fn convert_op<F, M>(&mut self, masm: &mut M, dst_ty: WasmValType, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, OperandSize) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.reg_for_type(dst_ty, masm)?;
        let dst_size = match dst_ty {
            WasmValType::I32 => OperandSize::S32,
            WasmValType::I64 => OperandSize::S64,
            WasmValType::F32 => OperandSize::S32,
            WasmValType::F64 => OperandSize::S64,
            WasmValType::V128 => bail!(CodeGenError::unsupported_wasm_type()),
            WasmValType::Ref(_) => bail!(CodeGenError::unsupported_wasm_type()),
        };

        emit(masm, dst, src.into(), dst_size)?;

        self.free_reg(src);
        self.stack.push(TypedReg::new(dst_ty, dst).into());
        Ok(())
    }

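    /// Like [`Self::convert_op`], but also allocates a temporary register
    /// from the given register class for the duration of the conversion.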
    pub fn convert_op_with_tmp_reg<F, M>(
        &mut self,
        masm: &mut M,
        dst_ty: WasmValType,
        tmp_reg_class: RegClass,
        emit: F,
    ) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, Reg, OperandSize) -> Result<()>,
        M: MacroAssembler,
    {
        let tmp_gpr = self.reg_for_class(tmp_reg_class, masm)?;
        self.convert_op(masm, dst_ty, |masm, dst, src, dst_size| {
            emit(masm, dst, src, tmp_gpr, dst_size)
        })?;
        self.free_reg(tmp_gpr);
        Ok(())
    }

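    /// Prepares the stack for an extract-lane operation. Integer lanes are
    /// extracted into a freshly allocated general purpose register, while
    /// float lanes reuse the source vector register as the destination.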
    pub fn extract_lane_op<F, M>(
        &mut self,
        masm: &mut M,
        kind: ExtractLaneKind,
        emit: F,
    ) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, WritableReg, ExtractLaneKind) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = writable!(match kind {
            ExtractLaneKind::I8x16S
            | ExtractLaneKind::I8x16U
            | ExtractLaneKind::I16x8S
            | ExtractLaneKind::I16x8U
            | ExtractLaneKind::I32x4
            | ExtractLaneKind::I64x2 => self.any_gpr(masm)?,
            ExtractLaneKind::F32x4 | ExtractLaneKind::F64x2 => src.reg,
        });

        emit(masm, src.reg, dst, kind)?;

        // Free the source register for integer kinds; for float kinds the
        // source register is reused as the destination.
        match kind {
            ExtractLaneKind::I8x16S
            | ExtractLaneKind::I8x16U
            | ExtractLaneKind::I16x8S
            | ExtractLaneKind::I16x8U
            | ExtractLaneKind::I32x4
            | ExtractLaneKind::I64x2 => self.free_reg(src),
            _ => (),
        }

        let dst = dst.to_reg();
        let dst = match kind {
            ExtractLaneKind::I8x16S
            | ExtractLaneKind::I8x16U
            | ExtractLaneKind::I16x8S
            | ExtractLaneKind::I16x8U
            | ExtractLaneKind::I32x4 => TypedReg::i32(dst),
            ExtractLaneKind::I64x2 => TypedReg::i64(dst),
            ExtractLaneKind::F32x4 => TypedReg::f32(dst),
            ExtractLaneKind::F64x2 => TypedReg::f64(dst),
        };

        self.stack.push(Val::Reg(dst));
        Ok(())
    }

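    /// Prepares the stack for a replace-lane operation. The replacement
    /// value is used as an immediate when it is a constant at the top of the
    /// stack; otherwise it is popped into a register.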
    pub fn replace_lane_op<F, M>(
        &mut self,
        masm: &mut M,
        kind: ReplaceLaneKind,
        emit: F,
    ) -> Result<()>
    where
        F: FnOnce(&mut M, RegImm, WritableReg, ReplaceLaneKind) -> Result<()>,
        M: MacroAssembler,
    {
        let src = match kind {
            ReplaceLaneKind::I8x16 | ReplaceLaneKind::I16x8 | ReplaceLaneKind::I32x4 => {
                self.pop_i32_const().map(RegImm::i32)
            }
            ReplaceLaneKind::I64x2 => self.pop_i64_const().map(RegImm::i64),
            ReplaceLaneKind::F32x4 => self.pop_f32_const().map(|v| RegImm::f32(v.bits())),
            ReplaceLaneKind::F64x2 => self.pop_f64_const().map(|v| RegImm::f64(v.bits())),
        }
        .map_or_else(
            || Ok(RegImm::reg(self.pop_to_reg(masm, None)?.into())),
            Ok::<_, anyhow::Error>,
        )?;

        let dst = self.pop_to_reg(masm, None)?;

        emit(masm, src, writable!(dst.into()), kind)?;

        if let RegImm::Reg(reg) = src {
            self.free_reg(reg);
        }
        self.stack.push(dst.into());

        Ok(())
    }

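    /// Drops the last `last` elements of the value stack, calling the
    /// provided closure for each dropped value so that any resources held by
    /// the value (such as registers) can be released.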
    pub fn drop_last<F>(&mut self, last: usize, mut f: F) -> Result<()>
    where
        F: FnMut(&mut RegAlloc, &Val) -> Result<()>,
    {
        if last > 0 {
            let len = self.stack.len();
            ensure!(last <= len, CodeGenError::unexpected_value_stack_index());
            let truncate = self.stack.len() - last;
            let stack_mut = self.stack.inner_mut();

            // Invoke the callback for each dropped value, in top-to-bottom
            // order.
            for v in stack_mut[truncate..].iter().rev() {
                f(&mut self.regalloc, v)?;
            }
            stack_mut.truncate(truncate);
        }

        Ok(())
    }

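    /// Spills the value stack to memory: register values are pushed to the
    /// machine stack and their registers freed, and locals are materialized
    /// into memory, so that every entry in the value stack refers to a
    /// machine stack slot.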
    pub fn spill<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<()> {
        Self::spill_impl(&mut self.stack, &mut self.regalloc, &self.frame, masm)
    }

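    /// Prepares the context to emit an unconditional jump to the given
    /// destination branch. The provided closure is invoked before the jump
    /// is emitted, giving the caller a chance to resolve any outstanding
    /// state, such as placing branch results in their expected location.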
    pub fn unconditional_jump<M, F>(
        &mut self,
        dest: &mut ControlStackFrame,
        masm: &mut M,
        mut f: F,
    ) -> Result<()>
    where
        M: MacroAssembler,
        F: FnMut(&mut M, &mut Self, &mut ControlStackFrame) -> Result<()>,
    {
        let state = dest.stack_state();
        let target_offset = state.target_offset;
        let base_offset = state.base_offset;
        // The stack pointer offset is only ever expected to grow from the
        // destination's base offset; a smaller offset indicates an
        // unbalanced machine stack.
        ensure!(
            masm.sp_offset()?.as_u32() >= base_offset.as_u32(),
            CodeGenError::invalid_sp_offset()
        );
        f(masm, self, dest)?;

        // Ensure that the machine stack height matches the destination's
        // expected target offset: every jump to the same label must arrive
        // with the same stack pointer offset, so any excess stack space
        // claimed by the current block is reclaimed before the jump.
        masm.ensure_sp_for_jump(target_offset)?;
        dest.set_as_target();
        masm.jmp(*dest.label())?;
        self.reachable = false;
        Ok(())
    }

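    /// Pushes the ABI representation of a function call's results onto the
    /// value stack. `calculate_ret_area` computes the return area for
    /// stack-allocated results and is only invoked when such results exist.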
    pub fn push_abi_results<M, F>(
        &mut self,
        results: &ABIResults,
        masm: &mut M,
        mut calculate_ret_area: F,
    ) -> Result<()>
    where
        M: MacroAssembler,
        F: FnMut(&ABIResults, &mut CodeGenContext<Emission>, &mut M) -> Option<RetArea>,
    {
        let area = results
            .on_stack()
            .then(|| calculate_ret_area(results, self, masm).unwrap());

        for operand in results.operands().iter() {
            match operand {
                ABIOperand::Reg { reg, ty, .. } => {
                    ensure!(
                        self.regalloc.reg_available(*reg),
                        CodeGenError::expected_register_to_be_available(),
                    );

                    let typed_reg = TypedReg::new(*ty, self.reg(*reg, masm)?);
                    self.stack.push(typed_reg.into());
                }
                ABIOperand::Stack { ty, offset, size } => match area.unwrap() {
                    RetArea::SP(sp_offset) => {
                        let slot =
                            StackSlot::new(SPOffset::from_u32(sp_offset.as_u32() - offset), *size);
                        self.stack.push(Val::mem(*ty, slot));
                    }
                    // Only SP-relative return areas are expected when
                    // pushing the results of a function call.
                    _ => bail!(CodeGenError::unexpected_function_call()),
                },
            }
        }

        Ok(())
    }

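    /// Truncates the value stack to the given target length, freeing any
    /// registers held by the dropped values.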
    pub fn truncate_stack_to(&mut self, target: usize) -> Result<()> {
        if self.stack.len() > target {
            self.drop_last(self.stack.len() - target, |regalloc, val| match val {
                Val::Reg(tr) => Ok(regalloc.free(tr.reg)),
                _ => Ok(()),
            })
        } else {
            Ok(())
        }
    }

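    /// Loads the VM context pointer from its frame slot into the pinned
    /// vmctx register.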
    pub fn load_vmctx<M>(&mut self, masm: &mut M) -> Result<()>
    where
        M: MacroAssembler,
    {
        let addr = masm.local_address(&self.frame.vmctx_slot())?;
        masm.load_ptr(addr, writable!(vmctx!(M)))
    }

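    /// Internal implementation of the spill, shared between [`Self::spill`]
    /// and the register allocation callbacks: register values are pushed to
    /// the machine stack and freed, and locals are loaded into a scratch
    /// register and pushed, leaving the value stack with memory entries
    /// only.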
    fn spill_impl<M: MacroAssembler>(
        stack: &mut Stack,
        regalloc: &mut RegAlloc,
        frame: &Frame<Emission>,
        masm: &mut M,
    ) -> Result<()> {
        for v in stack.inner_mut() {
            match v {
                Val::Reg(r) => {
                    let slot = masm.push(r.reg, r.ty.try_into()?)?;
                    regalloc.free(r.reg);
                    *v = Val::mem(r.ty, slot);
                }
                Val::Local(local) => {
                    let slot = frame.get_wasm_local(local.index);
                    let addr = masm.local_address(&slot)?;
                    let scratch = scratch!(M, &slot.ty);
                    masm.load(addr, writable!(scratch), slot.ty.try_into()?)?;
                    let stack_slot = masm.push(scratch, slot.ty.try_into()?)?;
                    *v = Val::mem(slot.ty, stack_slot);
                }
                _ => {}
            }
        }

        Ok(())
    }

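    /// Prepares the stack for a 128-bit binary operation whose operands are
    /// split across two registers each: pops both halves of both operands,
    /// invokes the emission closure, frees the right-hand side registers,
    /// and pushes the low and high halves of the result.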
    pub fn binop128<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg, Reg, Reg) -> Result<(TypedReg, TypedReg)>,
        M: MacroAssembler,
    {
        let rhs_hi = self.pop_to_reg(masm, None)?;
        let rhs_lo = self.pop_to_reg(masm, None)?;
        let lhs_hi = self.pop_to_reg(masm, None)?;
        let lhs_lo = self.pop_to_reg(masm, None)?;
        let (lo, hi) = emit(masm, lhs_lo.reg, lhs_hi.reg, rhs_lo.reg, rhs_hi.reg)?;
        self.free_reg(rhs_hi);
        self.free_reg(rhs_lo);
        self.stack.push(lo.into());
        self.stack.push(hi.into());

        Ok(())
    }

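    /// Prepares the stack for a v128 `all_true` operation: pops the vector
    /// operand, emits into a general purpose register, and pushes the i32
    /// result.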
    pub fn v128_all_true_op<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.any_gpr(masm)?;
        emit(masm, src.reg, dst)?;
        self.free_reg(src);
        self.stack.push(TypedReg::i32(dst).into());

        Ok(())
    }

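    /// Prepares the stack for a v128 `bitmask` operation: pops the vector
    /// operand, emits into a general purpose register, and pushes the i32
    /// result.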
    pub fn v128_bitmask_op<F, M>(&mut self, masm: &mut M, emit: F) -> Result<()>
    where
        F: FnOnce(&mut M, Reg, Reg) -> Result<()>,
        M: MacroAssembler,
    {
        let src = self.pop_to_reg(masm, None)?;
        let dst = self.any_gpr(masm)?;
        emit(masm, src.reg, dst)?;
        self.free_reg(src);
        self.stack.push(TypedReg::i32(dst).into());

        Ok(())
    }

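    /// Pops the value at the top of the stack into a register and
    /// immediately frees that register, discarding the value.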
    pub fn pop_and_free<M: MacroAssembler>(&mut self, masm: &mut M) -> Result<()> {
        let reg = self.pop_to_reg(masm, None)?;
        self.free_reg(reg.reg);
        Ok(())
    }
}