cranelift_codegen/
isle_prelude.rs

1//! Shared ISLE prelude implementation for optimization (mid-end) and
2//! lowering (backend) ISLE environments.
3
4/// Helper macro to define methods in `prelude.isle` within `impl Context for
5/// ...` for each backend. These methods are shared amongst all backends.
6#[macro_export]
7#[doc(hidden)]
8macro_rules! isle_common_prelude_methods {
9    () => {
10        /// We don't have a way of making a `()` value in isle directly.
11        #[inline]
12        fn unit(&mut self) -> Unit {
13            ()
14        }
15
16        #[inline]
17        fn u8_as_u32(&mut self, x: u8) -> u32 {
18            x.into()
19        }
20
21        #[inline]
22        fn u8_as_u64(&mut self, x: u8) -> u64 {
23            x.into()
24        }
25
26        #[inline]
27        fn u16_as_i16(&mut self, x: u16) -> i16 {
28            x as i16
29        }
30
31        #[inline]
32        fn u16_as_u32(&mut self, x: u16) -> u32 {
33            x.into()
34        }
35
36        #[inline]
37        fn u16_as_u64(&mut self, x: u16) -> u64 {
38            x.into()
39        }
40
41        #[inline]
42        fn u32_as_u64(&mut self, x: u32) -> u64 {
43            x.into()
44        }
45
46        #[inline]
47        fn i64_as_u64(&mut self, x: i64) -> u64 {
48            x as u64
49        }
50
51        #[inline]
52        fn u64_as_i32(&mut self, x: u64) -> i32 {
53            x as i32
54        }
55
56        #[inline]
57        fn u64_as_i64(&mut self, x: u64) -> i64 {
58            x as i64
59        }
60
61        #[inline]
62        fn i32_as_i64(&mut self, x: i32) -> i64 {
63            x.into()
64        }
65
66        #[inline]
67        fn i64_neg(&mut self, x: i64) -> i64 {
68            x.wrapping_neg()
69        }
70
71        #[inline]
72        fn i8_neg(&mut self, x: i8) -> i8 {
73            x.wrapping_neg()
74        }
75
76        #[inline]
77        fn u64_add(&mut self, x: u64, y: u64) -> u64 {
78            x.wrapping_add(y)
79        }
80
81        #[inline]
82        fn u64_sub(&mut self, x: u64, y: u64) -> u64 {
83            x.wrapping_sub(y)
84        }
85
86        #[inline]
87        fn u64_mul(&mut self, x: u64, y: u64) -> u64 {
88            x.wrapping_mul(y)
89        }
90
        #[inline]
        /// Signed division on `u64` bit-patterns: reinterprets both operands
        /// as `i64` and divides. Returns `None` both for division by zero and
        /// for the `i64::MIN / -1` overflow case — `checked_div` rejects both.
        fn u64_sdiv(&mut self, x: u64, y: u64) -> Option<u64> {
            let x = x as i64;
            let y = y as i64;
            x.checked_div(y).map(|d| d as u64)
        }
97
98        #[inline]
99        fn u64_udiv(&mut self, x: u64, y: u64) -> Option<u64> {
100            x.checked_div(y)
101        }
102
103        #[inline]
104        fn u64_and(&mut self, x: u64, y: u64) -> u64 {
105            x & y
106        }
107
108        #[inline]
109        fn u64_or(&mut self, x: u64, y: u64) -> u64 {
110            x | y
111        }
112
113        #[inline]
114        fn u64_xor(&mut self, x: u64, y: u64) -> u64 {
115            x ^ y
116        }
117
        #[inline]
        /// Left shift with an unmasked shift amount: `y >= 64` is a shift
        /// overflow (panics when debug assertions are enabled), so callers are
        /// responsible for passing an in-range `y`.
        fn u64_shl(&mut self, x: u64, y: u64) -> u64 {
            x << y
        }
122
123        #[inline]
124        fn imm64_shl(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
125            // Mask off any excess shift bits.
126            let shift_mask = (ty.bits() - 1) as u64;
127            let y = (y.bits() as u64) & shift_mask;
128
129            // Mask the result to `ty` bits.
130            let ty_mask = self.ty_mask(ty) as i64;
131            Imm64::new((x.bits() << y) & ty_mask)
132        }
133
134        #[inline]
135        fn imm64_ushr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
136            let ty_mask = self.ty_mask(ty);
137            let x = (x.bits() as u64) & ty_mask;
138
139            // Mask off any excess shift bits.
140            let shift_mask = (ty.bits() - 1) as u64;
141            let y = (y.bits() as u64) & shift_mask;
142
143            // NB: No need to mask off high bits because they are already zero.
144            Imm64::new((x >> y) as i64)
145        }
146
147        #[inline]
148        fn imm64_sshr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
149            // Sign extend `x` from `ty.bits()`-width to the full 64 bits.
150            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
151            let x = (x.bits() << shift) >> shift;
152
153            // Mask off any excess shift bits.
154            let shift_mask = (ty.bits() - 1) as i64;
155            let y = y.bits() & shift_mask;
156
157            // Mask off sign bits that aren't part of `ty`.
158            let ty_mask = self.ty_mask(ty) as i64;
159            Imm64::new((x >> y) & ty_mask)
160        }
161
162        #[inline]
163        fn u64_not(&mut self, x: u64) -> u64 {
164            !x
165        }
166
167        #[inline]
168        fn u64_eq(&mut self, x: u64, y: u64) -> bool {
169            x == y
170        }
171
172        #[inline]
173        fn u64_le(&mut self, x: u64, y: u64) -> bool {
174            x <= y
175        }
176
177        #[inline]
178        fn u64_lt(&mut self, x: u64, y: u64) -> bool {
179            x < y
180        }
181
182        #[inline]
183        fn u64_is_zero(&mut self, value: u64) -> bool {
184            0 == value
185        }
186
187        fn i64_is_zero(&mut self, value: i64) -> bool {
188            0 == value
189        }
190
191        #[inline]
192        fn u64_is_odd(&mut self, x: u64) -> bool {
193            x & 1 == 1
194        }
195
196        fn i64_shr(&mut self, a: i64, b: i64) -> i64 {
197            a >> b
198        }
199
200        fn i64_ctz(&mut self, a: i64) -> i64 {
201            a.trailing_zeros().into()
202        }
203
204        #[inline]
205        fn i64_sextend_u64(&mut self, ty: Type, x: u64) -> i64 {
206            let shift_amt = std::cmp::max(0, 64 - ty.bits());
207            ((x as i64) << shift_amt) >> shift_amt
208        }
209
210        #[inline]
211        fn i64_sextend_imm64(&mut self, ty: Type, x: Imm64) -> i64 {
212            x.sign_extend_from_width(ty.bits()).bits()
213        }
214
215        #[inline]
216        fn u64_uextend_imm64(&mut self, ty: Type, x: Imm64) -> u64 {
217            (x.bits() as u64) & self.ty_mask(ty)
218        }
219
220        #[inline]
221        fn imm64_icmp(&mut self, ty: Type, cc: &IntCC, x: Imm64, y: Imm64) -> Imm64 {
222            let ux = self.u64_uextend_imm64(ty, x);
223            let uy = self.u64_uextend_imm64(ty, y);
224            let sx = self.i64_sextend_imm64(ty, x);
225            let sy = self.i64_sextend_imm64(ty, y);
226            let result = match cc {
227                IntCC::Equal => ux == uy,
228                IntCC::NotEqual => ux != uy,
229                IntCC::UnsignedGreaterThanOrEqual => ux >= uy,
230                IntCC::UnsignedGreaterThan => ux > uy,
231                IntCC::UnsignedLessThanOrEqual => ux <= uy,
232                IntCC::UnsignedLessThan => ux < uy,
233                IntCC::SignedGreaterThanOrEqual => sx >= sy,
234                IntCC::SignedGreaterThan => sx > sy,
235                IntCC::SignedLessThanOrEqual => sx <= sy,
236                IntCC::SignedLessThan => sx < sy,
237            };
238            Imm64::new(result.into())
239        }
240
241        #[inline]
242        fn ty_bits(&mut self, ty: Type) -> u8 {
243            use std::convert::TryInto;
244            ty.bits().try_into().unwrap()
245        }
246
247        #[inline]
248        fn ty_bits_u16(&mut self, ty: Type) -> u16 {
249            ty.bits() as u16
250        }
251
252        #[inline]
253        fn ty_bits_u64(&mut self, ty: Type) -> u64 {
254            ty.bits() as u64
255        }
256
257        #[inline]
258        fn ty_bytes(&mut self, ty: Type) -> u16 {
259            u16::try_from(ty.bytes()).unwrap()
260        }
261
262        #[inline]
263        fn ty_mask(&mut self, ty: Type) -> u64 {
264            let ty_bits = ty.bits();
265            debug_assert_ne!(ty_bits, 0);
266            let shift = 64_u64
267                .checked_sub(ty_bits.into())
268                .expect("unimplemented for > 64 bits");
269            u64::MAX >> shift
270        }
271
272        #[inline]
273        fn ty_lane_mask(&mut self, ty: Type) -> u64 {
274            let ty_lane_count = ty.lane_count();
275            debug_assert_ne!(ty_lane_count, 0);
276            let shift = 64_u64
277                .checked_sub(ty_lane_count.into())
278                .expect("unimplemented for > 64 bits");
279            u64::MAX >> shift
280        }
281
282        #[inline]
283        fn ty_lane_count(&mut self, ty: Type) -> u64 {
284            ty.lane_count() as u64
285        }
286
287        #[inline]
288        fn ty_umin(&mut self, _ty: Type) -> u64 {
289            0
290        }
291
292        #[inline]
293        fn ty_umax(&mut self, ty: Type) -> u64 {
294            self.ty_mask(ty)
295        }
296
297        #[inline]
298        fn ty_smin(&mut self, ty: Type) -> u64 {
299            let ty_bits = ty.bits();
300            debug_assert_ne!(ty_bits, 0);
301            let shift = 64_u64
302                .checked_sub(ty_bits.into())
303                .expect("unimplemented for > 64 bits");
304            (i64::MIN as u64) >> shift
305        }
306
307        #[inline]
308        fn ty_smax(&mut self, ty: Type) -> u64 {
309            let ty_bits = ty.bits();
310            debug_assert_ne!(ty_bits, 0);
311            let shift = 64_u64
312                .checked_sub(ty_bits.into())
313                .expect("unimplemented for > 64 bits");
314            (i64::MAX as u64) >> shift
315        }
316
317        fn fits_in_16(&mut self, ty: Type) -> Option<Type> {
318            if ty.bits() <= 16 && !ty.is_dynamic_vector() {
319                Some(ty)
320            } else {
321                None
322            }
323        }
324
325        #[inline]
326        fn fits_in_32(&mut self, ty: Type) -> Option<Type> {
327            if ty.bits() <= 32 && !ty.is_dynamic_vector() {
328                Some(ty)
329            } else {
330                None
331            }
332        }
333
334        #[inline]
335        fn lane_fits_in_32(&mut self, ty: Type) -> Option<Type> {
336            if !ty.is_vector() && !ty.is_dynamic_vector() {
337                None
338            } else if ty.lane_type().bits() <= 32 {
339                Some(ty)
340            } else {
341                None
342            }
343        }
344
345        #[inline]
346        fn fits_in_64(&mut self, ty: Type) -> Option<Type> {
347            if ty.bits() <= 64 && !ty.is_dynamic_vector() {
348                Some(ty)
349            } else {
350                None
351            }
352        }
353
354        #[inline]
355        fn ty_int_ref_scalar_64(&mut self, ty: Type) -> Option<Type> {
356            if ty.bits() <= 64 && !ty.is_float() && !ty.is_vector() {
357                Some(ty)
358            } else {
359                None
360            }
361        }
362
363        #[inline]
364        fn ty_int_ref_scalar_64_extract(&mut self, ty: Type) -> Option<Type> {
365            self.ty_int_ref_scalar_64(ty)
366        }
367
368        #[inline]
369        fn ty_32(&mut self, ty: Type) -> Option<Type> {
370            if ty.bits() == 32 {
371                Some(ty)
372            } else {
373                None
374            }
375        }
376
377        #[inline]
378        fn ty_64(&mut self, ty: Type) -> Option<Type> {
379            if ty.bits() == 64 {
380                Some(ty)
381            } else {
382                None
383            }
384        }
385
386        #[inline]
387        fn ty_32_or_64(&mut self, ty: Type) -> Option<Type> {
388            if ty.bits() == 32 || ty.bits() == 64 {
389                Some(ty)
390            } else {
391                None
392            }
393        }
394
395        #[inline]
396        fn ty_8_or_16(&mut self, ty: Type) -> Option<Type> {
397            if ty.bits() == 8 || ty.bits() == 16 {
398                Some(ty)
399            } else {
400                None
401            }
402        }
403
404        #[inline]
405        fn ty_16_or_32(&mut self, ty: Type) -> Option<Type> {
406            if ty.bits() == 16 || ty.bits() == 32 {
407                Some(ty)
408            } else {
409                None
410            }
411        }
412
413        #[inline]
414        fn int_fits_in_32(&mut self, ty: Type) -> Option<Type> {
415            match ty {
416                I8 | I16 | I32 => Some(ty),
417                _ => None,
418            }
419        }
420
421        #[inline]
422        fn ty_int_ref_64(&mut self, ty: Type) -> Option<Type> {
423            match ty {
424                I64 => Some(ty),
425                _ => None,
426            }
427        }
428
429        #[inline]
430        fn ty_int_ref_16_to_64(&mut self, ty: Type) -> Option<Type> {
431            match ty {
432                I16 | I32 | I64 => Some(ty),
433                _ => None,
434            }
435        }
436
437        #[inline]
438        fn ty_int(&mut self, ty: Type) -> Option<Type> {
439            ty.is_int().then(|| ty)
440        }
441
442        #[inline]
443        fn ty_scalar(&mut self, ty: Type) -> Option<Type> {
444            if ty.lane_count() == 1 {
445                Some(ty)
446            } else {
447                None
448            }
449        }
450
451        #[inline]
452        fn ty_scalar_float(&mut self, ty: Type) -> Option<Type> {
453            if ty.is_float() {
454                Some(ty)
455            } else {
456                None
457            }
458        }
459
460        #[inline]
461        fn ty_float_or_vec(&mut self, ty: Type) -> Option<Type> {
462            if ty.is_float() || ty.is_vector() {
463                Some(ty)
464            } else {
465                None
466            }
467        }
468
469        fn ty_vector_float(&mut self, ty: Type) -> Option<Type> {
470            if ty.is_vector() && ty.lane_type().is_float() {
471                Some(ty)
472            } else {
473                None
474            }
475        }
476
477        #[inline]
478        fn ty_vector_not_float(&mut self, ty: Type) -> Option<Type> {
479            if ty.is_vector() && !ty.lane_type().is_float() {
480                Some(ty)
481            } else {
482                None
483            }
484        }
485
486        #[inline]
487        fn ty_vec64_ctor(&mut self, ty: Type) -> Option<Type> {
488            if ty.is_vector() && ty.bits() == 64 {
489                Some(ty)
490            } else {
491                None
492            }
493        }
494
495        #[inline]
496        fn ty_vec64(&mut self, ty: Type) -> Option<Type> {
497            if ty.is_vector() && ty.bits() == 64 {
498                Some(ty)
499            } else {
500                None
501            }
502        }
503
504        #[inline]
505        fn ty_vec128(&mut self, ty: Type) -> Option<Type> {
506            if ty.is_vector() && ty.bits() == 128 {
507                Some(ty)
508            } else {
509                None
510            }
511        }
512
513        #[inline]
514        fn ty_dyn_vec64(&mut self, ty: Type) -> Option<Type> {
515            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 64 {
516                Some(ty)
517            } else {
518                None
519            }
520        }
521
522        #[inline]
523        fn ty_dyn_vec128(&mut self, ty: Type) -> Option<Type> {
524            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 128 {
525                Some(ty)
526            } else {
527                None
528            }
529        }
530
531        #[inline]
532        fn ty_vec64_int(&mut self, ty: Type) -> Option<Type> {
533            if ty.is_vector() && ty.bits() == 64 && ty.lane_type().is_int() {
534                Some(ty)
535            } else {
536                None
537            }
538        }
539
540        #[inline]
541        fn ty_vec128_int(&mut self, ty: Type) -> Option<Type> {
542            if ty.is_vector() && ty.bits() == 128 && ty.lane_type().is_int() {
543                Some(ty)
544            } else {
545                None
546            }
547        }
548
549        #[inline]
550        fn ty_addr64(&mut self, ty: Type) -> Option<Type> {
551            match ty {
552                I64 => Some(ty),
553                _ => None,
554            }
555        }
556
557        #[inline]
558        fn u64_from_imm64(&mut self, imm: Imm64) -> u64 {
559            imm.bits() as u64
560        }
561
        #[inline]
        /// If `x` is a strictly positive power of two, return its *exponent*
        /// (log2 of the value), not the value itself; otherwise `None`.
        /// Negative values are rejected by the `u64::try_from` conversion.
        fn imm64_power_of_two(&mut self, x: Imm64) -> Option<u64> {
            let x = i64::from(x);
            let x = u64::try_from(x).ok()?;
            if x.is_power_of_two() {
                Some(x.trailing_zeros().into())
            } else {
                None
            }
        }
572
573        #[inline]
574        fn u64_from_bool(&mut self, b: bool) -> u64 {
575            if b {
576                u64::MAX
577            } else {
578                0
579            }
580        }
581
582        #[inline]
583        fn multi_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
584            if ty.lane_count() > 1 {
585                Some((ty.lane_bits(), ty.lane_count()))
586            } else {
587                None
588            }
589        }
590
591        #[inline]
592        fn dynamic_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
593            if ty.is_dynamic_vector() {
594                Some((ty.lane_bits(), ty.min_lane_count()))
595            } else {
596                None
597            }
598        }
599
600        #[inline]
601        fn ty_dyn64_int(&mut self, ty: Type) -> Option<Type> {
602            if ty.is_dynamic_vector() && ty.min_bits() == 64 && ty.lane_type().is_int() {
603                Some(ty)
604            } else {
605                None
606            }
607        }
608
609        #[inline]
610        fn ty_dyn128_int(&mut self, ty: Type) -> Option<Type> {
611            if ty.is_dynamic_vector() && ty.min_bits() == 128 && ty.lane_type().is_int() {
612                Some(ty)
613            } else {
614                None
615            }
616        }
617
618        fn u16_from_ieee16(&mut self, val: Ieee16) -> u16 {
619            val.bits()
620        }
621
622        fn u32_from_ieee32(&mut self, val: Ieee32) -> u32 {
623            val.bits()
624        }
625
626        fn u64_from_ieee64(&mut self, val: Ieee64) -> u64 {
627            val.bits()
628        }
629
630        fn u8_from_uimm8(&mut self, val: Uimm8) -> u8 {
631            val
632        }
633
634        fn not_vec32x2(&mut self, ty: Type) -> Option<Type> {
635            if ty.lane_bits() == 32 && ty.lane_count() == 2 {
636                None
637            } else {
638                Some(ty)
639            }
640        }
641
642        fn not_i64x2(&mut self, ty: Type) -> Option<()> {
643            if ty == I64X2 {
644                None
645            } else {
646                Some(())
647            }
648        }
649
650        fn trap_code_division_by_zero(&mut self) -> TrapCode {
651            TrapCode::INTEGER_DIVISION_BY_ZERO
652        }
653
654        fn trap_code_integer_overflow(&mut self) -> TrapCode {
655            TrapCode::INTEGER_OVERFLOW
656        }
657
658        fn trap_code_bad_conversion_to_integer(&mut self) -> TrapCode {
659            TrapCode::BAD_CONVERSION_TO_INTEGER
660        }
661
662        fn nonzero_u64_from_imm64(&mut self, val: Imm64) -> Option<u64> {
663            match val.bits() {
664                0 => None,
665                n => Some(n as u64),
666            }
667        }
668
669        #[inline]
670        fn u32_add(&mut self, a: u32, b: u32) -> u32 {
671            a.wrapping_add(b)
672        }
673
674        #[inline]
675        fn u32_sub(&mut self, a: u32, b: u32) -> u32 {
676            a.wrapping_sub(b)
677        }
678
679        #[inline]
680        fn u32_and(&mut self, a: u32, b: u32) -> u32 {
681            a & b
682        }
683
684        #[inline]
685        fn u32_shl(&mut self, x: u32, y: u32) -> u32 {
686            x << y
687        }
688
689        #[inline]
690        fn s32_add_fallible(&mut self, a: i32, b: i32) -> Option<i32> {
691            a.checked_add(b)
692        }
693
694        #[inline]
695        fn u32_nonnegative(&mut self, x: u32) -> Option<u32> {
696            if (x as i32) >= 0 {
697                Some(x)
698            } else {
699                None
700            }
701        }
702
703        #[inline]
704        fn u32_lteq(&mut self, a: u32, b: u32) -> Option<()> {
705            if a <= b {
706                Some(())
707            } else {
708                None
709            }
710        }
711
712        #[inline]
713        fn u8_lteq(&mut self, a: u8, b: u8) -> Option<()> {
714            if a <= b {
715                Some(())
716            } else {
717                None
718            }
719        }
720
721        #[inline]
722        fn u8_lt(&mut self, a: u8, b: u8) -> Option<()> {
723            if a < b {
724                Some(())
725            } else {
726                None
727            }
728        }
729
730        #[inline]
731        fn imm64(&mut self, x: u64) -> Imm64 {
732            Imm64::new(x as i64)
733        }
734
735        #[inline]
736        fn imm64_masked(&mut self, ty: Type, x: u64) -> Imm64 {
737            Imm64::new((x & self.ty_mask(ty)) as i64)
738        }
739
740        #[inline]
741        fn offset32(&mut self, x: Offset32) -> i32 {
742            x.into()
743        }
744
745        #[inline]
746        fn u8_and(&mut self, a: u8, b: u8) -> u8 {
747            a & b
748        }
749
750        #[inline]
751        fn u8_shl(&mut self, a: u8, b: u8) -> u8 {
752            a << b
753        }
754
755        #[inline]
756        fn u8_shr(&mut self, a: u8, b: u8) -> u8 {
757            a >> b
758        }
759
760        #[inline]
761        fn u8_sub(&mut self, a: u8, b: u8) -> u8 {
762            a.wrapping_sub(b)
763        }
764
765        #[inline]
766        fn lane_type(&mut self, ty: Type) -> Type {
767            ty.lane_type()
768        }
769
770        #[inline]
771        fn ty_half_lanes(&mut self, ty: Type) -> Option<Type> {
772            if ty.lane_count() == 1 {
773                None
774            } else {
775                ty.lane_type().by(ty.lane_count() / 2)
776            }
777        }
778
779        #[inline]
780        fn ty_half_width(&mut self, ty: Type) -> Option<Type> {
781            ty.half_width()
782        }
783
784        #[inline]
785        fn ty_equal(&mut self, lhs: Type, rhs: Type) -> bool {
786            lhs == rhs
787        }
788
789        #[inline]
790        fn offset32_to_i32(&mut self, offset: Offset32) -> i32 {
791            offset.into()
792        }
793
794        #[inline]
795        fn i32_to_offset32(&mut self, offset: i32) -> Offset32 {
796            Offset32::new(offset)
797        }
798
799        fn range(&mut self, start: usize, end: usize) -> Range {
800            (start, end)
801        }
802
803        fn range_view(&mut self, (start, end): Range) -> RangeView {
804            if start >= end {
805                RangeView::Empty
806            } else {
807                RangeView::NonEmpty {
808                    index: start,
809                    rest: (start + 1, end),
810                }
811            }
812        }
813
814        #[inline]
815        fn mem_flags_trusted(&mut self) -> MemFlags {
816            MemFlags::trusted()
817        }
818
819        #[inline]
820        fn intcc_unsigned(&mut self, x: &IntCC) -> IntCC {
821            x.unsigned()
822        }
823
824        #[inline]
825        fn signed_cond_code(&mut self, cc: &IntCC) -> Option<IntCC> {
826            match cc {
827                IntCC::Equal
828                | IntCC::UnsignedGreaterThanOrEqual
829                | IntCC::UnsignedGreaterThan
830                | IntCC::UnsignedLessThanOrEqual
831                | IntCC::UnsignedLessThan
832                | IntCC::NotEqual => None,
833                IntCC::SignedGreaterThanOrEqual
834                | IntCC::SignedGreaterThan
835                | IntCC::SignedLessThanOrEqual
836                | IntCC::SignedLessThan => Some(*cc),
837            }
838        }
839
840        #[inline]
841        fn intcc_swap_args(&mut self, cc: &IntCC) -> IntCC {
842            cc.swap_args()
843        }
844
845        #[inline]
846        fn intcc_complement(&mut self, cc: &IntCC) -> IntCC {
847            cc.complement()
848        }
849
850        #[inline]
851        fn intcc_without_eq(&mut self, x: &IntCC) -> IntCC {
852            x.without_equal()
853        }
854
855        #[inline]
856        fn floatcc_swap_args(&mut self, cc: &FloatCC) -> FloatCC {
857            cc.swap_args()
858        }
859
860        #[inline]
861        fn floatcc_complement(&mut self, cc: &FloatCC) -> FloatCC {
862            cc.complement()
863        }
864
865        fn floatcc_unordered(&mut self, cc: &FloatCC) -> bool {
866            match *cc {
867                FloatCC::Unordered
868                | FloatCC::UnorderedOrEqual
869                | FloatCC::UnorderedOrLessThan
870                | FloatCC::UnorderedOrLessThanOrEqual
871                | FloatCC::UnorderedOrGreaterThan
872                | FloatCC::UnorderedOrGreaterThanOrEqual => true,
873                _ => false,
874            }
875        }
876
877        #[inline]
878        fn unpack_value_array_2(&mut self, arr: &ValueArray2) -> (Value, Value) {
879            let [a, b] = *arr;
880            (a, b)
881        }
882
883        #[inline]
884        fn pack_value_array_2(&mut self, a: Value, b: Value) -> ValueArray2 {
885            [a, b]
886        }
887
888        #[inline]
889        fn unpack_value_array_3(&mut self, arr: &ValueArray3) -> (Value, Value, Value) {
890            let [a, b, c] = *arr;
891            (a, b, c)
892        }
893
894        #[inline]
895        fn pack_value_array_3(&mut self, a: Value, b: Value, c: Value) -> ValueArray3 {
896            [a, b, c]
897        }
898
899        #[inline]
900        fn unpack_block_array_2(&mut self, arr: &BlockArray2) -> (BlockCall, BlockCall) {
901            let [a, b] = *arr;
902            (a, b)
903        }
904
905        #[inline]
906        fn pack_block_array_2(&mut self, a: BlockCall, b: BlockCall) -> BlockArray2 {
907            [a, b]
908        }
909
910        fn u128_as_u64(&mut self, val: u128) -> Option<u64> {
911            u64::try_from(val).ok()
912        }
913
914        fn u64_as_u32(&mut self, val: u64) -> Option<u32> {
915            u32::try_from(val).ok()
916        }
917
918        fn u32_as_u16(&mut self, val: u32) -> Option<u16> {
919            val.try_into().ok()
920        }
921
922        fn i32_as_i8(&mut self, val: i32) -> Option<i8> {
923            val.try_into().ok()
924        }
925
926        fn u8_as_i8(&mut self, val: u8) -> i8 {
927            val as i8
928        }
929
930        fn u64_as_u8(&mut self, val: u64) -> u8 {
931            val as u8
932        }
933
934        fn u64_as_u16(&mut self, val: u64) -> u16 {
935            val as u16
936        }
937
938        fn u8_try_from_u64(&mut self, val: u64) -> Option<u8> {
939            u8::try_from(val).ok()
940        }
941
942        fn u8_try_from_u16(&mut self, val: u16) -> Option<u8> {
943            u8::try_from(val).ok()
944        }
945
946        fn u8_try_from_i32(&mut self, val: i32) -> Option<u8> {
947            u8::try_from(val).ok()
948        }
949
950        fn u64_try_from_i64(&mut self, val: i64) -> Option<u64> {
951            u64::try_from(val).ok()
952        }
953
954        fn u16_try_from_u64(&mut self, val: u64) -> Option<u16> {
955            u16::try_from(val).ok()
956        }
957
958        fn u32_try_from_u64(&mut self, val: u64) -> Option<u32> {
959            u32::try_from(val).ok()
960        }
961
962        fn i8_try_from_u64(&mut self, val: u64) -> Option<i8> {
963            i8::try_from(val).ok()
964        }
965
966        fn i16_try_from_u64(&mut self, val: u64) -> Option<i16> {
967            i16::try_from(val).ok()
968        }
969
970        fn i32_try_from_u64(&mut self, val: u64) -> Option<i32> {
971            i32::try_from(val).ok()
972        }
973
974        fn u128_replicated_u64(&mut self, val: u128) -> Option<u64> {
975            let low64 = val as u64 as u128;
976            if (low64 | (low64 << 64)) == val {
977                Some(low64 as u64)
978            } else {
979                None
980            }
981        }
982
983        fn u64_replicated_u32(&mut self, val: u64) -> Option<u64> {
984            let low32 = val as u32 as u64;
985            if (low32 | (low32 << 32)) == val {
986                Some(low32)
987            } else {
988                None
989            }
990        }
991
992        fn u32_replicated_u16(&mut self, val: u64) -> Option<u64> {
993            let val = val as u32;
994            let low16 = val as u16 as u32;
995            if (low16 | (low16 << 16)) == val {
996                Some(low16.into())
997            } else {
998                None
999            }
1000        }
1001
1002        fn u16_replicated_u8(&mut self, val: u64) -> Option<u8> {
1003            let val = val as u16;
1004            let low8 = val as u8 as u16;
1005            if (low8 | (low8 << 8)) == val {
1006                Some(low8 as u8)
1007            } else {
1008                None
1009            }
1010        }
1011
1012        fn f16_min(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
1013            a.minimum(b).non_nan()
1014        }
1015
1016        fn f16_max(&mut self, a: Ieee16, b: Ieee16) -> Option<Ieee16> {
1017            a.maximum(b).non_nan()
1018        }
1019
1020        fn f16_neg(&mut self, n: Ieee16) -> Ieee16 {
1021            -n
1022        }
1023
1024        fn f16_abs(&mut self, n: Ieee16) -> Ieee16 {
1025            n.abs()
1026        }
1027
1028        fn f16_copysign(&mut self, a: Ieee16, b: Ieee16) -> Ieee16 {
1029            a.copysign(b)
1030        }
1031
1032        fn f32_add(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
1033            (lhs + rhs).non_nan()
1034        }
1035
1036        fn f32_sub(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
1037            (lhs - rhs).non_nan()
1038        }
1039
1040        fn f32_mul(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
1041            (lhs * rhs).non_nan()
1042        }
1043
1044        fn f32_div(&mut self, lhs: Ieee32, rhs: Ieee32) -> Option<Ieee32> {
1045            (lhs / rhs).non_nan()
1046        }
1047
1048        fn f32_sqrt(&mut self, n: Ieee32) -> Option<Ieee32> {
1049            n.sqrt().non_nan()
1050        }
1051
1052        fn f32_ceil(&mut self, n: Ieee32) -> Option<Ieee32> {
1053            n.ceil().non_nan()
1054        }
1055
1056        fn f32_floor(&mut self, n: Ieee32) -> Option<Ieee32> {
1057            n.floor().non_nan()
1058        }
1059
1060        fn f32_trunc(&mut self, n: Ieee32) -> Option<Ieee32> {
1061            n.trunc().non_nan()
1062        }
1063
1064        fn f32_nearest(&mut self, n: Ieee32) -> Option<Ieee32> {
1065            n.round_ties_even().non_nan()
1066        }
1067
1068        fn f32_min(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
1069            a.minimum(b).non_nan()
1070        }
1071
1072        fn f32_max(&mut self, a: Ieee32, b: Ieee32) -> Option<Ieee32> {
1073            a.maximum(b).non_nan()
1074        }
1075
1076        fn f32_neg(&mut self, n: Ieee32) -> Ieee32 {
1077            -n
1078        }
1079
1080        fn f32_abs(&mut self, n: Ieee32) -> Ieee32 {
1081            n.abs()
1082        }
1083
1084        fn f32_copysign(&mut self, a: Ieee32, b: Ieee32) -> Ieee32 {
1085            a.copysign(b)
1086        }
1087
1088        fn f64_add(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
1089            (lhs + rhs).non_nan()
1090        }
1091
1092        fn f64_sub(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
1093            (lhs - rhs).non_nan()
1094        }
1095
1096        fn f64_mul(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
1097            (lhs * rhs).non_nan()
1098        }
1099
1100        fn f64_div(&mut self, lhs: Ieee64, rhs: Ieee64) -> Option<Ieee64> {
1101            (lhs / rhs).non_nan()
1102        }
1103
1104        fn f64_sqrt(&mut self, n: Ieee64) -> Option<Ieee64> {
1105            n.sqrt().non_nan()
1106        }
1107
1108        fn f64_ceil(&mut self, n: Ieee64) -> Option<Ieee64> {
1109            n.ceil().non_nan()
1110        }
1111
1112        fn f64_floor(&mut self, n: Ieee64) -> Option<Ieee64> {
1113            n.floor().non_nan()
1114        }
1115
1116        fn f64_trunc(&mut self, n: Ieee64) -> Option<Ieee64> {
1117            n.trunc().non_nan()
1118        }
1119
1120        fn f64_nearest(&mut self, n: Ieee64) -> Option<Ieee64> {
1121            n.round_ties_even().non_nan()
1122        }
1123
1124        fn f64_min(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
1125            a.minimum(b).non_nan()
1126        }
1127
1128        fn f64_max(&mut self, a: Ieee64, b: Ieee64) -> Option<Ieee64> {
1129            a.maximum(b).non_nan()
1130        }
1131
1132        fn f64_neg(&mut self, n: Ieee64) -> Ieee64 {
1133            -n
1134        }
1135
1136        fn f64_abs(&mut self, n: Ieee64) -> Ieee64 {
1137            n.abs()
1138        }
1139
1140        fn f64_copysign(&mut self, a: Ieee64, b: Ieee64) -> Ieee64 {
1141            a.copysign(b)
1142        }
1143
1144        fn f128_min(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
1145            a.minimum(b).non_nan()
1146        }
1147
1148        fn f128_max(&mut self, a: Ieee128, b: Ieee128) -> Option<Ieee128> {
1149            a.maximum(b).non_nan()
1150        }
1151
1152        fn f128_neg(&mut self, n: Ieee128) -> Ieee128 {
1153            -n
1154        }
1155
1156        fn f128_abs(&mut self, n: Ieee128) -> Ieee128 {
1157            n.abs()
1158        }
1159
1160        fn f128_copysign(&mut self, a: Ieee128, b: Ieee128) -> Ieee128 {
1161            a.copysign(b)
1162        }
1163    };
1164}