use crate::isa::riscv64::lower::isle::generated_code::VecAluOpRRRR;
use crate::isa::riscv64::lower::isle::generated_code::{
    VecAMode, VecAluOpRImm5, VecAluOpRR, VecAluOpRRImm5, VecAluOpRRR, VecAluOpRRRImm5, VecAvl,
    VecElementWidth, VecLmul, VecMaskMode, VecOpCategory, VecOpMasking, VecTailMode,
};
use crate::machinst::{OperandVisitor, RegClass};
use crate::Reg;
use core::fmt;

use super::{Type, UImm5};

impl VecAvl {
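    /// Builds a statically-known application vector length (AVL); the
    /// element count must fit in a `UImm5`.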
    pub fn _static(size: u32) -> Self {
        VecAvl::Static {
            size: UImm5::maybe_from_u8(size as u8).expect("Invalid size for AVL"),
        }
    }

    pub fn is_static(&self) -> bool {
        match self {
            VecAvl::Static { .. } => true,
        }
    }

    pub fn unwrap_static(&self) -> UImm5 {
        match self {
            VecAvl::Static { size } => *size,
        }
    }
}

impl Copy for VecAvl {}

impl PartialEq for VecAvl {
    fn eq(&self, other: &Self) -> bool {
        match (self, other) {
            (VecAvl::Static { size: lhs }, VecAvl::Static { size: rhs }) => lhs == rhs,
        }
    }
}

impl fmt::Display for VecAvl {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            VecAvl::Static { size } => write!(f, "{size}"),
        }
    }
}

impl VecElementWidth {
    pub fn from_type(ty: Type) -> Self {
        Self::from_bits(ty.lane_bits())
    }

    pub fn from_bits(bits: u32) -> Self {
        match bits {
            8 => VecElementWidth::E8,
            16 => VecElementWidth::E16,
            32 => VecElementWidth::E32,
            64 => VecElementWidth::E64,
            _ => panic!("Invalid number of bits for VecElementWidth: {bits}"),
        }
    }

    pub fn bits(&self) -> u32 {
        match self {
            VecElementWidth::E8 => 8,
            VecElementWidth::E16 => 16,
            VecElementWidth::E32 => 32,
            VecElementWidth::E64 => 64,
        }
    }

    pub fn encode(&self) -> u32 {
        match self {
            VecElementWidth::E8 => 0b000,
            VecElementWidth::E16 => 0b001,
            VecElementWidth::E32 => 0b010,
            VecElementWidth::E64 => 0b011,
        }
    }
}

impl fmt::Display for VecElementWidth {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "e{}", self.bits())
    }
}

impl VecLmul {
    pub fn encode(&self) -> u32 {
        match self {
            VecLmul::LmulF8 => 0b101,
            VecLmul::LmulF4 => 0b110,
            VecLmul::LmulF2 => 0b111,
            VecLmul::Lmul1 => 0b000,
            VecLmul::Lmul2 => 0b001,
            VecLmul::Lmul4 => 0b010,
            VecLmul::Lmul8 => 0b011,
        }
    }
}

impl fmt::Display for VecLmul {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            VecLmul::LmulF8 => write!(f, "mf8"),
            VecLmul::LmulF4 => write!(f, "mf4"),
            VecLmul::LmulF2 => write!(f, "mf2"),
            VecLmul::Lmul1 => write!(f, "m1"),
            VecLmul::Lmul2 => write!(f, "m2"),
            VecLmul::Lmul4 => write!(f, "m4"),
            VecLmul::Lmul8 => write!(f, "m8"),
        }
    }
}

impl VecTailMode {
    pub fn encode(&self) -> u32 {
        match self {
            VecTailMode::Agnostic => 1,
            VecTailMode::Undisturbed => 0,
        }
    }
}

impl fmt::Display for VecTailMode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            VecTailMode::Agnostic => write!(f, "ta"),
            VecTailMode::Undisturbed => write!(f, "tu"),
        }
    }
}

impl VecMaskMode {
    pub fn encode(&self) -> u32 {
        match self {
            VecMaskMode::Agnostic => 1,
            VecMaskMode::Undisturbed => 0,
        }
    }
}

impl fmt::Display for VecMaskMode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            VecMaskMode::Agnostic => write!(f, "ma"),
            VecMaskMode::Undisturbed => write!(f, "mu"),
        }
    }
}

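/// The `vtype` configuration of the vector unit: element width, register
/// group multiplier (LMUL), and the tail/mask agnosticism policies.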
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct VType {
    pub sew: VecElementWidth,
    pub lmul: VecLmul,
    pub tail_mode: VecTailMode,
    pub mask_mode: VecMaskMode,
}

impl VType {
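    /// Encodes this configuration in the `vtype` CSR layout used by
    /// `vsetvli`: `vlmul` in bits 2:0, `vsew` in bits 5:3, `vta` in bit 6,
    /// and `vma` in bit 7.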
    pub fn encode(&self) -> u32 {
        let mut bits = 0;
        bits |= self.lmul.encode();
        bits |= self.sew.encode() << 3;
        bits |= self.tail_mode.encode() << 6;
        bits |= self.mask_mode.encode() << 7;
        bits
    }
}

impl fmt::Display for VType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "{}, {}, {}, {}",
            self.sew, self.lmul, self.tail_mode, self.mask_mode
        )
    }
}

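/// The vector unit state programmed by `vsetvli`: the application vector
/// length (AVL) together with its `vtype` configuration.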
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct VState {
    pub avl: VecAvl,
    pub vtype: VType,
}

impl VState {
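    /// Builds the vector state used for a CLIF vector type: one lane per
    /// element, LMUL=1, and tail/mask-agnostic settings.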
    pub fn from_type(ty: Type) -> Self {
        VState {
            avl: VecAvl::_static(ty.lane_count()),
            vtype: VType {
                sew: VecElementWidth::from_type(ty),
                lmul: VecLmul::Lmul1,
                tail_mode: VecTailMode::Agnostic,
                mask_mode: VecMaskMode::Agnostic,
            },
        }
    }
}

impl fmt::Display for VState {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "#avl={}, #vtype=({})", self.avl, self.vtype)
    }
}

impl VecOpCategory {
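    /// Encodes the operand category into the `funct3` field of an OP-V
    /// instruction.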
    pub fn encode(&self) -> u32 {
        match self {
            VecOpCategory::OPIVV => 0b000,
            VecOpCategory::OPFVV => 0b001,
            VecOpCategory::OPMVV => 0b010,
            VecOpCategory::OPIVI => 0b011,
            VecOpCategory::OPIVX => 0b100,
            VecOpCategory::OPFVF => 0b101,
            VecOpCategory::OPMVX => 0b110,
            VecOpCategory::OPCFG => 0b111,
        }
    }
}

impl Copy for VecOpMasking {}
impl VecOpMasking {
    pub fn is_enabled(&self) -> bool {
        match self {
            VecOpMasking::Enabled { .. } => true,
            VecOpMasking::Disabled => false,
        }
    }

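    /// Encodes the `vm` bit: 0 when the operation is masked by `v0`,
    /// 1 when it is unmasked.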
    pub fn encode(&self) -> u32 {
        match self {
            VecOpMasking::Enabled { .. } => 0,
            VecOpMasking::Disabled => 1,
        }
    }
}

impl VecAluOpRRRR {
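    /// The major opcode shared by all vector ALU instructions is OP-V (0x57).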
    pub fn opcode(&self) -> u32 {
        0x57
    }
    pub fn funct3(&self) -> u32 {
        self.category().encode()
    }

    pub fn funct6(&self) -> u32 {
        match self {
            VecAluOpRRRR::VmaccVV | VecAluOpRRRR::VmaccVX => 0b101101,
            VecAluOpRRRR::VnmsacVV | VecAluOpRRRR::VnmsacVX => 0b101111,
            VecAluOpRRRR::VfmaccVV | VecAluOpRRRR::VfmaccVF => 0b101100,
            VecAluOpRRRR::VfnmaccVV | VecAluOpRRRR::VfnmaccVF => 0b101101,
            VecAluOpRRRR::VfmsacVV | VecAluOpRRRR::VfmsacVF => 0b101110,
            VecAluOpRRRR::VfnmsacVV | VecAluOpRRRR::VfnmsacVF => 0b101111,
            VecAluOpRRRR::Vslide1upVX => 0b001110,
        }
    }

    pub fn category(&self) -> VecOpCategory {
        match self {
            VecAluOpRRRR::VmaccVV | VecAluOpRRRR::VnmsacVV => VecOpCategory::OPMVV,
            VecAluOpRRRR::VmaccVX | VecAluOpRRRR::VnmsacVX | VecAluOpRRRR::Vslide1upVX => {
                VecOpCategory::OPMVX
            }
            VecAluOpRRRR::VfmaccVV
            | VecAluOpRRRR::VfnmaccVV
            | VecAluOpRRRR::VfmsacVV
            | VecAluOpRRRR::VfnmsacVV => VecOpCategory::OPFVV,
            VecAluOpRRRR::VfmaccVF
            | VecAluOpRRRR::VfnmaccVF
            | VecAluOpRRRR::VfmsacVF
            | VecAluOpRRRR::VfnmsacVF => VecOpCategory::OPFVF,
        }
    }

    pub fn vs1_regclass(&self) -> RegClass {
        match self.category() {
            VecOpCategory::OPMVV | VecOpCategory::OPFVV => RegClass::Vector,
            VecOpCategory::OPMVX => RegClass::Int,
            VecOpCategory::OPFVF => RegClass::Float,
            _ => unreachable!(),
        }
    }
}

impl VecInstOverlapInfo for VecAluOpRRRR {
    fn forbids_src_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRRRR::Vslide1upVX => true,
            _ => false,
        }
    }
}

impl fmt::Display for VecAluOpRRRR {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut s = format!("{self:?}");
        s.make_ascii_lowercase();
        let (opcode, category) = s.split_at(s.len() - 2);
        f.write_str(&format!("{opcode}.{category}"))
    }
}

impl VecAluOpRRRImm5 {
    pub fn opcode(&self) -> u32 {
        0x57
    }
    pub fn funct3(&self) -> u32 {
        self.category().encode()
    }

    pub fn funct6(&self) -> u32 {
        match self {
            VecAluOpRRRImm5::VslideupVI => 0b001110,
        }
    }

    pub fn category(&self) -> VecOpCategory {
        match self {
            VecAluOpRRRImm5::VslideupVI => VecOpCategory::OPIVI,
        }
    }

    pub fn imm_is_unsigned(&self) -> bool {
        match self {
            VecAluOpRRRImm5::VslideupVI => true,
        }
    }
}

impl VecInstOverlapInfo for VecAluOpRRRImm5 {
    fn forbids_src_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRRRImm5::VslideupVI => true,
        }
    }
}

impl fmt::Display for VecAluOpRRRImm5 {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut s = format!("{self:?}");
        s.make_ascii_lowercase();
        let (opcode, category) = s.split_at(s.len() - 2);
        f.write_str(&format!("{opcode}.{category}"))
    }
}

impl VecAluOpRRR {
    pub fn opcode(&self) -> u32 {
        0x57
    }
    pub fn funct3(&self) -> u32 {
        self.category().encode()
    }
    pub fn funct6(&self) -> u32 {
        match self {
            VecAluOpRRR::VaddVV
            | VecAluOpRRR::VaddVX
            | VecAluOpRRR::VfaddVV
            | VecAluOpRRR::VfaddVF => 0b000000,
            VecAluOpRRR::VsubVV
            | VecAluOpRRR::VsubVX
            | VecAluOpRRR::VfsubVV
            | VecAluOpRRR::VfsubVF => 0b000010,
            VecAluOpRRR::VrsubVX => 0b000011,
            VecAluOpRRR::VmulVV | VecAluOpRRR::VmulVX => 0b100101,
            VecAluOpRRR::VmulhVV | VecAluOpRRR::VmulhVX => 0b100111,
            VecAluOpRRR::VmulhuVV
            | VecAluOpRRR::VmulhuVX
            | VecAluOpRRR::VfmulVV
            | VecAluOpRRR::VfmulVF => 0b100100,
            VecAluOpRRR::VsmulVV | VecAluOpRRR::VsmulVX => 0b100111,
            VecAluOpRRR::VsllVV | VecAluOpRRR::VsllVX => 0b100101,
            VecAluOpRRR::VsrlVV | VecAluOpRRR::VsrlVX => 0b101000,
            VecAluOpRRR::VsraVV | VecAluOpRRR::VsraVX => 0b101001,
            VecAluOpRRR::VandVV | VecAluOpRRR::VandVX => 0b001001,
            VecAluOpRRR::VorVV | VecAluOpRRR::VorVX => 0b001010,
            VecAluOpRRR::VxorVV | VecAluOpRRR::VxorVX => 0b001011,
            VecAluOpRRR::VminuVV | VecAluOpRRR::VminuVX | VecAluOpRRR::VredminuVS => 0b000100,
            VecAluOpRRR::VminVV | VecAluOpRRR::VminVX => 0b000101,
            VecAluOpRRR::VmaxuVV | VecAluOpRRR::VmaxuVX | VecAluOpRRR::VredmaxuVS => 0b000110,
            VecAluOpRRR::VmaxVV | VecAluOpRRR::VmaxVX => 0b000111,
            VecAluOpRRR::VslidedownVX => 0b001111,
            VecAluOpRRR::VfrsubVF => 0b100111,
            VecAluOpRRR::VmergeVVM
            | VecAluOpRRR::VmergeVXM
            | VecAluOpRRR::VfmergeVFM
            | VecAluOpRRR::VcompressVM => 0b010111,
            VecAluOpRRR::VfdivVV
            | VecAluOpRRR::VfdivVF
            | VecAluOpRRR::VsadduVV
            | VecAluOpRRR::VsadduVX => 0b100000,
            VecAluOpRRR::VfrdivVF | VecAluOpRRR::VsaddVV | VecAluOpRRR::VsaddVX => 0b100001,
            VecAluOpRRR::VfminVV => 0b000100,
            VecAluOpRRR::VfmaxVV => 0b000110,
            VecAluOpRRR::VssubuVV | VecAluOpRRR::VssubuVX => 0b100010,
            VecAluOpRRR::VssubVV | VecAluOpRRR::VssubVX => 0b100011,
            VecAluOpRRR::VfsgnjVV | VecAluOpRRR::VfsgnjVF => 0b001000,
            VecAluOpRRR::VfsgnjnVV => 0b001001,
            VecAluOpRRR::VfsgnjxVV => 0b001010,
            VecAluOpRRR::VrgatherVV | VecAluOpRRR::VrgatherVX => 0b001100,
            VecAluOpRRR::VwadduVV | VecAluOpRRR::VwadduVX => 0b110000,
            VecAluOpRRR::VwaddVV | VecAluOpRRR::VwaddVX => 0b110001,
            VecAluOpRRR::VwsubuVV | VecAluOpRRR::VwsubuVX => 0b110010,
            VecAluOpRRR::VwsubVV | VecAluOpRRR::VwsubVX => 0b110011,
            VecAluOpRRR::VwadduWV | VecAluOpRRR::VwadduWX => 0b110100,
            VecAluOpRRR::VwaddWV | VecAluOpRRR::VwaddWX => 0b110101,
            VecAluOpRRR::VwsubuWV | VecAluOpRRR::VwsubuWX => 0b110110,
            VecAluOpRRR::VwsubWV | VecAluOpRRR::VwsubWX => 0b110111,
            VecAluOpRRR::VmseqVV
            | VecAluOpRRR::VmseqVX
            | VecAluOpRRR::VmfeqVV
            | VecAluOpRRR::VmfeqVF => 0b011000,
            VecAluOpRRR::VmsneVV
            | VecAluOpRRR::VmsneVX
            | VecAluOpRRR::VmfleVV
            | VecAluOpRRR::VmfleVF
            | VecAluOpRRR::VmandMM => 0b011001,
            VecAluOpRRR::VmsltuVV | VecAluOpRRR::VmsltuVX | VecAluOpRRR::VmorMM => 0b011010,
            VecAluOpRRR::VmsltVV
            | VecAluOpRRR::VmsltVX
            | VecAluOpRRR::VmfltVV
            | VecAluOpRRR::VmfltVF => 0b011011,
            VecAluOpRRR::VmsleuVV
            | VecAluOpRRR::VmsleuVX
            | VecAluOpRRR::VmfneVV
            | VecAluOpRRR::VmfneVF => 0b011100,
            VecAluOpRRR::VmsleVV
            | VecAluOpRRR::VmsleVX
            | VecAluOpRRR::VmfgtVF
            | VecAluOpRRR::VmnandMM => 0b011101,
            VecAluOpRRR::VmsgtuVX | VecAluOpRRR::VmnorMM => 0b011110,
            VecAluOpRRR::VmsgtVX | VecAluOpRRR::VmfgeVF => 0b011111,
        }
    }

    pub fn category(&self) -> VecOpCategory {
        match self {
            VecAluOpRRR::VaddVV
            | VecAluOpRRR::VsaddVV
            | VecAluOpRRR::VsadduVV
            | VecAluOpRRR::VsubVV
            | VecAluOpRRR::VssubVV
            | VecAluOpRRR::VssubuVV
            | VecAluOpRRR::VsmulVV
            | VecAluOpRRR::VsllVV
            | VecAluOpRRR::VsrlVV
            | VecAluOpRRR::VsraVV
            | VecAluOpRRR::VandVV
            | VecAluOpRRR::VorVV
            | VecAluOpRRR::VxorVV
            | VecAluOpRRR::VminuVV
            | VecAluOpRRR::VminVV
            | VecAluOpRRR::VmaxuVV
            | VecAluOpRRR::VmaxVV
            | VecAluOpRRR::VmergeVVM
            | VecAluOpRRR::VrgatherVV
            | VecAluOpRRR::VmseqVV
            | VecAluOpRRR::VmsneVV
            | VecAluOpRRR::VmsltuVV
            | VecAluOpRRR::VmsltVV
            | VecAluOpRRR::VmsleuVV
            | VecAluOpRRR::VmsleVV => VecOpCategory::OPIVV,
            VecAluOpRRR::VwaddVV
            | VecAluOpRRR::VwaddWV
            | VecAluOpRRR::VwadduVV
            | VecAluOpRRR::VwadduWV
            | VecAluOpRRR::VwsubVV
            | VecAluOpRRR::VwsubWV
            | VecAluOpRRR::VwsubuVV
            | VecAluOpRRR::VwsubuWV
            | VecAluOpRRR::VmulVV
            | VecAluOpRRR::VmulhVV
            | VecAluOpRRR::VmulhuVV
            | VecAluOpRRR::VredmaxuVS
            | VecAluOpRRR::VredminuVS
            | VecAluOpRRR::VcompressVM
            | VecAluOpRRR::VmandMM
            | VecAluOpRRR::VmorMM
            | VecAluOpRRR::VmnandMM
            | VecAluOpRRR::VmnorMM => VecOpCategory::OPMVV,
            VecAluOpRRR::VwaddVX
            | VecAluOpRRR::VwadduVX
            | VecAluOpRRR::VwadduWX
            | VecAluOpRRR::VwaddWX
            | VecAluOpRRR::VwsubVX
            | VecAluOpRRR::VwsubuVX
            | VecAluOpRRR::VwsubuWX
            | VecAluOpRRR::VwsubWX
            | VecAluOpRRR::VmulVX
            | VecAluOpRRR::VmulhVX
            | VecAluOpRRR::VmulhuVX => VecOpCategory::OPMVX,
            VecAluOpRRR::VaddVX
            | VecAluOpRRR::VsaddVX
            | VecAluOpRRR::VsadduVX
            | VecAluOpRRR::VsubVX
            | VecAluOpRRR::VssubVX
            | VecAluOpRRR::VssubuVX
            | VecAluOpRRR::VrsubVX
            | VecAluOpRRR::VsmulVX
            | VecAluOpRRR::VsllVX
            | VecAluOpRRR::VsrlVX
            | VecAluOpRRR::VsraVX
            | VecAluOpRRR::VandVX
            | VecAluOpRRR::VorVX
            | VecAluOpRRR::VxorVX
            | VecAluOpRRR::VminuVX
            | VecAluOpRRR::VminVX
            | VecAluOpRRR::VmaxuVX
            | VecAluOpRRR::VmaxVX
            | VecAluOpRRR::VslidedownVX
            | VecAluOpRRR::VmergeVXM
            | VecAluOpRRR::VrgatherVX
            | VecAluOpRRR::VmseqVX
            | VecAluOpRRR::VmsneVX
            | VecAluOpRRR::VmsltuVX
            | VecAluOpRRR::VmsltVX
            | VecAluOpRRR::VmsleuVX
            | VecAluOpRRR::VmsleVX
            | VecAluOpRRR::VmsgtuVX
            | VecAluOpRRR::VmsgtVX => VecOpCategory::OPIVX,
            VecAluOpRRR::VfaddVV
            | VecAluOpRRR::VfsubVV
            | VecAluOpRRR::VfmulVV
            | VecAluOpRRR::VfdivVV
            | VecAluOpRRR::VfmaxVV
            | VecAluOpRRR::VfminVV
            | VecAluOpRRR::VfsgnjVV
            | VecAluOpRRR::VfsgnjnVV
            | VecAluOpRRR::VfsgnjxVV
            | VecAluOpRRR::VmfeqVV
            | VecAluOpRRR::VmfneVV
            | VecAluOpRRR::VmfltVV
            | VecAluOpRRR::VmfleVV => VecOpCategory::OPFVV,
            VecAluOpRRR::VfaddVF
            | VecAluOpRRR::VfsubVF
            | VecAluOpRRR::VfrsubVF
            | VecAluOpRRR::VfmulVF
            | VecAluOpRRR::VfdivVF
            | VecAluOpRRR::VfrdivVF
            | VecAluOpRRR::VfmergeVFM
            | VecAluOpRRR::VfsgnjVF
            | VecAluOpRRR::VmfeqVF
            | VecAluOpRRR::VmfneVF
            | VecAluOpRRR::VmfltVF
            | VecAluOpRRR::VmfleVF
            | VecAluOpRRR::VmfgtVF
            | VecAluOpRRR::VmfgeVF => VecOpCategory::OPFVF,
        }
    }

    pub fn vs1_regclass(&self) -> RegClass {
        match self.category() {
            VecOpCategory::OPIVV | VecOpCategory::OPFVV | VecOpCategory::OPMVV => RegClass::Vector,
            VecOpCategory::OPIVX | VecOpCategory::OPMVX => RegClass::Int,
            VecOpCategory::OPFVF => RegClass::Float,
            _ => unreachable!(),
        }
    }
}

impl VecInstOverlapInfo for VecAluOpRRR {
    fn forbids_src_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRRR::VrgatherVV
            | VecAluOpRRR::VrgatherVX
            | VecAluOpRRR::VcompressVM
            | VecAluOpRRR::VwadduVV
            | VecAluOpRRR::VwadduVX
            | VecAluOpRRR::VwaddVV
            | VecAluOpRRR::VwaddVX
            | VecAluOpRRR::VwadduWV
            | VecAluOpRRR::VwadduWX
            | VecAluOpRRR::VwaddWV
            | VecAluOpRRR::VwaddWX
            | VecAluOpRRR::VwsubuVV
            | VecAluOpRRR::VwsubuVX
            | VecAluOpRRR::VwsubVV
            | VecAluOpRRR::VwsubVX
            | VecAluOpRRR::VwsubuWV
            | VecAluOpRRR::VwsubuWX
            | VecAluOpRRR::VwsubWV
            | VecAluOpRRR::VwsubWX => true,
            _ => false,
        }
    }

    fn forbids_mask_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRRR::VredmaxuVS
            | VecAluOpRRR::VredminuVS
            | VecAluOpRRR::VmandMM
            | VecAluOpRRR::VmorMM
            | VecAluOpRRR::VmnandMM
            | VecAluOpRRR::VmnorMM
            | VecAluOpRRR::VmseqVX
            | VecAluOpRRR::VmsneVX
            | VecAluOpRRR::VmsltuVX
            | VecAluOpRRR::VmsltVX
            | VecAluOpRRR::VmsleuVX
            | VecAluOpRRR::VmsleVX
            | VecAluOpRRR::VmsgtuVX
            | VecAluOpRRR::VmsgtVX
            | VecAluOpRRR::VmfeqVV
            | VecAluOpRRR::VmfneVV
            | VecAluOpRRR::VmfltVV
            | VecAluOpRRR::VmfleVV
            | VecAluOpRRR::VmfeqVF
            | VecAluOpRRR::VmfneVF
            | VecAluOpRRR::VmfltVF
            | VecAluOpRRR::VmfleVF
            | VecAluOpRRR::VmfgtVF
            | VecAluOpRRR::VmfgeVF => false,
            _ => true,
        }
    }
}

impl fmt::Display for VecAluOpRRR {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let suffix_length = match self {
            VecAluOpRRR::VmergeVVM | VecAluOpRRR::VmergeVXM | VecAluOpRRR::VfmergeVFM => 3,
            _ => 2,
        };

        let mut s = format!("{self:?}");
        s.make_ascii_lowercase();
        let (opcode, category) = s.split_at(s.len() - suffix_length);
        f.write_str(&format!("{opcode}.{category}"))
    }
}

impl VecAluOpRRImm5 {
    pub fn opcode(&self) -> u32 {
        0x57
    }
    pub fn funct3(&self) -> u32 {
        self.category().encode()
    }

    pub fn funct6(&self) -> u32 {
        match self {
            VecAluOpRRImm5::VaddVI => 0b000000,
            VecAluOpRRImm5::VrsubVI => 0b000011,
            VecAluOpRRImm5::VsllVI => 0b100101,
            VecAluOpRRImm5::VsrlVI => 0b101000,
            VecAluOpRRImm5::VsraVI => 0b101001,
            VecAluOpRRImm5::VandVI => 0b001001,
            VecAluOpRRImm5::VorVI => 0b001010,
            VecAluOpRRImm5::VxorVI => 0b001011,
            VecAluOpRRImm5::VslidedownVI => 0b001111,
            VecAluOpRRImm5::VssrlVI => 0b101010,
            VecAluOpRRImm5::VmergeVIM => 0b010111,
            VecAluOpRRImm5::VsadduVI => 0b100000,
            VecAluOpRRImm5::VsaddVI => 0b100001,
            VecAluOpRRImm5::VrgatherVI => 0b001100,
            VecAluOpRRImm5::VmvrV => 0b100111,
            VecAluOpRRImm5::VnclipWI => 0b101111,
            VecAluOpRRImm5::VnclipuWI => 0b101110,
            VecAluOpRRImm5::VmseqVI => 0b011000,
            VecAluOpRRImm5::VmsneVI => 0b011001,
            VecAluOpRRImm5::VmsleuVI => 0b011100,
            VecAluOpRRImm5::VmsleVI => 0b011101,
            VecAluOpRRImm5::VmsgtuVI => 0b011110,
            VecAluOpRRImm5::VmsgtVI => 0b011111,
        }
    }

    pub fn category(&self) -> VecOpCategory {
        match self {
            VecAluOpRRImm5::VaddVI
            | VecAluOpRRImm5::VrsubVI
            | VecAluOpRRImm5::VsllVI
            | VecAluOpRRImm5::VsrlVI
            | VecAluOpRRImm5::VsraVI
            | VecAluOpRRImm5::VandVI
            | VecAluOpRRImm5::VorVI
            | VecAluOpRRImm5::VxorVI
            | VecAluOpRRImm5::VssrlVI
            | VecAluOpRRImm5::VslidedownVI
            | VecAluOpRRImm5::VmergeVIM
            | VecAluOpRRImm5::VsadduVI
            | VecAluOpRRImm5::VsaddVI
            | VecAluOpRRImm5::VrgatherVI
            | VecAluOpRRImm5::VmvrV
            | VecAluOpRRImm5::VnclipWI
            | VecAluOpRRImm5::VnclipuWI
            | VecAluOpRRImm5::VmseqVI
            | VecAluOpRRImm5::VmsneVI
            | VecAluOpRRImm5::VmsleuVI
            | VecAluOpRRImm5::VmsleVI
            | VecAluOpRRImm5::VmsgtuVI
            | VecAluOpRRImm5::VmsgtVI => VecOpCategory::OPIVI,
        }
    }

    pub fn imm_is_unsigned(&self) -> bool {
        match self {
            VecAluOpRRImm5::VsllVI
            | VecAluOpRRImm5::VsrlVI
            | VecAluOpRRImm5::VssrlVI
            | VecAluOpRRImm5::VsraVI
            | VecAluOpRRImm5::VslidedownVI
            | VecAluOpRRImm5::VrgatherVI
            | VecAluOpRRImm5::VmvrV
            | VecAluOpRRImm5::VnclipWI
            | VecAluOpRRImm5::VnclipuWI => true,
            VecAluOpRRImm5::VaddVI
            | VecAluOpRRImm5::VrsubVI
            | VecAluOpRRImm5::VandVI
            | VecAluOpRRImm5::VorVI
            | VecAluOpRRImm5::VxorVI
            | VecAluOpRRImm5::VmergeVIM
            | VecAluOpRRImm5::VsadduVI
            | VecAluOpRRImm5::VsaddVI
            | VecAluOpRRImm5::VmseqVI
            | VecAluOpRRImm5::VmsneVI
            | VecAluOpRRImm5::VmsleuVI
            | VecAluOpRRImm5::VmsleVI
            | VecAluOpRRImm5::VmsgtuVI
            | VecAluOpRRImm5::VmsgtVI => false,
        }
    }
}

impl VecInstOverlapInfo for VecAluOpRRImm5 {
    fn forbids_src_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRRImm5::VrgatherVI => true,
            _ => false,
        }
    }

    fn forbids_mask_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRRImm5::VmseqVI
            | VecAluOpRRImm5::VmsneVI
            | VecAluOpRRImm5::VmsleuVI
            | VecAluOpRRImm5::VmsleVI
            | VecAluOpRRImm5::VmsgtuVI
            | VecAluOpRRImm5::VmsgtVI => false,
            _ => true,
        }
    }
}

impl fmt::Display for VecAluOpRRImm5 {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let suffix_length = match self {
            VecAluOpRRImm5::VmergeVIM => 3,
            _ => 2,
        };

        let mut s = format!("{self:?}");
        s.make_ascii_lowercase();
        let (opcode, category) = s.split_at(s.len() - suffix_length);
        f.write_str(&format!("{opcode}.{category}"))
    }
}

impl VecAluOpRR {
    pub fn opcode(&self) -> u32 {
        0x57
    }

    pub fn funct3(&self) -> u32 {
        self.category().encode()
    }

    pub fn funct6(&self) -> u32 {
        match self {
            VecAluOpRR::VmvSX | VecAluOpRR::VmvXS | VecAluOpRR::VfmvSF | VecAluOpRR::VfmvFS => {
                0b010000
            }
            VecAluOpRR::VzextVF2
            | VecAluOpRR::VzextVF4
            | VecAluOpRR::VzextVF8
            | VecAluOpRR::VsextVF2
            | VecAluOpRR::VsextVF4
            | VecAluOpRR::VsextVF8 => 0b010010,
            VecAluOpRR::VfsqrtV => 0b010011,
            VecAluOpRR::VmvVV | VecAluOpRR::VmvVX | VecAluOpRR::VfmvVF => 0b010111,
            VecAluOpRR::VfcvtxufV
            | VecAluOpRR::VfcvtxfV
            | VecAluOpRR::VfcvtrtzxufV
            | VecAluOpRR::VfcvtrtzxfV
            | VecAluOpRR::VfcvtfxuV
            | VecAluOpRR::VfcvtfxV
            | VecAluOpRR::VfwcvtffV
            | VecAluOpRR::VfncvtffW => 0b010010,
        }
    }

    pub fn category(&self) -> VecOpCategory {
        match self {
            VecAluOpRR::VmvSX => VecOpCategory::OPMVX,
            VecAluOpRR::VmvXS
            | VecAluOpRR::VzextVF2
            | VecAluOpRR::VzextVF4
            | VecAluOpRR::VzextVF8
            | VecAluOpRR::VsextVF2
            | VecAluOpRR::VsextVF4
            | VecAluOpRR::VsextVF8 => VecOpCategory::OPMVV,
            VecAluOpRR::VfmvSF | VecAluOpRR::VfmvVF => VecOpCategory::OPFVF,
            VecAluOpRR::VfmvFS
            | VecAluOpRR::VfsqrtV
            | VecAluOpRR::VfcvtxufV
            | VecAluOpRR::VfcvtxfV
            | VecAluOpRR::VfcvtrtzxufV
            | VecAluOpRR::VfcvtrtzxfV
            | VecAluOpRR::VfcvtfxuV
            | VecAluOpRR::VfcvtfxV
            | VecAluOpRR::VfwcvtffV
            | VecAluOpRR::VfncvtffW => VecOpCategory::OPFVV,
            VecAluOpRR::VmvVV => VecOpCategory::OPIVV,
            VecAluOpRR::VmvVX => VecOpCategory::OPIVX,
        }
    }

    pub fn aux_encoding(&self) -> u32 {
        match self {
            VecAluOpRR::VmvSX => 0b00000,
            VecAluOpRR::VmvXS => 0b00000,
            VecAluOpRR::VfmvSF => 0b00000,
            VecAluOpRR::VfmvFS => 0b00000,
            VecAluOpRR::VfsqrtV => 0b00000,
            VecAluOpRR::VzextVF8 => 0b00010,
            VecAluOpRR::VsextVF8 => 0b00011,
            VecAluOpRR::VzextVF4 => 0b00100,
            VecAluOpRR::VsextVF4 => 0b00101,
            VecAluOpRR::VzextVF2 => 0b00110,
            VecAluOpRR::VsextVF2 => 0b00111,
            VecAluOpRR::VfcvtxufV => 0b00000,
            VecAluOpRR::VfcvtxfV => 0b00001,
            VecAluOpRR::VfcvtrtzxufV => 0b00110,
            VecAluOpRR::VfcvtrtzxfV => 0b00111,
            VecAluOpRR::VfcvtfxuV => 0b00010,
            VecAluOpRR::VfcvtfxV => 0b00011,
            VecAluOpRR::VfwcvtffV => 0b01100,
            VecAluOpRR::VfncvtffW => 0b10100,
            VecAluOpRR::VmvVV | VecAluOpRR::VmvVX | VecAluOpRR::VfmvVF => 0,
        }
    }

    pub fn vs_is_vs2_encoded(&self) -> bool {
        match self {
            VecAluOpRR::VmvXS
            | VecAluOpRR::VfmvFS
            | VecAluOpRR::VfsqrtV
            | VecAluOpRR::VzextVF2
            | VecAluOpRR::VzextVF4
            | VecAluOpRR::VzextVF8
            | VecAluOpRR::VsextVF2
            | VecAluOpRR::VsextVF4
            | VecAluOpRR::VsextVF8
            | VecAluOpRR::VfcvtxufV
            | VecAluOpRR::VfcvtxfV
            | VecAluOpRR::VfcvtrtzxufV
            | VecAluOpRR::VfcvtrtzxfV
            | VecAluOpRR::VfcvtfxuV
            | VecAluOpRR::VfcvtfxV
            | VecAluOpRR::VfwcvtffV
            | VecAluOpRR::VfncvtffW => true,
            VecAluOpRR::VmvSX
            | VecAluOpRR::VfmvSF
            | VecAluOpRR::VmvVV
            | VecAluOpRR::VmvVX
            | VecAluOpRR::VfmvVF => false,
        }
    }

    pub fn dst_regclass(&self) -> RegClass {
        match self {
            VecAluOpRR::VfmvSF
            | VecAluOpRR::VmvSX
            | VecAluOpRR::VmvVV
            | VecAluOpRR::VmvVX
            | VecAluOpRR::VfmvVF
            | VecAluOpRR::VfsqrtV
            | VecAluOpRR::VzextVF2
            | VecAluOpRR::VzextVF4
            | VecAluOpRR::VzextVF8
            | VecAluOpRR::VsextVF2
            | VecAluOpRR::VsextVF4
            | VecAluOpRR::VsextVF8
            | VecAluOpRR::VfcvtxufV
            | VecAluOpRR::VfcvtxfV
            | VecAluOpRR::VfcvtrtzxufV
            | VecAluOpRR::VfcvtrtzxfV
            | VecAluOpRR::VfcvtfxuV
            | VecAluOpRR::VfcvtfxV
            | VecAluOpRR::VfwcvtffV
            | VecAluOpRR::VfncvtffW => RegClass::Vector,
            VecAluOpRR::VmvXS => RegClass::Int,
            VecAluOpRR::VfmvFS => RegClass::Float,
        }
    }

    pub fn src_regclass(&self) -> RegClass {
        match self {
            VecAluOpRR::VmvXS
            | VecAluOpRR::VfmvFS
            | VecAluOpRR::VmvVV
            | VecAluOpRR::VfsqrtV
            | VecAluOpRR::VzextVF2
            | VecAluOpRR::VzextVF4
            | VecAluOpRR::VzextVF8
            | VecAluOpRR::VsextVF2
            | VecAluOpRR::VsextVF4
            | VecAluOpRR::VsextVF8
            | VecAluOpRR::VfcvtxufV
            | VecAluOpRR::VfcvtxfV
            | VecAluOpRR::VfcvtrtzxufV
            | VecAluOpRR::VfcvtrtzxfV
            | VecAluOpRR::VfcvtfxuV
            | VecAluOpRR::VfcvtfxV
            | VecAluOpRR::VfwcvtffV
            | VecAluOpRR::VfncvtffW => RegClass::Vector,
            VecAluOpRR::VfmvSF | VecAluOpRR::VfmvVF => RegClass::Float,
            VecAluOpRR::VmvSX | VecAluOpRR::VmvVX => RegClass::Int,
        }
    }
}

impl VecInstOverlapInfo for VecAluOpRR {
    fn forbids_src_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRR::VzextVF2
            | VecAluOpRR::VzextVF4
            | VecAluOpRR::VzextVF8
            | VecAluOpRR::VsextVF2
            | VecAluOpRR::VsextVF4
            | VecAluOpRR::VsextVF8
            | VecAluOpRR::VfwcvtffV
            | VecAluOpRR::VfncvtffW => true,
            _ => false,
        }
    }
}

impl fmt::Display for VecAluOpRR {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            VecAluOpRR::VmvSX => "vmv.s.x",
            VecAluOpRR::VmvXS => "vmv.x.s",
            VecAluOpRR::VfmvSF => "vfmv.s.f",
            VecAluOpRR::VfmvFS => "vfmv.f.s",
            VecAluOpRR::VfsqrtV => "vfsqrt.v",
            VecAluOpRR::VzextVF2 => "vzext.vf2",
            VecAluOpRR::VzextVF4 => "vzext.vf4",
            VecAluOpRR::VzextVF8 => "vzext.vf8",
            VecAluOpRR::VsextVF2 => "vsext.vf2",
            VecAluOpRR::VsextVF4 => "vsext.vf4",
            VecAluOpRR::VsextVF8 => "vsext.vf8",
            VecAluOpRR::VmvVV => "vmv.v.v",
            VecAluOpRR::VmvVX => "vmv.v.x",
            VecAluOpRR::VfmvVF => "vfmv.v.f",
            VecAluOpRR::VfcvtxufV => "vfcvt.xu.f.v",
            VecAluOpRR::VfcvtxfV => "vfcvt.x.f.v",
            VecAluOpRR::VfcvtrtzxufV => "vfcvt.rtz.xu.f.v",
            VecAluOpRR::VfcvtrtzxfV => "vfcvt.rtz.x.f.v",
            VecAluOpRR::VfcvtfxuV => "vfcvt.f.xu.v",
            VecAluOpRR::VfcvtfxV => "vfcvt.f.x.v",
            VecAluOpRR::VfwcvtffV => "vfwcvt.f.f.v",
            VecAluOpRR::VfncvtffW => "vfncvt.f.f.w",
        })
    }
}

impl VecAluOpRImm5 {
    pub fn opcode(&self) -> u32 {
        0x57
    }
    pub fn funct3(&self) -> u32 {
        self.category().encode()
    }

    pub fn funct6(&self) -> u32 {
        match self {
            VecAluOpRImm5::VmvVI => 0b010111,
        }
    }

    pub fn category(&self) -> VecOpCategory {
        match self {
            VecAluOpRImm5::VmvVI => VecOpCategory::OPIVI,
        }
    }

    pub fn aux_encoding(&self) -> u32 {
        match self {
            VecAluOpRImm5::VmvVI => 0,
        }
    }
}

impl VecInstOverlapInfo for VecAluOpRImm5 {
    fn forbids_src_dst_overlaps(&self) -> bool {
        match self {
            VecAluOpRImm5::VmvVI => false,
        }
    }
}

impl fmt::Display for VecAluOpRImm5 {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match self {
            VecAluOpRImm5::VmvVI => "vmv.v.i",
        })
    }
}

impl VecAMode {
    pub fn get_base_register(&self) -> Option<Reg> {
        match self {
            VecAMode::UnitStride { base, .. } => base.get_base_register(),
        }
    }

    pub fn get_operands(&mut self, collector: &mut impl OperandVisitor) {
        match self {
            VecAMode::UnitStride { base, .. } => base.get_operands(collector),
        }
    }

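    /// The `mop` field of a vector load/store, which selects the addressing
    /// mode; unit-stride accesses use 0b00.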
    pub fn mop(&self) -> u32 {
        match self {
            VecAMode::UnitStride { .. } => 0b00,
        }
    }

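    /// The `lumop` field, which provides additional encoding space for
    /// unit-stride loads; 0b00000 is a plain unit-stride load.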
    pub fn lumop(&self) -> u32 {
        match self {
            VecAMode::UnitStride { .. } => 0b00000,
        }
    }

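    /// The `sumop` field, the store-side counterpart of `lumop`; 0b00000 is
    /// a plain unit-stride store.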
    pub fn sumop(&self) -> u32 {
        match self {
            VecAMode::UnitStride { .. } => 0b00000,
        }
    }

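    /// The `nf` field, which encodes the number of fields per segment minus
    /// one; 0b000 means a single, non-segmented access.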
    pub fn nf(&self) -> u32 {
        match self {
            VecAMode::UnitStride { .. } => 0b000,
        }
    }
}

pub trait VecInstOverlapInfo {
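    /// Whether the destination register group of this instruction may not
    /// overlap its source register groups (used for ops such as widening
    /// and narrowing arithmetic, slides, gathers, and compress).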
    fn forbids_src_dst_overlaps(&self) -> bool;

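    /// Whether, when the operation is masked, the destination may not
    /// overlap the mask register `v0`. Defaults to true; mask-writing and
    /// reduction ops override this to allow the overlap.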
    fn forbids_mask_dst_overlaps(&self) -> bool {
        true
    }

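    /// Whether any overlap with the destination is forbidden for this
    /// instruction: either the sources may not overlap it, or the op is
    /// masked and the mask register may not overlap it.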
    fn forbids_overlaps(&self, mask: &VecOpMasking) -> bool {
        self.forbids_src_dst_overlaps() || (mask.is_enabled() && self.forbids_mask_dst_overlaps())
    }
}
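
// A small sanity-check sketch for the `vtype` bit layout built by
// `VType::encode`; the expected value below is derived from the field
// encodings defined in this file (vlmul[2:0], vsew[5:3], vta[6], vma[7]).
#[cfg(test)]
mod vtype_encoding_tests {
    use super::*;

    #[test]
    fn encode_e64_m1_ta_ma() {
        let vtype = VType {
            sew: VecElementWidth::E64,
            lmul: VecLmul::Lmul1,
            tail_mode: VecTailMode::Agnostic,
            mask_mode: VecMaskMode::Agnostic,
        };
        // vma = 1 (bit 7), vta = 1 (bit 6), vsew = 0b011 (bits 5:3), vlmul = 0b000 (bits 2:0).
        assert_eq!(vtype.encode(), 0b1101_1000);
        assert_eq!(format!("{vtype}"), "e64, m1, ta, ma");
    }
}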