
Commit e42a70a

jiahanxie353 authored and michaelmaitland committed Jan 10, 2024
[RISCV][GISel] IRTranslate and Legalize some instructions with scalable vector type
* Add IRTranslate tests for ADD, SUB, AND, OR, and XOR with scalable vector types to show that they work as expected.
* Legalize G_ADD, G_SUB, G_AND, G_OR, and G_XOR of scalable vector type for the RISC-V vector extension.
1 parent a9f39ff commit e42a70a

File tree

9 files changed: +2369 -2 lines changed

llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
Lines changed: 40 additions & 0 deletions

@@ -47,10 +47,50 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   const LLT s32 = LLT::scalar(32);
   const LLT s64 = LLT::scalar(64);
 
+  const LLT nxv1s8 = LLT::scalable_vector(1, s8);
+  const LLT nxv2s8 = LLT::scalable_vector(2, s8);
+  const LLT nxv4s8 = LLT::scalable_vector(4, s8);
+  const LLT nxv8s8 = LLT::scalable_vector(8, s8);
+  const LLT nxv16s8 = LLT::scalable_vector(16, s8);
+  const LLT nxv32s8 = LLT::scalable_vector(32, s8);
+  const LLT nxv64s8 = LLT::scalable_vector(64, s8);
+
+  const LLT nxv1s16 = LLT::scalable_vector(1, s16);
+  const LLT nxv2s16 = LLT::scalable_vector(2, s16);
+  const LLT nxv4s16 = LLT::scalable_vector(4, s16);
+  const LLT nxv8s16 = LLT::scalable_vector(8, s16);
+  const LLT nxv16s16 = LLT::scalable_vector(16, s16);
+  const LLT nxv32s16 = LLT::scalable_vector(32, s16);
+
+  const LLT nxv1s32 = LLT::scalable_vector(1, s32);
+  const LLT nxv2s32 = LLT::scalable_vector(2, s32);
+  const LLT nxv4s32 = LLT::scalable_vector(4, s32);
+  const LLT nxv8s32 = LLT::scalable_vector(8, s32);
+  const LLT nxv16s32 = LLT::scalable_vector(16, s32);
+
+  const LLT nxv1s64 = LLT::scalable_vector(1, s64);
+  const LLT nxv2s64 = LLT::scalable_vector(2, s64);
+  const LLT nxv4s64 = LLT::scalable_vector(4, s64);
+  const LLT nxv8s64 = LLT::scalable_vector(8, s64);
+
   using namespace TargetOpcode;
 
+  auto AllVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
+                    nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
+                    nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
+                    nxv1s64,  nxv2s64, nxv4s64, nxv8s64};
+
   getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
       .legalFor({s32, sXLen})
+      .legalIf(all(
+          typeInSet(0, AllVecTys),
+          LegalityPredicate([=, &ST](const LegalityQuery &Query) {
+            return ST.hasVInstructions() &&
+                   (Query.Types[0].getScalarSizeInBits() != 64 ||
+                    ST.hasVInstructionsI64()) &&
+                   (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
+                    ST.getELen() == 64);
+          })))
       .widenScalarToNextPow2(0)
       .clampScalar(0, s32, sXLen);

llvm/lib/Target/RISCV/RISCVISelLowering.cpp
Lines changed: 7 additions & 2 deletions

@@ -20019,8 +20019,13 @@ unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
 }
 
 bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
-  // At the moment, the only scalable instruction GISel knows how to lower is
-  // ret with scalable argument.
+
+  // GISel support is in progress or complete for G_ADD, G_SUB, G_AND, G_OR, and
+  // G_XOR.
+  unsigned Op = Inst.getOpcode();
+  if (Op == Instruction::Add || Op == Instruction::Sub ||
+      Op == Instruction::And || Op == Instruction::Or || Op == Instruction::Xor)
+    return false;
 
   if (Inst.getType()->isScalableTy())
     return true;
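
Condensing the hook above: after this patch the fallback decision for the affected opcodes reads roughly like the sketch below. This is a simplified restatement, not the upstream function verbatim; in particular, the real hook continues with further checks after the scalable-type test.

// Simplified sketch of the post-patch fallback decision (illustrative only).
static bool fallsBackToDAGISel(const Instruction &Inst) {
  switch (Inst.getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These opcodes never fall back, even with scalable vector types.
    return false;
  default:
    break;
  }
  // Other instructions producing a scalable type still fall back;
  // the real hook performs additional checks beyond this point.
  return Inst.getType()->isScalableTy();
}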
Lines changed: 53 additions & 0 deletions

@@ -0,0 +1,53 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV32I
; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-before=legalizer -simplify-mir < %s | FileCheck %s --check-prefixes=CHECK,RV64I

define void @add_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: add_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = add <vscale x 2 x i32> %a, %b
  ret void
}

define void @sub_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: sub_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = sub <vscale x 2 x i32> %a, %b
  ret void
}

define void @and_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: and_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = and <vscale x 2 x i32> %a, %b
  ret void
}

define void @or_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: or_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = or <vscale x 2 x i32> %a, %b
  ret void
}

define void @xor_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
; CHECK-LABEL: name: xor_nxv2i32
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $v8, $v9
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: PseudoRET
  %c = xor <vscale x 2 x i32> %a, %b
  ret void
}
Lines changed: 274 additions & 0 deletions

@@ -0,0 +1,274 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+zve32x -run-pass=legalizer %s -o - | FileCheck %s
---
name: test_nxv2i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = COPY $v8
    %1:_(<vscale x 2 x s8>) = COPY $v9
    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s8>) = COPY $v8
    %1:_(<vscale x 4 x s8>) = COPY $v9
    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s8>) = COPY $v8
    %1:_(<vscale x 8 x s8>) = COPY $v9
    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv16i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv32i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 32 x s8>) = COPY $v8m4
    %1:_(<vscale x 32 x s8>) = COPY $v12m4
    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv64i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv64i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 64 x s8>) = COPY $v8m8
    %1:_(<vscale x 64 x s8>) = COPY $v16m8
    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv2i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s16>) = COPY $v8
    %1:_(<vscale x 2 x s16>) = COPY $v9
    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = COPY $v8
    %1:_(<vscale x 4 x s16>) = COPY $v9
    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 8 x s16>) = COPY $v8m2
    %1:_(<vscale x 8 x s16>) = COPY $v10m2
    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv16i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 16 x s16>) = COPY $v8m4
    %1:_(<vscale x 16 x s16>) = COPY $v12m4
    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv32i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 32 x s16>) = COPY $v8m8
    %1:_(<vscale x 32 x s16>) = COPY $v16m8
    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv2i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
Lines changed: 399 additions & 0 deletions

@@ -0,0 +1,399 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---
name: test_nxv1i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s8>) = COPY $v8
    %1:_(<vscale x 1 x s8>) = COPY $v9
    %2:_(<vscale x 1 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = COPY $v8
    %1:_(<vscale x 2 x s8>) = COPY $v9
    %2:_(<vscale x 2 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s8>) = COPY $v8
    %1:_(<vscale x 4 x s8>) = COPY $v9
    %2:_(<vscale x 4 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 8 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s8>) = COPY $v8
    %1:_(<vscale x 8 x s8>) = COPY $v9
    %2:_(<vscale x 8 x s8>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv16i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 16 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv32i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 32 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 32 x s8>) = COPY $v8m4
    %1:_(<vscale x 32 x s8>) = COPY $v12m4
    %2:_(<vscale x 32 x s8>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv64i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv64i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 64 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 64 x s8>) = COPY $v8m8
    %1:_(<vscale x 64 x s8>) = COPY $v16m8
    %2:_(<vscale x 64 x s8>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s16>) = COPY $v8
    %1:_(<vscale x 1 x s16>) = COPY $v9
    %2:_(<vscale x 1 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s16>) = COPY $v8
    %1:_(<vscale x 2 x s16>) = COPY $v9
    %2:_(<vscale x 2 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 4 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = COPY $v8
    %1:_(<vscale x 4 x s16>) = COPY $v9
    %2:_(<vscale x 4 x s16>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 8 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 8 x s16>) = COPY $v8m2
    %1:_(<vscale x 8 x s16>) = COPY $v10m2
    %2:_(<vscale x 8 x s16>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv16i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 16 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 16 x s16>) = COPY $v8m4
    %1:_(<vscale x 16 x s16>) = COPY $v12m4
    %2:_(<vscale x 16 x s16>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv32i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 32 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 32 x s16>) = COPY $v8m8
    %1:_(<vscale x 32 x s16>) = COPY $v16m8
    %2:_(<vscale x 32 x s16>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s32>) = COPY $v8
    %1:_(<vscale x 1 x s32>) = COPY $v9
    %2:_(<vscale x 1 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 2 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 8 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 16 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[ADD]](<vscale x 1 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s64>) = COPY $v8
    %1:_(<vscale x 1 x s64>) = COPY $v9
    %2:_(<vscale x 1 x s64>) = G_ADD %0, %1
    $v8 = COPY %2(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[ADD]](<vscale x 2 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 2 x s64>) = COPY $v8m2
    %1:_(<vscale x 2 x s64>) = COPY $v10m2
    %2:_(<vscale x 2 x s64>) = G_ADD %0, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv4i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[ADD]](<vscale x 4 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 4 x s64>) = COPY $v8m4
    %1:_(<vscale x 4 x s64>) = COPY $v12m4
    %2:_(<vscale x 4 x s64>) = G_ADD %0, %1
    $v8m4 = COPY %2(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv8i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_ADD [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[ADD]](<vscale x 8 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 8 x s64>) = COPY $v8m8
    %1:_(<vscale x 8 x s64>) = COPY $v16m8
    %2:_(<vscale x 8 x s64>) = G_ADD %0, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...
Lines changed: 399 additions & 0 deletions

@@ -0,0 +1,399 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---
name: test_nxv1i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s8>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s8>) = COPY $v8
    %1:_(<vscale x 1 x s8>) = COPY $v9
    %2:_(<vscale x 1 x s8>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 1 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s8>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = COPY $v8
    %1:_(<vscale x 2 x s8>) = COPY $v9
    %2:_(<vscale x 2 x s8>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s8>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 4 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s8>) = COPY $v8
    %1:_(<vscale x 4 x s8>) = COPY $v9
    %2:_(<vscale x 4 x s8>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s8>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 8 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s8>) = COPY $v8
    %1:_(<vscale x 8 x s8>) = COPY $v9
    %2:_(<vscale x 8 x s8>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv16i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s8>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 16 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_AND %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv32i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s8>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 32 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 32 x s8>) = COPY $v8m4
    %1:_(<vscale x 32 x s8>) = COPY $v12m4
    %2:_(<vscale x 32 x s8>) = G_AND %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv64i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv64i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 64 x s8>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 64 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 64 x s8>) = COPY $v8m8
    %1:_(<vscale x 64 x s8>) = COPY $v16m8
    %2:_(<vscale x 64 x s8>) = G_AND %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s16>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s16>) = COPY $v8
    %1:_(<vscale x 1 x s16>) = COPY $v9
    %2:_(<vscale x 1 x s16>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 1 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s16>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s16>) = COPY $v8
    %1:_(<vscale x 2 x s16>) = COPY $v9
    %2:_(<vscale x 2 x s16>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s16>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 4 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = COPY $v8
    %1:_(<vscale x 4 x s16>) = COPY $v9
    %2:_(<vscale x 4 x s16>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s16>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 8 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 8 x s16>) = COPY $v8m2
    %1:_(<vscale x 8 x s16>) = COPY $v10m2
    %2:_(<vscale x 8 x s16>) = G_AND %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv16i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s16>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 16 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 16 x s16>) = COPY $v8m4
    %1:_(<vscale x 16 x s16>) = COPY $v12m4
    %2:_(<vscale x 16 x s16>) = G_AND %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv32i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 32 x s16>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 32 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 32 x s16>) = COPY $v8m8
    %1:_(<vscale x 32 x s16>) = COPY $v16m8
    %2:_(<vscale x 32 x s16>) = G_AND %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s32>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s32>) = COPY $v8
    %1:_(<vscale x 1 x s32>) = COPY $v9
    %2:_(<vscale x 1 x s32>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s32>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 2 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s32>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_AND %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s32>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 8 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_AND %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 16 x s32>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 16 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_AND %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 1 x s64>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[AND]](<vscale x 1 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s64>) = COPY $v8
    %1:_(<vscale x 1 x s64>) = COPY $v9
    %2:_(<vscale x 1 x s64>) = G_AND %0, %1
    $v8 = COPY %2(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 2 x s64>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[AND]](<vscale x 2 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 2 x s64>) = COPY $v8m2
    %1:_(<vscale x 2 x s64>) = COPY $v10m2
    %2:_(<vscale x 2 x s64>) = G_AND %0, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv4i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 4 x s64>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[AND]](<vscale x 4 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 4 x s64>) = COPY $v8m4
    %1:_(<vscale x 4 x s64>) = COPY $v12m4
    %2:_(<vscale x 4 x s64>) = G_AND %0, %1
    $v8m4 = COPY %2(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv8i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<vscale x 8 x s64>) = G_AND [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[AND]](<vscale x 8 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 8 x s64>) = COPY $v8m8
    %1:_(<vscale x 8 x s64>) = COPY $v16m8
    %2:_(<vscale x 8 x s64>) = G_AND %0, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...
Lines changed: 399 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,399 @@
1+
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2+
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
3+
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
4+
---
5+
name: test_nxv1i8
6+
body: |
7+
bb.0.entry:
8+
9+
; CHECK-LABEL: name: test_nxv1i8
10+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
11+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
12+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_OR [[COPY]], [[COPY1]]
13+
; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s8>)
14+
; CHECK-NEXT: PseudoRET implicit $v8
15+
%0:_(<vscale x 1 x s8>) = COPY $v8
16+
%1:_(<vscale x 1 x s8>) = COPY $v9
17+
%2:_(<vscale x 1 x s8>) = G_OR %0, %1
18+
$v8 = COPY %2(<vscale x 1 x s8>)
19+
PseudoRET implicit $v8
20+
21+
...
22+
---
23+
name: test_nxv2i8
24+
body: |
25+
bb.0.entry:
26+
27+
; CHECK-LABEL: name: test_nxv2i8
28+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
29+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
30+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_OR [[COPY]], [[COPY1]]
31+
; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s8>)
32+
; CHECK-NEXT: PseudoRET implicit $v8
33+
%0:_(<vscale x 2 x s8>) = COPY $v8
34+
%1:_(<vscale x 2 x s8>) = COPY $v9
35+
%2:_(<vscale x 2 x s8>) = G_OR %0, %1
36+
$v8 = COPY %2(<vscale x 2 x s8>)
37+
PseudoRET implicit $v8
38+
39+
...
40+
---
41+
name: test_nxv4i8
42+
body: |
43+
bb.0.entry:
44+
45+
; CHECK-LABEL: name: test_nxv4i8
46+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
47+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
48+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_OR [[COPY]], [[COPY1]]
49+
; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 4 x s8>)
50+
; CHECK-NEXT: PseudoRET implicit $v8
51+
%0:_(<vscale x 4 x s8>) = COPY $v8
52+
%1:_(<vscale x 4 x s8>) = COPY $v9
53+
%2:_(<vscale x 4 x s8>) = G_OR %0, %1
54+
$v8 = COPY %2(<vscale x 4 x s8>)
55+
PseudoRET implicit $v8
56+
57+
...
58+
---
59+
name: test_nxv8i8
60+
body: |
61+
bb.0.entry:
62+
63+
; CHECK-LABEL: name: test_nxv8i8
64+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
65+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
66+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_OR [[COPY]], [[COPY1]]
67+
; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 8 x s8>)
68+
; CHECK-NEXT: PseudoRET implicit $v8
69+
%0:_(<vscale x 8 x s8>) = COPY $v8
70+
%1:_(<vscale x 8 x s8>) = COPY $v9
71+
%2:_(<vscale x 8 x s8>) = G_OR %0, %1
72+
$v8 = COPY %2(<vscale x 8 x s8>)
73+
PseudoRET implicit $v8
74+
75+
...
76+
---
77+
name: test_nxv16i8
78+
body: |
79+
bb.0.entry:
80+
81+
; CHECK-LABEL: name: test_nxv16i8
82+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
83+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
84+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_OR [[COPY]], [[COPY1]]
85+
; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 16 x s8>)
86+
; CHECK-NEXT: PseudoRET implicit $v8m2
87+
%0:_(<vscale x 16 x s8>) = COPY $v8m2
88+
%1:_(<vscale x 16 x s8>) = COPY $v10m2
89+
%2:_(<vscale x 16 x s8>) = G_OR %0, %1
90+
$v8m2 = COPY %2(<vscale x 16 x s8>)
91+
PseudoRET implicit $v8m2
92+
93+
...
94+
---
95+
name: test_nxv32i8
96+
body: |
97+
bb.0.entry:
98+
99+
; CHECK-LABEL: name: test_nxv32i8
100+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
101+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
102+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_OR [[COPY]], [[COPY1]]
103+
; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 32 x s8>)
104+
; CHECK-NEXT: PseudoRET implicit $v8m4
105+
%0:_(<vscale x 32 x s8>) = COPY $v8m4
106+
%1:_(<vscale x 32 x s8>) = COPY $v12m4
107+
%2:_(<vscale x 32 x s8>) = G_OR %0, %1
108+
$v8m4 = COPY %2(<vscale x 32 x s8>)
109+
PseudoRET implicit $v8m4
110+
111+
...
112+
---
113+
name: test_nxv64i8
114+
body: |
115+
bb.0.entry:
116+
117+
; CHECK-LABEL: name: test_nxv64i8
118+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
119+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
120+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_OR [[COPY]], [[COPY1]]
121+
; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 64 x s8>)
122+
; CHECK-NEXT: PseudoRET implicit $v8m8
123+
%0:_(<vscale x 64 x s8>) = COPY $v8m8
124+
%1:_(<vscale x 64 x s8>) = COPY $v16m8
125+
%2:_(<vscale x 64 x s8>) = G_OR %0, %1
126+
$v8m8 = COPY %2(<vscale x 64 x s8>)
127+
PseudoRET implicit $v8m8
128+
129+
...
130+
---
131+
name: test_nxv1i16
132+
body: |
133+
bb.0.entry:
134+
135+
; CHECK-LABEL: name: test_nxv1i16
136+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
137+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
138+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_OR [[COPY]], [[COPY1]]
139+
; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s16>)
140+
; CHECK-NEXT: PseudoRET implicit $v8
141+
%0:_(<vscale x 1 x s16>) = COPY $v8
142+
%1:_(<vscale x 1 x s16>) = COPY $v9
143+
%2:_(<vscale x 1 x s16>) = G_OR %0, %1
144+
$v8 = COPY %2(<vscale x 1 x s16>)
145+
PseudoRET implicit $v8
146+
147+
...
148+
---
149+
name: test_nxv2i16
150+
body: |
151+
bb.0.entry:
152+
153+
; CHECK-LABEL: name: test_nxv2i16
154+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
155+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
156+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_OR [[COPY]], [[COPY1]]
157+
; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s16>)
158+
; CHECK-NEXT: PseudoRET implicit $v8
159+
%0:_(<vscale x 2 x s16>) = COPY $v8
160+
%1:_(<vscale x 2 x s16>) = COPY $v9
161+
%2:_(<vscale x 2 x s16>) = G_OR %0, %1
162+
$v8 = COPY %2(<vscale x 2 x s16>)
163+
PseudoRET implicit $v8
164+
165+
...
166+
---
167+
name: test_nxv4i16
168+
body: |
169+
bb.0.entry:
170+
171+
; CHECK-LABEL: name: test_nxv4i16
172+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
173+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
174+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_OR [[COPY]], [[COPY1]]
175+
; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 4 x s16>)
176+
; CHECK-NEXT: PseudoRET implicit $v8
177+
%0:_(<vscale x 4 x s16>) = COPY $v8
178+
%1:_(<vscale x 4 x s16>) = COPY $v9
179+
%2:_(<vscale x 4 x s16>) = G_OR %0, %1
180+
$v8 = COPY %2(<vscale x 4 x s16>)
181+
PseudoRET implicit $v8
182+
183+
...
184+
---
185+
name: test_nxv8i16
186+
body: |
187+
bb.0.entry:
188+
189+
; CHECK-LABEL: name: test_nxv8i16
190+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
191+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
192+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_OR [[COPY]], [[COPY1]]
193+
; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 8 x s16>)
194+
; CHECK-NEXT: PseudoRET implicit $v8m2
195+
%0:_(<vscale x 8 x s16>) = COPY $v8m2
196+
%1:_(<vscale x 8 x s16>) = COPY $v10m2
197+
%2:_(<vscale x 8 x s16>) = G_OR %0, %1
198+
$v8m2 = COPY %2(<vscale x 8 x s16>)
199+
PseudoRET implicit $v8m2
200+
201+
...
202+
---
203+
name: test_nxv16i16
204+
body: |
205+
bb.0.entry:
206+
207+
; CHECK-LABEL: name: test_nxv16i16
208+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
209+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
210+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_OR [[COPY]], [[COPY1]]
211+
; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 16 x s16>)
212+
; CHECK-NEXT: PseudoRET implicit $v8m4
213+
%0:_(<vscale x 16 x s16>) = COPY $v8m4
214+
%1:_(<vscale x 16 x s16>) = COPY $v12m4
215+
%2:_(<vscale x 16 x s16>) = G_OR %0, %1
216+
$v8m4 = COPY %2(<vscale x 16 x s16>)
217+
PseudoRET implicit $v8m4
218+
219+
...
220+
---
221+
name: test_nxv32i16
222+
body: |
223+
bb.0.entry:
224+
225+
; CHECK-LABEL: name: test_nxv32i16
226+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
227+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
228+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_OR [[COPY]], [[COPY1]]
229+
; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 32 x s16>)
230+
; CHECK-NEXT: PseudoRET implicit $v8m8
231+
%0:_(<vscale x 32 x s16>) = COPY $v8m8
232+
%1:_(<vscale x 32 x s16>) = COPY $v16m8
233+
%2:_(<vscale x 32 x s16>) = G_OR %0, %1
234+
$v8m8 = COPY %2(<vscale x 32 x s16>)
235+
PseudoRET implicit $v8m8
236+
237+
...
238+
---
239+
name: test_nxv1i32
240+
body: |
241+
bb.0.entry:
242+
243+
; CHECK-LABEL: name: test_nxv1i32
244+
; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
245+
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
246+
; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_OR [[COPY]], [[COPY1]]
247+
    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s32>) = COPY $v8
    %1:_(<vscale x 1 x s32>) = COPY $v9
    %2:_(<vscale x 1 x s32>) = G_OR %0, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 2 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_OR %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_OR %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 8 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_OR %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 16 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_OR %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[OR]](<vscale x 1 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s64>) = COPY $v8
    %1:_(<vscale x 1 x s64>) = COPY $v9
    %2:_(<vscale x 1 x s64>) = G_OR %0, %1
    $v8 = COPY %2(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[OR]](<vscale x 2 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 2 x s64>) = COPY $v8m2
    %1:_(<vscale x 2 x s64>) = COPY $v10m2
    %2:_(<vscale x 2 x s64>) = G_OR %0, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv4i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[OR]](<vscale x 4 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 4 x s64>) = COPY $v8m4
    %1:_(<vscale x 4 x s64>) = COPY $v12m4
    %2:_(<vscale x 4 x s64>) = G_OR %0, %1
    $v8m4 = COPY %2(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv8i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_OR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[OR]](<vscale x 8 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 8 x s64>) = COPY $v8m8
    %1:_(<vscale x 8 x s64>) = COPY $v16m8
    %2:_(<vscale x 8 x s64>) = G_OR %0, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...
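
The G_OR coverage above exercises the legalizer directly on MIR. For orientation only, a minimal IR-level sketch that would drive the same path through the IRTranslator and the legalizer is shown below; it is not part of this diff, and the function name and CHECK line are illustrative assumptions:

; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=legalizer %s -o - | FileCheck %s
define <vscale x 1 x i32> @or_nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b) {
  ; CHECK: G_OR
  ; The scalable-vector `or` should survive legalization unchanged as G_OR.
  %res = or <vscale x 1 x i32> %a, %b
  ret <vscale x 1 x i32> %res
}

The next new file applies the same set of test cases to G_SUB.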
Lines changed: 399 additions & 0 deletions
@@ -0,0 +1,399 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---
name: test_nxv1i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s8>) = COPY $v8
    %1:_(<vscale x 1 x s8>) = COPY $v9
    %2:_(<vscale x 1 x s8>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 1 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = COPY $v8
    %1:_(<vscale x 2 x s8>) = COPY $v9
    %2:_(<vscale x 2 x s8>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s8>) = COPY $v8
    %1:_(<vscale x 4 x s8>) = COPY $v9
    %2:_(<vscale x 4 x s8>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 8 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s8>) = COPY $v8
    %1:_(<vscale x 8 x s8>) = COPY $v9
    %2:_(<vscale x 8 x s8>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv16i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 16 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_SUB %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv32i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 32 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 32 x s8>) = COPY $v8m4
    %1:_(<vscale x 32 x s8>) = COPY $v12m4
    %2:_(<vscale x 32 x s8>) = G_SUB %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv64i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv64i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 64 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 64 x s8>) = COPY $v8m8
    %1:_(<vscale x 64 x s8>) = COPY $v16m8
    %2:_(<vscale x 64 x s8>) = G_SUB %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s16>) = COPY $v8
    %1:_(<vscale x 1 x s16>) = COPY $v9
    %2:_(<vscale x 1 x s16>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 1 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s16>) = COPY $v8
    %1:_(<vscale x 2 x s16>) = COPY $v9
    %2:_(<vscale x 2 x s16>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 4 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = COPY $v8
    %1:_(<vscale x 4 x s16>) = COPY $v9
    %2:_(<vscale x 4 x s16>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 8 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 8 x s16>) = COPY $v8m2
    %1:_(<vscale x 8 x s16>) = COPY $v10m2
    %2:_(<vscale x 8 x s16>) = G_SUB %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv16i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 16 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 16 x s16>) = COPY $v8m4
    %1:_(<vscale x 16 x s16>) = COPY $v12m4
    %2:_(<vscale x 16 x s16>) = G_SUB %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv32i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 32 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 32 x s16>) = COPY $v8m8
    %1:_(<vscale x 32 x s16>) = COPY $v16m8
    %2:_(<vscale x 32 x s16>) = G_SUB %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s32>) = COPY $v8
    %1:_(<vscale x 1 x s32>) = COPY $v9
    %2:_(<vscale x 1 x s32>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 2 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_SUB %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 8 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_SUB %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 16 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_SUB %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[SUB]](<vscale x 1 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s64>) = COPY $v8
    %1:_(<vscale x 1 x s64>) = COPY $v9
    %2:_(<vscale x 1 x s64>) = G_SUB %0, %1
    $v8 = COPY %2(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[SUB]](<vscale x 2 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 2 x s64>) = COPY $v8m2
    %1:_(<vscale x 2 x s64>) = COPY $v10m2
    %2:_(<vscale x 2 x s64>) = G_SUB %0, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv4i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[SUB]](<vscale x 4 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 4 x s64>) = COPY $v8m4
    %1:_(<vscale x 4 x s64>) = COPY $v12m4
    %2:_(<vscale x 4 x s64>) = G_SUB %0, %1
    $v8m4 = COPY %2(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv8i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SUB [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[SUB]](<vscale x 8 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 8 x s64>) = COPY $v8m8
    %1:_(<vscale x 8 x s64>) = COPY $v16m8
    %2:_(<vscale x 8 x s64>) = G_SUB %0, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...
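
A note on the register constraints seen throughout these checks: they follow RVV register grouping, so a <vscale x 64 x s8> value occupies an LMUL=8 register group and the two operands land in $v8m8 and $v16m8. A minimal IR-level sketch of the same grouping, with an assumed function name and not taken from this diff:

; RUN: llc -mtriple=riscv64 -mattr=+v -global-isel -stop-after=legalizer %s -o - | FileCheck %s
define <vscale x 64 x i8> @sub_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
  ; CHECK: G_SUB
  ; Each <vscale x 64 x i8> argument fills an eight-register group (LMUL=8).
  %res = sub <vscale x 64 x i8> %a, %b
  ret <vscale x 64 x i8> %res
}

The final new file repeats the same pattern for G_XOR.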
Lines changed: 399 additions & 0 deletions
@@ -0,0 +1,399 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
---
name: test_nxv1i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s8>) = COPY $v8
    %1:_(<vscale x 1 x s8>) = COPY $v9
    %2:_(<vscale x 1 x s8>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 1 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s8>) = COPY $v8
    %1:_(<vscale x 2 x s8>) = COPY $v9
    %2:_(<vscale x 2 x s8>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 2 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 4 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s8>) = COPY $v8
    %1:_(<vscale x 4 x s8>) = COPY $v9
    %2:_(<vscale x 4 x s8>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 4 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 8 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 8 x s8>) = COPY $v8
    %1:_(<vscale x 8 x s8>) = COPY $v9
    %2:_(<vscale x 8 x s8>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 8 x s8>)
    PseudoRET implicit $v8

...
---
name: test_nxv16i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v10m2
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 16 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 16 x s8>) = COPY $v8m2
    %1:_(<vscale x 16 x s8>) = COPY $v10m2
    %2:_(<vscale x 16 x s8>) = G_XOR %0, %1
    $v8m2 = COPY %2(<vscale x 16 x s8>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv32i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v12m4
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 32 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 32 x s8>) = COPY $v8m4
    %1:_(<vscale x 32 x s8>) = COPY $v12m4
    %2:_(<vscale x 32 x s8>) = G_XOR %0, %1
    $v8m4 = COPY %2(<vscale x 32 x s8>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv64i8
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv64i8
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v16m8
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 64 x s8>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 64 x s8>) = COPY $v8m8
    %1:_(<vscale x 64 x s8>) = COPY $v16m8
    %2:_(<vscale x 64 x s8>) = G_XOR %0, %1
    $v8m8 = COPY %2(<vscale x 64 x s8>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s16>) = COPY $v8
    %1:_(<vscale x 1 x s16>) = COPY $v9
    %2:_(<vscale x 1 x s16>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 1 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s16>) = COPY $v8
    %1:_(<vscale x 2 x s16>) = COPY $v9
    %2:_(<vscale x 2 x s16>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 2 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 4 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 4 x s16>) = COPY $v8
    %1:_(<vscale x 4 x s16>) = COPY $v9
    %2:_(<vscale x 4 x s16>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 4 x s16>)
    PseudoRET implicit $v8

...
---
name: test_nxv8i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v10m2
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 8 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 8 x s16>) = COPY $v8m2
    %1:_(<vscale x 8 x s16>) = COPY $v10m2
    %2:_(<vscale x 8 x s16>) = G_XOR %0, %1
    $v8m2 = COPY %2(<vscale x 8 x s16>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv16i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v12m4
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 16 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 16 x s16>) = COPY $v8m4
    %1:_(<vscale x 16 x s16>) = COPY $v12m4
    %2:_(<vscale x 16 x s16>) = G_XOR %0, %1
    $v8m4 = COPY %2(<vscale x 16 x s16>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv32i16
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv32i16
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v16m8
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 32 x s16>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 32 x s16>) = COPY $v8m8
    %1:_(<vscale x 32 x s16>) = COPY $v16m8
    %2:_(<vscale x 32 x s16>) = G_XOR %0, %1
    $v8m8 = COPY %2(<vscale x 32 x s16>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s32>) = COPY $v8
    %1:_(<vscale x 1 x s32>) = COPY $v9
    %2:_(<vscale x 1 x s32>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 1 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 2 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 2 x s32>) = COPY $v8
    %1:_(<vscale x 2 x s32>) = COPY $v9
    %2:_(<vscale x 2 x s32>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 2 x s32>)
    PseudoRET implicit $v8

...
---
name: test_nxv4i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v10m2
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 4 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 4 x s32>) = COPY $v8m2
    %1:_(<vscale x 4 x s32>) = COPY $v10m2
    %2:_(<vscale x 4 x s32>) = G_XOR %0, %1
    $v8m2 = COPY %2(<vscale x 4 x s32>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv8i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v12m4
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 8 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 8 x s32>) = COPY $v8m4
    %1:_(<vscale x 8 x s32>) = COPY $v12m4
    %2:_(<vscale x 8 x s32>) = G_XOR %0, %1
    $v8m4 = COPY %2(<vscale x 8 x s32>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv16i32
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv16i32
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v16m8
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 16 x s32>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 16 x s32>) = COPY $v8m8
    %1:_(<vscale x 16 x s32>) = COPY $v16m8
    %2:_(<vscale x 16 x s32>) = G_XOR %0, %1
    $v8m8 = COPY %2(<vscale x 16 x s32>)
    PseudoRET implicit $v8m8

...
---
name: test_nxv1i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv1i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v9
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8 = COPY [[XOR]](<vscale x 1 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8
    %0:_(<vscale x 1 x s64>) = COPY $v8
    %1:_(<vscale x 1 x s64>) = COPY $v9
    %2:_(<vscale x 1 x s64>) = G_XOR %0, %1
    $v8 = COPY %2(<vscale x 1 x s64>)
    PseudoRET implicit $v8

...
---
name: test_nxv2i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv2i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v10m2
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m2 = COPY [[XOR]](<vscale x 2 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m2
    %0:_(<vscale x 2 x s64>) = COPY $v8m2
    %1:_(<vscale x 2 x s64>) = COPY $v10m2
    %2:_(<vscale x 2 x s64>) = G_XOR %0, %1
    $v8m2 = COPY %2(<vscale x 2 x s64>)
    PseudoRET implicit $v8m2

...
---
name: test_nxv4i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv4i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v12m4
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m4 = COPY [[XOR]](<vscale x 4 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m4
    %0:_(<vscale x 4 x s64>) = COPY $v8m4
    %1:_(<vscale x 4 x s64>) = COPY $v12m4
    %2:_(<vscale x 4 x s64>) = G_XOR %0, %1
    $v8m4 = COPY %2(<vscale x 4 x s64>)
    PseudoRET implicit $v8m4

...
---
name: test_nxv8i64
body: |
  bb.0.entry:

    ; CHECK-LABEL: name: test_nxv8i64
    ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v16m8
    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_XOR [[COPY]], [[COPY1]]
    ; CHECK-NEXT: $v8m8 = COPY [[XOR]](<vscale x 8 x s64>)
    ; CHECK-NEXT: PseudoRET implicit $v8m8
    %0:_(<vscale x 8 x s64>) = COPY $v8m8
    %1:_(<vscale x 8 x s64>) = COPY $v16m8
    %2:_(<vscale x 8 x s64>) = G_XOR %0, %1
    $v8m8 = COPY %2(<vscale x 8 x s64>)
    PseudoRET implicit $v8m8

...
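
The RUN lines above run each file under both riscv32 and riscv64 with +v; s64 element types remain legal even on riscv32 because the V extension provides ELEN=64. A final illustrative IR-level sketch of that case, with an assumed function name and not taken from this diff:

; RUN: llc -mtriple=riscv32 -mattr=+v -global-isel -stop-after=legalizer %s -o - | FileCheck %s
define <vscale x 2 x i64> @xor_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
  ; CHECK: G_XOR
  ; i64 elements do not need to be split on riscv32 when RVV is available.
  %res = xor <vscale x 2 x i64> %a, %b
  ret <vscale x 2 x i64> %res
}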
