@@ -6,7 +6,7 @@ define <4 x i32> @add_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI0_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -30,7 +30,7 @@ define <8 x i32> @add_constant_rhs_8xi32(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e,
; CHECK-LABEL: add_constant_rhs_8xi32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vslide1down.vx v8, v8, a3
@@ -67,7 +67,7 @@ define <4 x i32> @sub_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: sub_constant_rhs:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI2_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -91,7 +91,7 @@ define <4 x i32> @mul_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: mul_constant_rhs:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI3_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -115,7 +115,7 @@ define <4 x i32> @udiv_constant_rhs(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: udiv_constant_rhs:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI4_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -152,7 +152,7 @@ define <4 x float> @fadd_constant_rhs(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: fadd_constant_rhs:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+ ; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -176,7 +176,7 @@ define <4 x float> @fdiv_constant_rhs(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: fdiv_constant_rhs:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vfslide1down.vf v8, v8, fa0
+ ; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -200,7 +200,7 @@ define <4 x i32> @add_constant_rhs_splat(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_splat:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vslide1down.vx v8, v8, a3
@@ -226,7 +226,7 @@ define <4 x i32> @add_constant_rhs_with_identity(i32 %a, i32 %b, i32 %c, i32 %d)
; RV32-NEXT: addi a3, a3, 2047
; RV32-NEXT: addi a3, a3, 308
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV32-NEXT: vslide1down.vx v8, v8, a0
+ ; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a3
@@ -239,7 +239,7 @@ define <4 x i32> @add_constant_rhs_with_identity(i32 %a, i32 %b, i32 %c, i32 %d)
; RV64-NEXT: addi a3, a3, 2047
; RV64-NEXT: addiw a3, a3, 308
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV64-NEXT: vslide1down.vx v8, v8, a0
+ ; RV64-NEXT: vmv.v.x v8, a0
; RV64-NEXT: vslide1down.vx v8, v8, a1
; RV64-NEXT: vslide1down.vx v8, v8, a2
; RV64-NEXT: vslide1down.vx v8, v8, a3
@@ -263,7 +263,7 @@ define <4 x i32> @add_constant_rhs_identity(i32 %a, i32 %b, i32 %c, i32 %d) {
; RV32-NEXT: addi a3, a3, 2047
; RV32-NEXT: addi a3, a3, 308
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV32-NEXT: vslide1down.vx v8, v8, a0
+ ; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a3
@@ -276,7 +276,7 @@ define <4 x i32> @add_constant_rhs_identity(i32 %a, i32 %b, i32 %c, i32 %d) {
; RV64-NEXT: addi a3, a3, 2047
; RV64-NEXT: addiw a3, a3, 308
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV64-NEXT: vslide1down.vx v8, v8, a0
+ ; RV64-NEXT: vmv.v.x v8, a0
; RV64-NEXT: vslide1down.vx v8, v8, a1
; RV64-NEXT: vslide1down.vx v8, v8, a2
; RV64-NEXT: vslide1down.vx v8, v8, a3
@@ -293,25 +293,15 @@ define <4 x i32> @add_constant_rhs_identity(i32 %a, i32 %b, i32 %c, i32 %d) {
}

define <4 x i32> @add_constant_rhs_identity2(i32 %a, i32 %b, i32 %c, i32 %d) {
- ; RV32-LABEL: add_constant_rhs_identity2:
- ; RV32: # %bb.0:
- ; RV32-NEXT: addi a0, a0, 23
- ; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV32-NEXT: vslide1down.vx v8, v8, a0
- ; RV32-NEXT: vslide1down.vx v8, v8, a1
- ; RV32-NEXT: vslide1down.vx v8, v8, a2
- ; RV32-NEXT: vslide1down.vx v8, v8, a3
- ; RV32-NEXT: ret
- ;
- ; RV64-LABEL: add_constant_rhs_identity2:
- ; RV64: # %bb.0:
- ; RV64-NEXT: addiw a0, a0, 23
- ; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV64-NEXT: vslide1down.vx v8, v8, a0
- ; RV64-NEXT: vslide1down.vx v8, v8, a1
- ; RV64-NEXT: vslide1down.vx v8, v8, a2
- ; RV64-NEXT: vslide1down.vx v8, v8, a3
- ; RV64-NEXT: ret
+ ; CHECK-LABEL: add_constant_rhs_identity2:
+ ; CHECK: # %bb.0:
+ ; CHECK-NEXT: addi a0, a0, 23
+ ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+ ; CHECK-NEXT: vmv.v.x v8, a0
+ ; CHECK-NEXT: vslide1down.vx v8, v8, a1
+ ; CHECK-NEXT: vslide1down.vx v8, v8, a2
+ ; CHECK-NEXT: vslide1down.vx v8, v8, a3
+ ; CHECK-NEXT: ret
%e0 = add i32 %a, 23
%v0 = insertelement <4 x i32> poison, i32 %e0, i32 0
%v1 = insertelement <4 x i32> %v0, i32 %b, i32 1
@@ -324,7 +314,7 @@ define <4 x i32> @add_constant_rhs_inverse(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_inverse:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI11_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -348,7 +338,7 @@ define <4 x i32> @add_constant_rhs_commute(i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: add_constant_rhs_commute:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; CHECK-NEXT: vslide1down.vx v8, v8, a0
+ ; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI12_0)
; CHECK-NEXT: vle32.v v9, (a0)
@@ -377,20 +367,20 @@ define <4 x i32> @add_general_rhs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f
; RV32-NEXT: add a2, a2, a6
; RV32-NEXT: add a3, a3, a7
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV32-NEXT: vslide1down.vx v8, v8, a0
+ ; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a3
; RV32-NEXT: ret
;
; RV64-LABEL: add_general_rhs:
; RV64: # %bb.0:
- ; RV64-NEXT: addw a0, a0, a4
+ ; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: addw a1, a1, a5
; RV64-NEXT: addw a2, a2, a6
; RV64-NEXT: addw a3, a3, a7
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV64-NEXT: vslide1down.vx v8, v8, a0
+ ; RV64-NEXT: vmv.v.x v8, a0
; RV64-NEXT: vslide1down.vx v8, v8, a1
; RV64-NEXT: vslide1down.vx v8, v8, a2
; RV64-NEXT: vslide1down.vx v8, v8, a3
@@ -414,20 +404,20 @@ define <4 x i32> @add_general_splat(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
; RV32-NEXT: add a2, a2, a4
; RV32-NEXT: add a3, a3, a4
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV32-NEXT: vslide1down.vx v8, v8, a0
+ ; RV32-NEXT: vmv.v.x v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a3
; RV32-NEXT: ret
;
; RV64-LABEL: add_general_splat:
; RV64: # %bb.0:
- ; RV64-NEXT: addw a0, a0, a4
+ ; RV64-NEXT: add a0, a0, a4
; RV64-NEXT: addw a1, a1, a4
; RV64-NEXT: addw a2, a2, a4
; RV64-NEXT: addw a3, a3, a4
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
- ; RV64-NEXT: vslide1down.vx v8, v8, a0
+ ; RV64-NEXT: vmv.v.x v8, a0
; RV64-NEXT: vslide1down.vx v8, v8, a1
; RV64-NEXT: vslide1down.vx v8, v8, a2
; RV64-NEXT: vslide1down.vx v8, v8, a3