Commit 3c40719

[NFC][X86][Codegen] Add test coverage for interleaved i8 load/store stride=4
1 parent 53d7bdb commit 3c40719

2 files changed: +436 -0 lines changed

Lines changed: 274 additions & 0 deletions
@@ -0,0 +1,274 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck --check-prefixes=AVX2 %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2 %s
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck --check-prefixes=AVX2 %s

; These patterns are produced by LoopVectorizer for interleaved loads.

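; As context only, a hedged sketch of the kind of scalar C loop that the
; LoopVectorizer turns into this wide-load-plus-shufflevector pattern; the
; function and parameter names below are illustrative, not taken from this commit:
;
;   // Split a stride-4 interleaved byte stream into four separate planes.
;   void deinterleave_i8_stride4(const uint8_t *in, uint8_t *out0, uint8_t *out1,
;                                uint8_t *out2, uint8_t *out3, int n) {
;     for (int i = 0; i < n; ++i) {
;       out0[i] = in[4 * i + 0];  // element 0 of each group of 4
;       out1[i] = in[4 * i + 1];  // element 1
;       out2[i] = in[4 * i + 2];  // element 2
;       out3[i] = in[4 * i + 3];  // element 3
;     }
;   }
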
define void @load_i8_stride4_vf2(<8 x i8>* %in.vec, <2 x i8>* %out.vec0, <2 x i8>* %out.vec1, <2 x i8>* %out.vec2, <2 x i8>* %out.vec3) nounwind {
; AVX2-LABEL: load_i8_stride4_vf2:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,u,u,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpextrw $0, %xmm1, (%rsi)
; AVX2-NEXT: vpextrw $0, %xmm2, (%rdx)
; AVX2-NEXT: vpextrw $0, %xmm3, (%rcx)
; AVX2-NEXT: vpextrw $0, %xmm0, (%r8)
; AVX2-NEXT: retq
  %wide.vec = load <8 x i8>, <8 x i8>* %in.vec, align 32

  %strided.vec0 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 0, i32 4>
  %strided.vec1 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 1, i32 5>
  %strided.vec2 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 2, i32 6>
  %strided.vec3 = shufflevector <8 x i8> %wide.vec, <8 x i8> poison, <2 x i32> <i32 3, i32 7>

  store <2 x i8> %strided.vec0, <2 x i8>* %out.vec0, align 32
  store <2 x i8> %strided.vec1, <2 x i8>* %out.vec1, align 32
  store <2 x i8> %strided.vec2, <2 x i8>* %out.vec2, align 32
  store <2 x i8> %strided.vec3, <2 x i8>* %out.vec3, align 32

  ret void
}

define void @load_i8_stride4_vf4(<16 x i8>* %in.vec, <4 x i8>* %out.vec0, <4 x i8>* %out.vec1, <4 x i8>* %out.vec2, <4 x i8>* %out.vec3) nounwind {
; AVX2-LABEL: load_i8_stride4_vf4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm3 = xmm0[2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vmovd %xmm1, (%rsi)
; AVX2-NEXT: vmovd %xmm2, (%rdx)
; AVX2-NEXT: vmovd %xmm3, (%rcx)
; AVX2-NEXT: vmovd %xmm0, (%r8)
; AVX2-NEXT: retq
  %wide.vec = load <16 x i8>, <16 x i8>* %in.vec, align 32

  %strided.vec0 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
  %strided.vec1 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
  %strided.vec2 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
  %strided.vec3 = shufflevector <16 x i8> %wide.vec, <16 x i8> poison, <4 x i32> <i32 3, i32 7, i32 11, i32 15>

  store <4 x i8> %strided.vec0, <4 x i8>* %out.vec0, align 32
  store <4 x i8> %strided.vec1, <4 x i8>* %out.vec1, align 32
  store <4 x i8> %strided.vec2, <4 x i8>* %out.vec2, align 32
  store <4 x i8> %strided.vec3, <4 x i8>* %out.vec3, align 32

  ret void
}

define void @load_i8_stride4_vf8(<32 x i8>* %in.vec, <8 x i8>* %out.vec0, <8 x i8>* %out.vec1, <8 x i8>* %out.vec2, <8 x i8>* %out.vec3) nounwind {
; AVX2-LABEL: load_i8_stride4_vf8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX2-NEXT: vpshufb %xmm0, %xmm2, %xmm3
; AVX2-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm4
; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm3
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm5
; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; AVX2-NEXT: vmovq %xmm0, (%rsi)
; AVX2-NEXT: vmovq %xmm3, (%rdx)
; AVX2-NEXT: vmovq %xmm4, (%rcx)
; AVX2-NEXT: vmovq %xmm1, (%r8)
; AVX2-NEXT: retq
  %wide.vec = load <32 x i8>, <32 x i8>* %in.vec, align 32

  %strided.vec0 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28>
  %strided.vec1 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  %strided.vec2 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  %strided.vec3 = shufflevector <32 x i8> %wide.vec, <32 x i8> poison, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>

  store <8 x i8> %strided.vec0, <8 x i8>* %out.vec0, align 32
  store <8 x i8> %strided.vec1, <8 x i8>* %out.vec1, align 32
  store <8 x i8> %strided.vec2, <8 x i8>* %out.vec2, align 32
  store <8 x i8> %strided.vec3, <8 x i8>* %out.vec3, align 32

  ret void
}

define void @load_i8_stride4_vf16(<64 x i8>* %in.vec, <16 x i8>* %out.vec0, <16 x i8>* %out.vec1, <16 x i8>* %out.vec2, <16 x i8>* %out.vec3) nounwind {
; AVX2-LABEL: load_i8_stride4_vf16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm2
; AVX2-NEXT: vmovdqa 48(%rdi), %xmm3
; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm5
; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm6
; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm5
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm8 = xmm5[0,1],xmm4[2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm5, %xmm3, %xmm6
; AVX2-NEXT: vpshufb %xmm5, %xmm2, %xmm5
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm7
; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm5 = xmm6[0,1],xmm5[2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm7
; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm6
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm7 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm7, %xmm1, %xmm4
; AVX2-NEXT: vpshufb %xmm7, %xmm0, %xmm7
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm4[0,1],xmm6[2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm3
; AVX2-NEXT: vpshufb %xmm6, %xmm2, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3]
; AVX2-NEXT: vmovdqa %xmm8, (%rsi)
; AVX2-NEXT: vmovdqa %xmm5, (%rdx)
; AVX2-NEXT: vmovdqa %xmm4, (%rcx)
; AVX2-NEXT: vmovdqa %xmm0, (%r8)
; AVX2-NEXT: retq
  %wide.vec = load <64 x i8>, <64 x i8>* %in.vec, align 32

  %strided.vec0 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60>
  %strided.vec1 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
  %strided.vec2 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
  %strided.vec3 = shufflevector <64 x i8> %wide.vec, <64 x i8> poison, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>

  store <16 x i8> %strided.vec0, <16 x i8>* %out.vec0, align 32
  store <16 x i8> %strided.vec1, <16 x i8>* %out.vec1, align 32
  store <16 x i8> %strided.vec2, <16 x i8>* %out.vec2, align 32
  store <16 x i8> %strided.vec3, <16 x i8>* %out.vec3, align 32

  ret void
}

define void @load_i8_stride4_vf32(<128 x i8>* %in.vec, <32 x i8>* %out.vec0, <32 x i8>* %out.vec1, <32 x i8>* %out.vec2, <32 x i8>* %out.vec3) nounwind {
; AVX2-LABEL: load_i8_stride4_vf32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vmovdqa 112(%rdi), %xmm10
; AVX2-NEXT: vpshufb %xmm0, %xmm10, %xmm1
; AVX2-NEXT: vmovdqa 96(%rdi), %xmm11
; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm3
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vmovdqa 80(%rdi), %xmm13
; AVX2-NEXT: vpshufb %xmm2, %xmm13, %xmm4
; AVX2-NEXT: vmovdqa 64(%rdi), %xmm5
; AVX2-NEXT: vpshufb %xmm2, %xmm5, %xmm6
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm6[0],xmm4[0],xmm6[1],xmm4[1]
; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm4[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovdqa (%rdi), %xmm12
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm14
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm6
; AVX2-NEXT: vmovdqa 48(%rdi), %xmm7
; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm1
; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-NEXT: vpshufb %xmm2, %xmm14, %xmm1
; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm0, %xmm10, %xmm1
; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm13, %xmm3
; AVX2-NEXT: vpshufb %xmm2, %xmm5, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm3
; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; AVX2-NEXT: vpshufb %xmm2, %xmm14, %xmm3
; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm0, %xmm10, %xmm1
; AVX2-NEXT: vpshufb %xmm0, %xmm11, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm2, %xmm13, %xmm3
; AVX2-NEXT: vpshufb %xmm2, %xmm5, %xmm4
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vpshufb %xmm0, %xmm7, %xmm3
; AVX2-NEXT: vpshufb %xmm0, %xmm6, %xmm0
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; AVX2-NEXT: vpshufb %xmm2, %xmm14, %xmm3
; AVX2-NEXT: vpshufb %xmm2, %xmm12, %xmm2
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm1, %xmm10, %xmm2
; AVX2-NEXT: vpshufb %xmm1, %xmm11, %xmm3
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-NEXT: vpshufb %xmm3, %xmm13, %xmm4
; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3,4,5],ymm2[6,7]
; AVX2-NEXT: vpshufb %xmm1, %xmm7, %xmm4
; AVX2-NEXT: vpshufb %xmm1, %xmm6, %xmm1
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; AVX2-NEXT: vpshufb %xmm3, %xmm14, %xmm4
; AVX2-NEXT: vpshufb %xmm3, %xmm12, %xmm3
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vmovdqa %ymm8, (%rsi)
; AVX2-NEXT: vmovdqa %ymm9, (%rdx)
; AVX2-NEXT: vmovdqa %ymm0, (%rcx)
; AVX2-NEXT: vmovdqa %ymm1, (%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
  %wide.vec = load <128 x i8>, <128 x i8>* %in.vec, align 32

  %strided.vec0 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 0, i32 4, i32 8, i32 12, i32 16, i32 20, i32 24, i32 28, i32 32, i32 36, i32 40, i32 44, i32 48, i32 52, i32 56, i32 60, i32 64, i32 68, i32 72, i32 76, i32 80, i32 84, i32 88, i32 92, i32 96, i32 100, i32 104, i32 108, i32 112, i32 116, i32 120, i32 124>
  %strided.vec1 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61, i32 65, i32 69, i32 73, i32 77, i32 81, i32 85, i32 89, i32 93, i32 97, i32 101, i32 105, i32 109, i32 113, i32 117, i32 121, i32 125>
  %strided.vec2 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62, i32 66, i32 70, i32 74, i32 78, i32 82, i32 86, i32 90, i32 94, i32 98, i32 102, i32 106, i32 110, i32 114, i32 118, i32 122, i32 126>
  %strided.vec3 = shufflevector <128 x i8> %wide.vec, <128 x i8> poison, <32 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63, i32 67, i32 71, i32 75, i32 79, i32 83, i32 87, i32 91, i32 95, i32 99, i32 103, i32 107, i32 111, i32 115, i32 119, i32 123, i32 127>

  store <32 x i8> %strided.vec0, <32 x i8>* %out.vec0, align 32
  store <32 x i8> %strided.vec1, <32 x i8>* %out.vec1, align 32
  store <32 x i8> %strided.vec2, <32 x i8>* %out.vec2, align 32
  store <32 x i8> %strided.vec3, <32 x i8>* %out.vec3, align 32

  ret void
}
