@@ -18,6 +18,18 @@ func.func @load(%base : memref<200x100xf32>, %i : index, %j : index) -> vector<8
// -----
// Checks that the `alignment` attribute on vector.load is propagated to the
// lowered llvm.load under both alignment-derivation modes.
func.func @load_with_alignment_attribute(%base : memref<200x100xf32>, %i : index, %j : index) -> vector<8xf32> {
  %0 = vector.load %base[%i, %j] {alignment = 8} : memref<200x100xf32>, vector<8xf32>
  return %0 : vector<8xf32>
}

// ALL-LABEL: func @load_with_alignment_attribute

// VEC-ALIGN: llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xf32>
// MEMREF-ALIGN: llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xf32>

// -----

//===----------------------------------------------------------------------===//
// vector.store
//===----------------------------------------------------------------------===//
@@ -35,6 +47,19 @@ func.func @store(%base : memref<200x100xf32>, %i : index, %j : index) {
// -----
// Checks that the `alignment` attribute on vector.store is propagated to the
// lowered llvm.store under both alignment-derivation modes.
func.func @store_with_alignment_attribute(%base : memref<200x100xf32>, %i : index, %j : index) {
  %val = arith.constant dense<11.0> : vector<4xf32>
  vector.store %val, %base[%i, %j] {alignment = 8} : memref<200x100xf32>, vector<4xf32>
  return
}

// ALL-LABEL: func @store_with_alignment_attribute

// VEC-ALIGN: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<4xf32>, !llvm.ptr
// MEMREF-ALIGN: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<4xf32>, !llvm.ptr

// -----

//===----------------------------------------------------------------------===//
// vector.maskedload
//===----------------------------------------------------------------------===//
@@ -52,6 +77,19 @@ func.func @masked_load(%base: memref<?xf32>, %mask: vector<16xi1>, %passthru: ve
// -----
// Checks that the `alignment` attribute on vector.maskedload is propagated to
// the lowered llvm.intr.masked.load under both alignment-derivation modes.
func.func @masked_load_with_alignment_attribute(%base: memref<?xf32>, %mask: vector<16xi1>, %passthru: vector<16xf32>) -> vector<16xf32> {
  %c0 = arith.constant 0 : index
  %0 = vector.maskedload %base[%c0], %mask, %passthru {alignment = 8} : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
  return %0 : vector<16xf32>
}

// ALL-LABEL: func @masked_load_with_alignment_attribute

// VEC-ALIGN: %[[L:.*]] = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
// MEMREF-ALIGN: %[[L:.*]] = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.ptr, vector<16xi1>, vector<16xf32>) -> vector<16xf32>

// -----

//===----------------------------------------------------------------------===//
// vector.maskedstore
//===----------------------------------------------------------------------===//
@@ -69,6 +107,19 @@ func.func @masked_store(%base: memref<?xf32>, %mask: vector<16xi1>, %passthru: v
// -----
// Checks that the `alignment` attribute on vector.maskedstore is propagated to
// the lowered llvm.intr.masked.store under both alignment-derivation modes.
func.func @masked_store_with_alignment_attribute(%base: memref<?xf32>, %mask: vector<16xi1>, %passthru: vector<16xf32>) {
  %c0 = arith.constant 0 : index
  vector.maskedstore %base[%c0], %mask, %passthru {alignment = 8} : memref<?xf32>, vector<16xi1>, vector<16xf32>
  return
}

// ALL-LABEL: func @masked_store_with_alignment_attribute

// VEC-ALIGN: llvm.intr.masked.store %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr
// MEMREF-ALIGN: llvm.intr.masked.store %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr

// -----

//===----------------------------------------------------------------------===//
// vector.scatter
//===----------------------------------------------------------------------===//
@@ -86,6 +137,19 @@ func.func @scatter(%base: memref<?xf32>, %index: vector<3xi32>, %mask: vector<3x
// -----
// Checks that the `alignment` attribute on vector.scatter is propagated to the
// lowered llvm.intr.masked.scatter under both alignment-derivation modes.
func.func @scatter_with_alignment_attribute(%base: memref<?xf32>, %index: vector<3xi32>, %mask: vector<3xi1>, %value: vector<3xf32>) {
  %0 = arith.constant 0 : index
  vector.scatter %base[%0][%index], %mask, %value {alignment = 8} : memref<?xf32>, vector<3xi32>, vector<3xi1>, vector<3xf32>
  return
}

// ALL-LABEL: func @scatter_with_alignment_attribute

// VEC-ALIGN: llvm.intr.masked.scatter %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<3xf32>, vector<3xi1> into vector<3x!llvm.ptr>
// MEMREF-ALIGN: llvm.intr.masked.scatter %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : vector<3xf32>, vector<3xi1> into vector<3x!llvm.ptr>

// -----

//===----------------------------------------------------------------------===//
// vector.gather
//===----------------------------------------------------------------------===//
@@ -100,3 +164,16 @@ func.func @gather(%base: memref<?xf32>, %index: vector<3xi32>, %mask: vector<3xi
// VEC-ALIGN: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 16 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
// MEMREF-ALIGN: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 4 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>

// -----

170
// Checks that the `alignment` attribute on vector.gather overrides the derived
// alignment: both modes lower to llvm.intr.masked.gather with alignment = 8.
func.func @gather_with_alignment_attribute(%base: memref<?xf32>, %index: vector<3xi32>, %mask: vector<3xi1>, %passthru: vector<3xf32>) -> vector<3xf32> {
  %0 = arith.constant 0 : index
  %1 = vector.gather %base[%0][%index], %mask, %passthru {alignment = 8} : memref<?xf32>, vector<3xi32>, vector<3xi1>, vector<3xf32> into vector<3xf32>
  return %1 : vector<3xf32>
}

// ALL-LABEL: func @gather_with_alignment_attribute

// VEC-ALIGN: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>
// MEMREF-ALIGN: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (vector<3x!llvm.ptr>, vector<3xi1>, vector<3xf32>) -> vector<3xf32>