@@ -87,14 +87,6 @@ static inline mlir::Type getI8Type(mlir::MLIRContext *context) {
   return mlir::IntegerType::get(context, 8);
 }
 
-static mlir::LLVM::ConstantOp
-genConstantIndex(mlir::Location loc, mlir::Type ity,
-                 mlir::ConversionPatternRewriter &rewriter,
-                 std::int64_t offset) {
-  auto cattr = rewriter.getI64IntegerAttr(offset);
-  return mlir::LLVM::ConstantOp::create(rewriter, loc, ity, cattr);
-}
-
 static mlir::Block *createBlock(mlir::ConversionPatternRewriter &rewriter,
                                 mlir::Block *insertBefore) {
   assert(insertBefore && "expected valid insertion block");
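The static genConstantIndex helper removed above now resolves to a fir::-namespaced helper at every call site later in this diff. A minimal sketch of what that shared helper presumably looks like, reconstructed from the deleted body; the fir namespace is confirmed by the call sites, but the header it lives in is not shown here and is an assumption:

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Transforms/DialectConversion.h"
#include <cstdint>

namespace fir {
// Sketch: materialize `offset` as an i64 integer attribute wrapped in an
// LLVM dialect constant of type `ity`, mirroring the deleted static helper.
mlir::LLVM::ConstantOp genConstantIndex(mlir::Location loc, mlir::Type ity,
                                        mlir::ConversionPatternRewriter &rewriter,
                                        std::int64_t offset) {
  auto cattr = rewriter.getI64IntegerAttr(offset);
  return mlir::LLVM::ConstantOp::create(rewriter, loc, ity, cattr);
}
} // namespace fir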
@@ -208,39 +200,6 @@ getDependentTypeMemSizeFn(fir::RecordType recTy, fir::AllocaOp op,
   TODO(op.getLoc(), "did not find allocation function");
 }
 
-// Compute the alloc scale size (constant factors encoded in the array type).
-// We do this for arrays without a constant interior or arrays of character with
-// dynamic length arrays, since those are the only ones that get decayed to a
-// pointer to the element type.
-template <typename OP>
-static mlir::Value
-genAllocationScaleSize(OP op, mlir::Type ity,
-                       mlir::ConversionPatternRewriter &rewriter) {
-  mlir::Location loc = op.getLoc();
-  mlir::Type dataTy = op.getInType();
-  auto seqTy = mlir::dyn_cast<fir::SequenceType>(dataTy);
-  fir::SequenceType::Extent constSize = 1;
-  if (seqTy) {
-    int constRows = seqTy.getConstantRows();
-    const fir::SequenceType::ShapeRef &shape = seqTy.getShape();
-    if (constRows != static_cast<int>(shape.size())) {
-      for (auto extent : shape) {
-        if (constRows-- > 0)
-          continue;
-        if (extent != fir::SequenceType::getUnknownExtent())
-          constSize *= extent;
-      }
-    }
-  }
-
-  if (constSize != 1) {
-    mlir::Value constVal{
-        genConstantIndex(loc, ity, rewriter, constSize).getResult()};
-    return constVal;
-  }
-  return nullptr;
-}
-
 namespace {
 struct DeclareOpConversion : public fir::FIROpConversion<fir::cg::XDeclareOp> {
 public:
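The deleted genAllocationScaleSize template is replaced at the call sites below by fir::genAllocationScaleSize, which takes the location and the allocated FIR type directly instead of deducing them from the op. A sketch of the presumed shared form, with the signature inferred from the new call sites and the body carried over from the deleted code (same includes as the previous sketch, plus "flang/Optimizer/Dialect/FIRType.h"; the declaration site is an assumption):

namespace fir {
// Sketch: multiply together the constant extents not covered by the sequence
// type's constant rows; return a null Value when no scaling is needed.
mlir::Value genAllocationScaleSize(mlir::Location loc, mlir::Type dataTy,
                                   mlir::Type ity,
                                   mlir::ConversionPatternRewriter &rewriter) {
  auto seqTy = mlir::dyn_cast<fir::SequenceType>(dataTy);
  fir::SequenceType::Extent constSize = 1;
  if (seqTy) {
    int constRows = seqTy.getConstantRows();
    const fir::SequenceType::ShapeRef &shape = seqTy.getShape();
    if (constRows != static_cast<int>(shape.size())) {
      for (auto extent : shape) {
        if (constRows-- > 0)
          continue;
        if (extent != fir::SequenceType::getUnknownExtent())
          constSize *= extent;
      }
    }
  }
  if (constSize != 1)
    return fir::genConstantIndex(loc, ity, rewriter, constSize).getResult();
  return nullptr; // Callers skip the multiply when no scaling is required.
}
} // namespace fir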
@@ -275,7 +234,7 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
     auto loc = alloc.getLoc();
     mlir::Type ity = lowerTy().indexType();
     unsigned i = 0;
-    mlir::Value size = genConstantIndex(loc, ity, rewriter, 1).getResult();
+    mlir::Value size = fir::genConstantIndex(loc, ity, rewriter, 1).getResult();
     mlir::Type firObjType = fir::unwrapRefType(alloc.getType());
     mlir::Type llvmObjectType = convertObjectType(firObjType);
     if (alloc.hasLenParams()) {
@@ -307,7 +266,8 @@ struct AllocaOpConversion : public fir::FIROpConversion<fir::AllocaOp> {
                << scalarType << " with type parameters";
       }
     }
-    if (auto scaleSize = genAllocationScaleSize(alloc, ity, rewriter))
+    if (auto scaleSize = fir::genAllocationScaleSize(
+            alloc.getLoc(), alloc.getInType(), ity, rewriter))
       size =
           rewriter.createOrFold<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
     if (alloc.hasShapeOperands()) {
@@ -484,7 +444,7 @@ struct BoxIsArrayOpConversion : public fir::FIROpConversion<fir::BoxIsArrayOp> {
     auto loc = boxisarray.getLoc();
     TypePair boxTyPair = getBoxTypePair(boxisarray.getVal().getType());
     mlir::Value rank = getRankFromBox(loc, boxTyPair, a, rewriter);
-    mlir::Value c0 = genConstantIndex(loc, rank.getType(), rewriter, 0);
+    mlir::Value c0 = fir::genConstantIndex(loc, rank.getType(), rewriter, 0);
     rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
         boxisarray, mlir::LLVM::ICmpPredicate::ne, rank, c0);
     return mlir::success();
@@ -820,7 +780,7 @@ struct ConvertOpConversion : public fir::FIROpConversion<fir::ConvertOp> {
       // Do folding for constant inputs.
       if (auto constVal = fir::getIntIfConstant(op0)) {
         mlir::Value normVal =
-            genConstantIndex(loc, toTy, rewriter, *constVal ? 1 : 0);
+            fir::genConstantIndex(loc, toTy, rewriter, *constVal ? 1 : 0);
         rewriter.replaceOp(convert, normVal);
         return mlir::success();
       }
@@ -833,7 +793,7 @@ struct ConvertOpConversion : public fir::FIROpConversion<fir::ConvertOp> {
       }
 
       // Compare the input with zero.
-      mlir::Value zero = genConstantIndex(loc, fromTy, rewriter, 0);
+      mlir::Value zero = fir::genConstantIndex(loc, fromTy, rewriter, 0);
       auto isTrue = mlir::LLVM::ICmpOp::create(
           rewriter, loc, mlir::LLVM::ICmpPredicate::ne, op0, zero);
 
@@ -1082,21 +1042,6 @@ static mlir::SymbolRefAttr getMalloc(fir::AllocMemOp op,
   return getMallocInModule(mod, op, rewriter, indexType);
 }
 
-/// Helper function for generating the LLVM IR that computes the distance
-/// in bytes between adjacent elements pointed to by a pointer
-/// of type \p ptrTy. The result is returned as a value of \p idxTy integer
-/// type.
-static mlir::Value
-computeElementDistance(mlir::Location loc, mlir::Type llvmObjectType,
-                       mlir::Type idxTy,
-                       mlir::ConversionPatternRewriter &rewriter,
-                       const mlir::DataLayout &dataLayout) {
-  llvm::TypeSize size = dataLayout.getTypeSize(llvmObjectType);
-  unsigned short alignment = dataLayout.getTypeABIAlignment(llvmObjectType);
-  std::int64_t distance = llvm::alignTo(size, alignment);
-  return genConstantIndex(loc, idxTy, rewriter, distance);
-}
-
 /// Return value of the stride in bytes between adjacent elements
 /// of LLVM type \p llTy. The result is returned as a value of
 /// \p idxTy integer type.
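computeElementDistance follows the same pattern: the deleted static helper above rounds the data-layout store size of the element type up to its ABI alignment and materializes the result as an index constant. A sketch of the fir::-namespaced version assumed by the call sites below (declaration site not shown in this diff; also needs the headers providing mlir::DataLayout and llvm::alignTo in addition to the earlier includes):

namespace fir {
// Sketch: byte distance between adjacent elements = store size of the LLVM
// type rounded up to its ABI alignment, returned as a constant of `idxTy`.
mlir::Value computeElementDistance(mlir::Location loc,
                                   mlir::Type llvmObjectType, mlir::Type idxTy,
                                   mlir::ConversionPatternRewriter &rewriter,
                                   const mlir::DataLayout &dataLayout) {
  llvm::TypeSize size = dataLayout.getTypeSize(llvmObjectType);
  unsigned short alignment = dataLayout.getTypeABIAlignment(llvmObjectType);
  std::int64_t distance = llvm::alignTo(size, alignment);
  return fir::genConstantIndex(loc, idxTy, rewriter, distance);
}
} // namespace fir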
@@ -1105,7 +1050,7 @@ genTypeStrideInBytes(mlir::Location loc, mlir::Type idxTy,
                      mlir::ConversionPatternRewriter &rewriter, mlir::Type llTy,
                      const mlir::DataLayout &dataLayout) {
   // Create a pointer type and use computeElementDistance().
-  return computeElementDistance(loc, llTy, idxTy, rewriter, dataLayout);
+  return fir::computeElementDistance(loc, llTy, idxTy, rewriter, dataLayout);
 }
 
 namespace {
@@ -1124,17 +1069,18 @@ struct AllocMemOpConversion : public fir::FIROpConversion<fir::AllocMemOp> {
     if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
       TODO(loc, "fir.allocmem codegen of derived type with length parameters");
     mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, llvmObjectTy);
-    if (auto scaleSize = genAllocationScaleSize(heap, ity, rewriter))
-      size = mlir::LLVM::MulOp::create(rewriter, loc, ity, size, scaleSize);
+    if (auto scaleSize =
+            fir::genAllocationScaleSize(loc, heap.getInType(), ity, rewriter))
+      size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
     for (mlir::Value opnd : adaptor.getOperands())
       size = mlir::LLVM::MulOp::create(rewriter, loc, ity, size,
                                        integerCast(loc, rewriter, ity, opnd));
 
     // As the return value of malloc(0) is implementation defined, allocate one
     // byte to ensure the allocation status being true. This behavior aligns to
     // what the runtime has.
-    mlir::Value zero = genConstantIndex(loc, ity, rewriter, 0);
-    mlir::Value one = genConstantIndex(loc, ity, rewriter, 1);
+    mlir::Value zero = fir::genConstantIndex(loc, ity, rewriter, 0);
+    mlir::Value one = fir::genConstantIndex(loc, ity, rewriter, 1);
     mlir::Value cmp = mlir::LLVM::ICmpOp::create(
         rewriter, loc, mlir::LLVM::ICmpPredicate::sgt, size, zero);
     size = mlir::LLVM::SelectOp::create(rewriter, loc, cmp, size, one);
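The zero and one constants built with fir::genConstantIndex above exist only to clamp the byte count handed to malloc, because malloc(0) may legally return either a null or a non-null pointer. A scalar illustration of the clamp that the icmp/select pair implements (illustration only, not the codegen itself):

#include <cstdint>

// size > 0 ? size : 1; never request zero bytes, so the allocation status
// reported by the runtime stays meaningful.
std::int64_t clampAllocationSize(std::int64_t size) {
  return size > 0 ? size : 1;
}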
@@ -1157,7 +1103,8 @@ struct AllocMemOpConversion : public fir::FIROpConversion<fir::AllocMemOp> {
   mlir::Value genTypeSizeInBytes(mlir::Location loc, mlir::Type idxTy,
                                  mlir::ConversionPatternRewriter &rewriter,
                                  mlir::Type llTy) const {
-    return computeElementDistance(loc, llTy, idxTy, rewriter, getDataLayout());
+    return fir::computeElementDistance(loc, llTy, idxTy, rewriter,
+                                       getDataLayout());
   }
 };
 } // namespace
@@ -1344,7 +1291,7 @@ genCUFAllocDescriptor(mlir::Location loc,
   mlir::Type structTy = typeConverter.convertBoxTypeAsStruct(boxTy);
   std::size_t boxSize = dl->getTypeSizeInBits(structTy) / 8;
   mlir::Value sizeInBytes =
-      genConstantIndex(loc, llvmIntPtrType, rewriter, boxSize);
+      fir::genConstantIndex(loc, llvmIntPtrType, rewriter, boxSize);
   llvm::SmallVector args = {sizeInBytes, sourceFile, sourceLine};
   return mlir::LLVM::CallOp::create(rewriter, loc, fctTy,
                                     RTNAME_STRING(CUFAllocDescriptor), args)
@@ -1599,7 +1546,7 @@ struct EmboxCommonConversion : public fir::FIROpConversion<OP> {
       // representation of derived types with pointer/allocatable components.
       // This has been seen in hashing algorithms using TRANSFER.
       mlir::Value zero =
-          genConstantIndex(loc, rewriter.getI64Type(), rewriter, 0);
+          fir::genConstantIndex(loc, rewriter.getI64Type(), rewriter, 0);
       descriptor = insertField(rewriter, loc, descriptor,
                                {getLenParamFieldId(boxTy), 0}, zero);
     }
@@ -1944,8 +1891,8 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
     bool hasSlice = !xbox.getSlice().empty();
     unsigned sliceOffset = xbox.getSliceOperandIndex();
     mlir::Location loc = xbox.getLoc();
-    mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0);
-    mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1);
+    mlir::Value zero = fir::genConstantIndex(loc, i64Ty, rewriter, 0);
+    mlir::Value one = fir::genConstantIndex(loc, i64Ty, rewriter, 1);
     mlir::Value prevPtrOff = one;
     mlir::Type eleTy = boxTy.getEleTy();
     const unsigned rank = xbox.getRank();
@@ -1994,7 +1941,7 @@ struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
         prevDimByteStride =
             getCharacterByteSize(loc, rewriter, charTy, adaptor.getLenParams());
       } else {
-        prevDimByteStride = genConstantIndex(
+        prevDimByteStride = fir::genConstantIndex(
             loc, i64Ty, rewriter,
             charTy.getLen() * lowerTy().characterBitsize(charTy) / 8);
       }
@@ -2152,7 +2099,7 @@ struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
     if (auto charTy = mlir::dyn_cast<fir::CharacterType>(inputEleTy)) {
       if (charTy.hasConstantLen()) {
         mlir::Value len =
-            genConstantIndex(loc, idxTy, rewriter, charTy.getLen());
+            fir::genConstantIndex(loc, idxTy, rewriter, charTy.getLen());
         lenParams.emplace_back(len);
       } else {
         mlir::Value len = getElementSizeFromBox(loc, idxTy, inputBoxTyPair,
@@ -2161,7 +2108,7 @@ struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
           assert(!isInGlobalOp(rewriter) &&
                  "character target in global op must have constant length");
           mlir::Value width =
-              genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
+              fir::genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
           len = mlir::LLVM::SDivOp::create(rewriter, loc, idxTy, len, width);
         }
         lenParams.emplace_back(len);
@@ -2215,8 +2162,9 @@ struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
                 mlir::ConversionPatternRewriter &rewriter) const {
     mlir::Location loc = rebox.getLoc();
     mlir::Value zero =
-        genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
-    mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
+        fir::genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
+    mlir::Value one =
+        fir::genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
     for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) {
       mlir::Value extent = std::get<0>(iter.value());
       unsigned dim = iter.index();
@@ -2249,7 +2197,7 @@ struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
     mlir::Location loc = rebox.getLoc();
     mlir::Type byteTy = ::getI8Type(rebox.getContext());
     mlir::Type idxTy = lowerTy().indexType();
-    mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
+    mlir::Value zero = fir::genConstantIndex(loc, idxTy, rewriter, 0);
     // Apply subcomponent and substring shift on base address.
     if (!rebox.getSubcomponent().empty() || !rebox.getSubstr().empty()) {
       // Cast to inputEleTy* so that a GEP can be used.
@@ -2277,7 +2225,7 @@ struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
     // and strides.
     llvm::SmallVector<mlir::Value> slicedExtents;
     llvm::SmallVector<mlir::Value> slicedStrides;
-    mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
+    mlir::Value one = fir::genConstantIndex(loc, idxTy, rewriter, 1);
     const bool sliceHasOrigins = !rebox.getShift().empty();
     unsigned sliceOps = rebox.getSliceOperandIndex();
     unsigned shiftOps = rebox.getShiftOperandIndex();
@@ -2350,7 +2298,7 @@ struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
     // which may be OK if all new extents are ones, the stride does not
     // matter, use one.
     mlir::Value stride = inputStrides.empty()
-                             ? genConstantIndex(loc, idxTy, rewriter, 1)
+                             ? fir::genConstantIndex(loc, idxTy, rewriter, 1)
                              : inputStrides[0];
     for (unsigned i = 0; i < rebox.getShape().size(); ++i) {
       mlir::Value rawExtent = operands[rebox.getShapeOperandIndex() + i];
@@ -2585,9 +2533,9 @@ struct XArrayCoorOpConversion
     unsigned shiftOffset = coor.getShiftOperandIndex();
     unsigned sliceOffset = coor.getSliceOperandIndex();
     auto sliceOps = coor.getSlice().begin();
-    mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
+    mlir::Value one = fir::genConstantIndex(loc, idxTy, rewriter, 1);
     mlir::Value prevExt = one;
-    mlir::Value offset = genConstantIndex(loc, idxTy, rewriter, 0);
+    mlir::Value offset = fir::genConstantIndex(loc, idxTy, rewriter, 0);
     const bool isShifted = !coor.getShift().empty();
     const bool isSliced = !coor.getSlice().empty();
     const bool baseIsBoxed =
@@ -2918,7 +2866,7 @@ struct CoordinateOpConversion
       // of lower bound aspects. This both accounts for dynamically sized
      // types and non contiguous arrays.
       auto idxTy = lowerTy().indexType();
-      mlir::Value off = genConstantIndex(loc, idxTy, rewriter, 0);
+      mlir::Value off = fir::genConstantIndex(loc, idxTy, rewriter, 0);
       unsigned arrayDim = arrTy.getDimension();
       for (unsigned dim = 0; dim < arrayDim && it != end; ++dim, ++it) {
         mlir::Value stride =
@@ -3846,7 +3794,7 @@ struct IsPresentOpConversion : public fir::FIROpConversion<fir::IsPresentOp> {
       ptr = mlir::LLVM::ExtractValueOp::create(rewriter, loc, ptr, 0);
     }
     mlir::LLVM::ConstantOp c0 =
-        genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
+        fir::genConstantIndex(isPresent.getLoc(), idxTy, rewriter, 0);
     auto addr = mlir::LLVM::PtrToIntOp::create(rewriter, loc, idxTy, ptr);
     rewriter.replaceOpWithNewOp<mlir::LLVM::ICmpOp>(
         isPresent, mlir::LLVM::ICmpPredicate::ne, addr, c0);