
Commit 15eba9c

[VectorCombine] Add DataLayout to VectorCombine class instead of repeated calls to getDataLayout(). NFC.
1 parent 5344a37 commit 15eba9c
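
The refactor follows a familiar pattern: fetch the DataLayout pointer once in the pass entry point, stash it as a class member, and dereference the cached pointer wherever a const DataLayout & is needed, instead of walking Instruction -> Module -> DataLayout at every call site. A minimal C++ sketch of the idea (hypothetical MyCombine class and foldSomething helper, not the actual VectorCombine code):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Sketch only: hypothetical pass-like class, not the real VectorCombine.
class MyCombine {
  Function &F;
  const DataLayout *DL; // cached once at construction, reused by every fold

public:
  MyCombine(Function &F, const DataLayout *DL) : F(F), DL(DL) {}

  bool foldSomething(Instruction &I) {
    // Before: const DataLayout &DL = I.getModule()->getDataLayout();
    // After: dereference the cached member where a reference is expected.
    return DL->typeSizeEqualsStoreSize(I.getType());
  }
};

// In the pass's run() wrapper, mirroring the last hunk below:
//   const DataLayout *DL = &F.getParent()->getDataLayout();
//   MyCombine Combiner(F, DL);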

File tree: 1 file changed (+17, -20 lines)

llvm/lib/Transforms/Vectorize/VectorCombine.cpp

Lines changed: 17 additions & 20 deletions
@@ -66,8 +66,8 @@ class VectorCombine {
 public:
   VectorCombine(Function &F, const TargetTransformInfo &TTI,
                 const DominatorTree &DT, AAResults &AA, AssumptionCache &AC,
-                bool TryEarlyFoldsOnly)
-      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC),
+                const DataLayout *DL, bool TryEarlyFoldsOnly)
+      : F(F), Builder(F.getContext()), TTI(TTI), DT(DT), AA(AA), AC(AC), DL(DL),
         TryEarlyFoldsOnly(TryEarlyFoldsOnly) {}

   bool run();
@@ -79,6 +79,7 @@ class VectorCombine {
   const DominatorTree &DT;
   AAResults &AA;
   AssumptionCache &AC;
+  const DataLayout *DL;

   /// If true, only perform beneficial early IR transforms. Do not introduce new
   /// vector operations.
@@ -181,23 +182,22 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
   // We use minimal alignment (maximum flexibility) because we only care about
   // the dereferenceable region. When calculating cost and creating a new op,
   // we may use a larger value based on alignment attributes.
-  const DataLayout &DL = I.getModule()->getDataLayout();
   Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
   assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");

   unsigned MinVecNumElts = MinVectorSize / ScalarSize;
   auto *MinVecTy = VectorType::get(ScalarTy, MinVecNumElts, false);
   unsigned OffsetEltIndex = 0;
   Align Alignment = Load->getAlign();
-  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &AC,
+  if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), *DL, Load, &AC,
                                    &DT)) {
     // It is not safe to load directly from the pointer, but we can still peek
     // through gep offsets and check if it safe to load from a base address with
     // updated alignment. If it is, we can shuffle the element(s) into place
     // after loading.
-    unsigned OffsetBitWidth = DL.getIndexTypeSizeInBits(SrcPtr->getType());
+    unsigned OffsetBitWidth = DL->getIndexTypeSizeInBits(SrcPtr->getType());
     APInt Offset(OffsetBitWidth, 0);
-    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
+    SrcPtr = SrcPtr->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);

     // We want to shuffle the result down from a high element of a vector, so
     // the offset must be positive.
@@ -215,7 +215,7 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {
     if (OffsetEltIndex >= MinVecNumElts)
       return false;

-    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), DL, Load, &AC,
+    if (!isSafeToLoadUnconditionally(SrcPtr, MinVecTy, Align(1), *DL, Load, &AC,
                                      &DT))
       return false;

@@ -227,7 +227,7 @@ bool VectorCombine::vectorizeLoadInsert(Instruction &I) {

   // Original pattern: insertelt undef, load [free casts of] PtrOp, 0
   // Use the greater of the alignment on the load or its source pointer.
-  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
+  Alignment = std::max(SrcPtr->getPointerAlignment(*DL), Alignment);
   Type *LoadTy = Load->getType();
   unsigned AS = Load->getPointerAddressSpace();
   InstructionCost OldCost =
@@ -298,14 +298,13 @@ bool VectorCombine::widenSubvectorLoad(Instruction &I) {
   // the dereferenceable region. When calculating cost and creating a new op,
   // we may use a larger value based on alignment attributes.
   auto *Ty = cast<FixedVectorType>(I.getType());
-  const DataLayout &DL = I.getModule()->getDataLayout();
   Value *SrcPtr = Load->getPointerOperand()->stripPointerCasts();
   assert(isa<PointerType>(SrcPtr->getType()) && "Expected a pointer type");
   Align Alignment = Load->getAlign();
-  if (!isSafeToLoadUnconditionally(SrcPtr, Ty, Align(1), DL, Load, &AC, &DT))
+  if (!isSafeToLoadUnconditionally(SrcPtr, Ty, Align(1), *DL, Load, &AC, &DT))
     return false;

-  Alignment = std::max(SrcPtr->getPointerAlignment(DL), Alignment);
+  Alignment = std::max(SrcPtr->getPointerAlignment(*DL), Alignment);
   Type *LoadTy = Load->getType();
   unsigned AS = Load->getPointerAddressSpace();

@@ -854,7 +853,6 @@ bool VectorCombine::scalarizeVPIntrinsic(Instruction &I) {
   // Scalarize the intrinsic
   ElementCount EC = cast<VectorType>(Op0->getType())->getElementCount();
   Value *EVL = VPI.getArgOperand(3);
-  const DataLayout &DL = VPI.getModule()->getDataLayout();

   // If the VP op might introduce UB or poison, we can scalarize it provided
   // that we know the EVL > 0: If the EVL is zero, then the original VP op
@@ -867,7 +865,7 @@ bool VectorCombine::scalarizeVPIntrinsic(Instruction &I) {
   else
     SafeToSpeculate = isSafeToSpeculativelyExecuteWithOpcode(
         *FunctionalOpcode, &VPI, nullptr, &AC, &DT);
-  if (!SafeToSpeculate && !isKnownNonZero(EVL, DL, 0, &AC, &VPI, &DT))
+  if (!SafeToSpeculate && !isKnownNonZero(EVL, *DL, 0, &AC, &VPI, &DT))
     return false;

   Value *ScalarVal =
@@ -1246,12 +1244,11 @@ bool VectorCombine::foldSingleElementStore(Instruction &I) {

   if (auto *Load = dyn_cast<LoadInst>(Source)) {
     auto VecTy = cast<VectorType>(SI->getValueOperand()->getType());
-    const DataLayout &DL = I.getModule()->getDataLayout();
     Value *SrcAddr = Load->getPointerOperand()->stripPointerCasts();
     // Don't optimize for atomic/volatile load or store. Ensure memory is not
     // modified between, vector type matches store size, and index is inbounds.
     if (!Load->isSimple() || Load->getParent() != SI->getParent() ||
-        !DL.typeSizeEqualsStoreSize(Load->getType()->getScalarType()) ||
+        !DL->typeSizeEqualsStoreSize(Load->getType()->getScalarType()) ||
         SrcAddr != SI->getPointerOperand()->stripPointerCasts())
       return false;

@@ -1270,7 +1267,7 @@ bool VectorCombine::foldSingleElementStore(Instruction &I) {
     NSI->copyMetadata(*SI);
     Align ScalarOpAlignment = computeAlignmentAfterScalarization(
         std::max(SI->getAlign(), Load->getAlign()), NewElement->getType(), Idx,
-        DL);
+        *DL);
     NSI->setAlignment(ScalarOpAlignment);
     replaceValue(I, *NSI);
     eraseInstruction(I);
@@ -1288,8 +1285,7 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {

   auto *VecTy = cast<VectorType>(I.getType());
   auto *LI = cast<LoadInst>(&I);
-  const DataLayout &DL = I.getModule()->getDataLayout();
-  if (LI->isVolatile() || !DL.typeSizeEqualsStoreSize(VecTy->getScalarType()))
+  if (LI->isVolatile() || !DL->typeSizeEqualsStoreSize(VecTy->getScalarType()))
     return false;

   InstructionCost OriginalCost =
@@ -1367,7 +1363,7 @@ bool VectorCombine::scalarizeLoadExtract(Instruction &I) {
         VecTy->getElementType(), GEP, EI->getName() + ".scalar"));

     Align ScalarOpAlignment = computeAlignmentAfterScalarization(
-        LI->getAlign(), VecTy->getElementType(), Idx, DL);
+        LI->getAlign(), VecTy->getElementType(), Idx, *DL);
     NewLoad->setAlignment(ScalarOpAlignment);

     replaceValue(*EI, *NewLoad);
@@ -2042,7 +2038,8 @@ PreservedAnalyses VectorCombinePass::run(Function &F,
   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
   DominatorTree &DT = FAM.getResult<DominatorTreeAnalysis>(F);
   AAResults &AA = FAM.getResult<AAManager>(F);
-  VectorCombine Combiner(F, TTI, DT, AA, AC, TryEarlyFoldsOnly);
+  const DataLayout *DL = &F.getParent()->getDataLayout();
+  VectorCombine Combiner(F, TTI, DT, AA, AC, DL, TryEarlyFoldsOnly);
   if (!Combiner.run())
     return PreservedAnalyses::all();
   PreservedAnalyses PA;
