diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 0d4be09846b60..fadebc3ae4266 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4049,7 +4049,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // consider this an acceptable tradeoff for performance.
     // To make shadow propagation precise, we want the equivalent of
     // "horizontal OR", but this is not available.
-    return handleIntrinsicByApplyingToShadow(I, /* trailingVerbatimArgs */ 0);
+    return handleIntrinsicByApplyingToShadow(
+        I, /*shadowIntrinsicID=*/I.getIntrinsicID(),
+        /*trailingVerbatimArgs*/ 0);
   }
 
   /// Handle Arm NEON vector store intrinsics (vst{2,3,4}, vst1x_{2,3,4},
@@ -4156,6 +4158,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ///    shadow[out] =
   ///        intrinsic(shadow[var1], shadow[var2], opType) | shadow[opType]
   ///
+  /// Typically, shadowIntrinsicID will be specified by the caller to be
+  /// I.getIntrinsicID(), but the caller can choose to replace it with another
+  /// intrinsic of the same type.
+  ///
   /// CAUTION: this assumes that the intrinsic will handle arbitrary
   ///          bit-patterns (for example, if the intrinsic accepts floats for
   ///          var1, we require that it doesn't care if inputs are NaNs).
@@ -4165,6 +4171,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ///
   /// The origin is approximated using setOriginForNaryOp.
   void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
+                                         Intrinsic::ID shadowIntrinsicID,
                                          unsigned int trailingVerbatimArgs) {
     IRBuilder<> IRB(&I);
 
@@ -4188,7 +4195,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
 
     CallInst *CI =
-        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
+        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
     Value *CombinedShadow = CI;
 
     // Combine the computed shadow with the shadow of trailing args
@@ -4664,7 +4671,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     case Intrinsic::aarch64_neon_tbx3:
     case Intrinsic::aarch64_neon_tbx4: {
       // The last trailing argument (index register) should be handled verbatim
-      handleIntrinsicByApplyingToShadow(I, 1);
+      handleIntrinsicByApplyingToShadow(
+          I, /*shadowIntrinsicID=*/I.getIntrinsicID(),
+          /*trailingVerbatimArgs*/ 1);
       break;
     }
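
Usage sketch (not part of this patch): the new shadowIntrinsicID parameter lets a handler compute the shadow by applying a different intrinsic of the same type, e.g. a sibling operation known to tolerate arbitrary bit-patterns in its inputs. In the sketch below, Intrinsic::example_op and Intrinsic::example_op_shadow_safe are hypothetical placeholder IDs, not real intrinsics; a case like this would sit alongside the tbx cases above:

    case Intrinsic::example_op: {  // hypothetical intrinsic ID
      // Sketch only: compute shadow[out] by applying a same-typed sibling
      // intrinsic that handles arbitrary bit-patterns (see CAUTION above),
      // instead of applying example_op itself to the shadows.
      handleIntrinsicByApplyingToShadow(
          I, /*shadowIntrinsicID=*/Intrinsic::example_op_shadow_safe,
          /*trailingVerbatimArgs*/ 0);
      break;
    }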