diff --git a/include/glow/Graph/Nodes.h b/include/glow/Graph/Nodes.h
index 087bed39b5..8b890a816c 100644
--- a/include/glow/Graph/Nodes.h
+++ b/include/glow/Graph/Nodes.h
@@ -3,7 +3,8 @@
 #include "glow/Base/Tensor.h"
 #include "glow/Graph/Node.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/Support/Casting.h"
 
 namespace glow {
diff --git a/include/glow/IR/Instrs.h b/include/glow/IR/Instrs.h
index dc1a76fde8..1c67f42958 100644
--- a/include/glow/IR/Instrs.h
+++ b/include/glow/IR/Instrs.h
@@ -4,9 +4,9 @@
 #include "glow/Base/Type.h"
 #include "glow/Graph/Nodes.h"
 #include "glow/IR/IR.h"
-#include "glow/Support/Casting.h"
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Casting.h"
 
 namespace glow {
diff --git a/include/glow/Importer/Caffe2.h b/include/glow/Importer/Caffe2.h
index 4a5c638f0a..50c8dba4b0 100644
--- a/include/glow/Importer/Caffe2.h
+++ b/include/glow/Importer/Caffe2.h
@@ -1,10 +1,10 @@
 #ifndef GLOW_IMPORTER_CAFFE2_H
 #define GLOW_IMPORTER_CAFFE2_H
 
-#include "llvm/ADT/ArrayRef.h"
-
 #include "glow/Graph/Graph.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Casting.h"
 
 #include
 #include
diff --git a/include/glow/Support/Casting.h b/include/glow/Support/Casting.h
deleted file mode 100644
index e1c8feb0a9..0000000000
--- a/include/glow/Support/Casting.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef GLOW_SUPPORT_CASTING_H
-#define GLOW_SUPPORT_CASTING_H
-
-#include
-#include
-#include
-
-namespace glow {
-
-template <class TO, class FROM> bool isa(FROM *k) { return (TO::classof(k)); }
-
-template <class TO, class FROM> TO *cast(FROM *k) {
-  assert(isa<TO>(k) && "Invalid cast");
-  return static_cast<TO *>(k);
-}
-
-template <class TO, class FROM> TO *dyn_cast(FROM *k) {
-  if (isa<TO>(k)) {
-    return cast<TO>(k);
-  }
-
-  return nullptr;
-}
-
-} // end namespace glow
-
-#endif // GLOW_SUPPORT_CASTING_H
diff --git a/src/glow/Graph/Graph.cpp b/src/glow/Graph/Graph.cpp
index a994c9ee07..bca570c15e 100644
--- a/src/glow/Graph/Graph.cpp
+++ b/src/glow/Graph/Graph.cpp
@@ -3,9 +3,10 @@
 #include "glow/Graph/Graph.h"
 #include "glow/Graph/Nodes.h"
 #include "glow/IR/IR.h"
-#include "glow/Support/Casting.h"
 #include "glow/Support/Support.h"
 
+#include "llvm/Support/Casting.h"
+
 #include
 #include
@@ -274,7 +275,7 @@ struct DottyPrinterPass : NodeVisitor {
       std::string repr = escapeDottyString(N->getDebugDesc());
       os_ << "\tlabel = " + quote(repr) + "\n";
       os_ << "\tshape = \"record\"\n";
-      if (isa<Variable>(N)) {
+      if (llvm::isa<Variable>(N)) {
         os_ << "\tfillcolor=pink,style=filled\n";
       }
       os_ << "];\n\n";
diff --git a/src/glow/IR/IR.cpp b/src/glow/IR/IR.cpp
index c417a16431..35989b6415 100644
--- a/src/glow/IR/IR.cpp
+++ b/src/glow/IR/IR.cpp
@@ -2,9 +2,10 @@
 #include "glow/IR/IR.h"
 #include "glow/IR/Instrs.h"
-#include "glow/Support/Casting.h"
 #include "glow/Support/Support.h"
 
+#include "llvm/Support/Casting.h"
+
 #include
 #include
 #include
@@ -58,7 +59,7 @@ void Instruction::verifyUseList() const {
 
 void Instruction::verify() const {
 #define DEF_INSTR(CLASS, NAME)                                                 \
-  if (auto *X = dyn_cast<const CLASS>(this))                                   \
+  if (auto *X = llvm::dyn_cast<const CLASS>(this))                             \
     X->verify();
 #define DEF_VALUE(CLASS, NAME)
 #include "AutoGenInstr.def"
@@ -120,10 +121,10 @@ Value *Module::getWeightForNode(const Node *V) const {
 
 static void dumpIR(Value *V, std::ostream &out) {
 #define DEF_INSTR(CLASS, NAME)                                                 \
-  if (const auto *X = dyn_cast<const CLASS>(V))                                \
+  if (const auto *X = llvm::dyn_cast<const CLASS>(V))                          \
     return X->dump(out);
 #define DEF_VALUE(CLASS, NAME)                                                 \
-  if (const auto *X = dyn_cast<const CLASS>(V))                                \
+  if (const auto *X = llvm::dyn_cast<const CLASS>(V))                          \
     return X->dump(out);
 #include "AutoGenInstr.def"
   glow_unreachable();
@@ -132,7 +133,7 @@ static void dumpIR(Value *V, std::ostream &out) {
 bool Instruction::isInplaceOp(const Instruction *I, unsigned dstIdx,
                               unsigned srcIdx) {
 #define DEF_INSTR(CLASS, NAME)                                                 \
-  if (const auto *X = dyn_cast<const CLASS>(I))                                \
+  if (const auto *X = llvm::dyn_cast<const CLASS>(I))                          \
     return X->isInplaceOp(dstIdx, srcIdx);
 #define DEF_VALUE(CLASS, NAME)
 #include "AutoGenInstr.def"
diff --git a/src/glow/IR/IRGen.cpp b/src/glow/IR/IRGen.cpp
index ca920eb0db..8402cd4b16 100644
--- a/src/glow/IR/IRGen.cpp
+++ b/src/glow/IR/IRGen.cpp
@@ -4,7 +4,8 @@
 #include "glow/Graph/Nodes.h"
 #include "glow/IR/IR.h"
 #include "glow/IR/IRBuilder.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/Support/Casting.h"
 
 #include
@@ -44,7 +45,7 @@ struct IRGenVisitor : NodeVisitor {
   /// Saves the generated IR in \p v for the node \p N.
   void registerIR(Node *N, Value *v) {
     assert(!generatedNodes.count(N) && "Already generated code for this node");
-    assert((isa<WeightVar>(v) || isa<AllocActivationInst>(v)) &&
+    assert((llvm::isa<WeightVar>(v) || llvm::isa<AllocActivationInst>(v)) &&
            "Value operand must be a memory location");
     generatedNodes[N] = v;
     // Register the fact that we've lowered this variable to the new weight.
@@ -59,7 +60,7 @@ struct IRGenVisitor : NodeVisitor {
      glow_unreachable();
      break;
    case glow::Kinded::Kind::ConvolutionNodeKind: {
-      auto *C = cast<ConvolutionNode>(N);
+      auto *C = llvm::cast<ConvolutionNode>(N);
      auto *in = valueForNode(C->getInput());
      auto *filter = valueForNode(C->getFilter());
      auto *bias = valueForNode(C->getBias());
@@ -72,7 +73,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::PoolNodeKind: {
-      auto *P = cast<PoolNode>(N);
+      auto *P = llvm::cast<PoolNode>(N);
      auto *in = valueForNode(P->getInput());
      Instruction *V = nullptr;
      if (P->getMode() == PoolNode::Mode::Max) {
@@ -88,7 +89,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::FullyConnectedNodeKind: {
-      auto *FC = cast<FullyConnectedNode>(N);
+      auto *FC = llvm::cast<FullyConnectedNode>(N);
      auto *in = valueForNode(FC->getInput());
      auto *filter = valueForNode(FC->getFilter());
      auto *bias = valueForNode(FC->getBias());
@@ -99,7 +100,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::ReluNodeKind: {
-      auto *R = cast<ReluNode>(N);
+      auto *R = llvm::cast<ReluNode>(N);
      auto *V = builder_.createRELUOp(valueForNode(R->getInput()));
      V->setName(N->getName());
      registerIR(N, V->getDest());
@@ -107,21 +108,21 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::SigmoidNodeKind: {
-      auto *S = cast<SigmoidNode>(N);
+      auto *S = llvm::cast<SigmoidNode>(N);
      auto *V = builder_.createSigmoidOp(valueForNode(S->getInput()));
      V->setName(N->getName());
      registerIR(N, V->getDest());
      break;
    }
    case glow::Kinded::Kind::TanhNodeKind: {
-      auto *T = cast<TanhNode>(N);
+      auto *T = llvm::cast<TanhNode>(N);
      auto *V = builder_.createTanhOp(valueForNode(T->getInput()));
      V->setName(N->getName());
      registerIR(N, V->getDest());
      break;
    }
    case glow::Kinded::Kind::SoftMaxNodeKind: {
-      auto *SM = cast<SoftMaxNode>(N);
+      auto *SM = llvm::cast<SoftMaxNode>(N);
      auto *in = valueForNode(SM->getInput());
      auto *select = valueForNode(SM->getSelected());
      auto *V = builder_.createSoftMaxOp(in, select);
@@ -130,7 +131,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::RegressionNodeKind: {
-      auto *RR = cast<RegressionNode>(N);
+      auto *RR = llvm::cast<RegressionNode>(N);
      auto *in = valueForNode(RR->getInput());
      auto *expected = valueForNode(RR->getExpected());
      auto *V = builder_.createRegressionOp(in, expected);
@@ -139,7 +140,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::TransposeNodeKind: {
-      auto *TT = cast<TransposeNode>(N);
+      auto *TT = llvm::cast<TransposeNode>(N);
      auto *in = valueForNode(TT->getInput());
      auto *V = builder_.createTransposeOp(in, TT->getShuffle());
      V->setName(N->getName());
@@ -147,7 +148,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::ReshapeNodeKind: {
-      auto *RS = cast<ReshapeNode>(N);
+      auto *RS = llvm::cast<ReshapeNode>(N);
      auto *in = valueForNode(RS->getInput());
      auto *V = builder_.createReshapeOp(in, RS->getDims());
      V->setName(N->getName());
@@ -155,7 +156,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::ConcatNodeKind: {
-      auto *CC = cast<ConcatNode>(N);
+      auto *CC = llvm::cast<ConcatNode>(N);
      auto *LHS = valueForNode(CC->getLHS());
      auto *RHS = valueForNode(CC->getRHS());
      auto *V = builder_.createConcatOp(LHS, RHS, CC->getDim());
@@ -164,7 +165,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::BatchNormalizationNodeKind: {
-      auto *BN = cast<BatchNormalizationNode>(N);
+      auto *BN = llvm::cast<BatchNormalizationNode>(N);
      auto *in = valueForNode(BN->getInput());
      auto *beta = valueForNode(BN->getBias());
      auto *gamma = valueForNode(BN->getScale());
@@ -180,7 +181,7 @@ struct IRGenVisitor : NodeVisitor {
    }
 
    case glow::Kinded::Kind::LocalResponseNormalizationNodeKind: {
-      auto *LR = cast<LocalResponseNormalizationNode>(N);
+      auto *LR = llvm::cast<LocalResponseNormalizationNode>(N);
      auto *in = valueForNode(LR->getInput());
      auto *V = builder_.createLocalResponseNormalizationOp(
          in, LR->getHalfWindowSize(), LR->getAlpha(), LR->getBeta(),
@@ -190,7 +191,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::ArithmeticNodeKind: {
-      auto *AR = cast<ArithmeticNode>(N);
+      auto *AR = llvm::cast<ArithmeticNode>(N);
      auto *L = valueForNode(AR->getLHS());
      auto *R = valueForNode(AR->getRHS());
 
@@ -206,7 +207,7 @@ struct IRGenVisitor : NodeVisitor {
      break;
    }
    case glow::Kinded::Kind::SaveNodeKind: {
-      auto *R = cast<SaveNode>(N);
+      auto *R = llvm::cast<SaveNode>(N);
      auto *src = valueForNode(R->getInput());
      auto *dest = valueForNode(R->getOutput());
      auto *V = builder_.createCopyInst(dest, src);
@@ -215,7 +216,7 @@ struct IRGenVisitor : NodeVisitor {
    }
    case glow::Kinded::Kind::VariableNodeKind: {
      using MK = WeightVar::MutabilityKind;
-      auto *V = cast<Variable>(N);
+      auto *V = llvm::cast<Variable>(N);
      bool isConst = V->getInitKind() == Variable::InitKind::Extern;
      auto *W = builder_.createWeightVar(V->getType(), V->getName(),
                                         isConst ? MK::Constant : MK::Mutable);
@@ -259,7 +260,7 @@ void generateBackwardPass(Module &M) {
   for (auto I : instrs) {
     switch (I->getKind()) {
     case Kind::AllocActivationInstKind: {
-      auto *AC = cast<AllocActivationInst>(I);
+      auto *AC = llvm::cast<AllocActivationInst>(I);
       auto *N = new AllocActivationInst(AC->getName(), AC->getType());
       allocs.push_back(N);
       weightToGradMap[I] = N;
@@ -269,78 +270,82 @@ void generateBackwardPass(Module &M) {
       break;
     }
     case Kind::CopyInstKind: {
-      auto *CC = cast<CopyInst>(I);
+      auto *CC = llvm::cast<CopyInst>(I);
       auto *N = new CopyInst(CC->getName(), weightToGradMap[CC->getSrc()],
                              weightToGradMap[CC->getDest()]);
       toAppend.push_back(N);
       break;
     }
     case Kind::ConvolutionInstKind: {
-      toAppend.push_back(cast<ConvolutionInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(
+          llvm::cast<ConvolutionInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::PoolMaxInstKind: {
-      toAppend.push_back(cast<PoolMaxInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<PoolMaxInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::PoolAvgInstKind: {
-      toAppend.push_back(cast<PoolAvgInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<PoolAvgInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::FullyConnectedInstKind: {
-      toAppend.push_back(cast<FullyConnectedInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(
+          llvm::cast<FullyConnectedInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::BatchNormalizationInstKind: {
       toAppend.push_back(
-          cast<BatchNormalizationInst>(I)->getGrad(weightToGradMap));
+          llvm::cast<BatchNormalizationInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::LocalResponseNormalizationInstKind: {
-      toAppend.push_back(
-          cast<LocalResponseNormalizationInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<LocalResponseNormalizationInst>(I)->getGrad(
+          weightToGradMap));
       break;
     }
     case Kind::SoftMaxInstKind: {
-      toAppend.push_back(cast<SoftMaxInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<SoftMaxInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::RegressionInstKind: {
-      toAppend.push_back(cast<RegressionInst>(I)->getGrad(weightToGradMap));
-
+      toAppend.push_back(
+          llvm::cast<RegressionInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::ElementAddInstKind: {
-      toAppend.push_back(cast<ElementAddInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(
+          llvm::cast<ElementAddInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::ElementMulInstKind: {
-      toAppend.push_back(cast<ElementMulInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(
+          llvm::cast<ElementMulInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::ReluInstKind: {
-      toAppend.push_back(cast<ReluInst>(I)->getGrad(weightToGradMap));
-
+      toAppend.push_back(llvm::cast<ReluInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::SigmoidInstKind: {
-      toAppend.push_back(cast<SigmoidInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<SigmoidInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::TanhInstKind: {
-      toAppend.push_back(cast<TanhInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<TanhInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::ReshapeInstKind: {
-      toAppend.push_back(cast<ReshapeInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<ReshapeInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::TransposeInstKind: {
-      toAppend.push_back(cast<TransposeInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(
+          llvm::cast<TransposeInst>(I)->getGrad(weightToGradMap));
       break;
     }
     case Kind::ConcatInstKind: {
-      toAppend.push_back(cast<ConcatInst>(I)->getGrad(weightToGradMap));
+      toAppend.push_back(llvm::cast<ConcatInst>(I)->getGrad(weightToGradMap));
       break;
     }
     default:
diff --git a/src/glow/IR/Instrs.cpp b/src/glow/IR/Instrs.cpp
index 1f037b8b65..d61b509401 100644
--- a/src/glow/IR/Instrs.cpp
+++ b/src/glow/IR/Instrs.cpp
@@ -2,9 +2,10 @@
 #include "glow/IR/Instrs.h"
 #include "glow/IR/IR.h"
-#include "glow/Support/Casting.h"
 #include "glow/Support/Support.h"
 
+#include "llvm/Support/Casting.h"
+
 #include
 
 using namespace glow;
@@ -42,8 +43,8 @@ void CopyInst::verify() const {
   (void)op0;
   (void)op1;
   // The operands of the copy instruction must be variables.
-  assert(isa<WeightVar>(op0) || isa<AllocActivationInst>(op0));
-  assert(isa<WeightVar>(op1) || isa<AllocActivationInst>(op1));
+  assert(llvm::isa<WeightVar>(op0) || llvm::isa<AllocActivationInst>(op0));
+  assert(llvm::isa<WeightVar>(op1) || llvm::isa<AllocActivationInst>(op1));
 }
 
 void ConvolutionInst::verify() const {
   Value *dest = getOperand(0).first;
@@ -209,7 +210,7 @@ void ElementMulInst::verify() const {
 void AllocActivationInst::verify() const {
   unsigned numDealloc = 0;
   for (const Use &U : getUsers()) {
-    numDealloc += isa<DeallocActivationInst>(U.get());
+    numDealloc += llvm::isa<DeallocActivationInst>(U.get());
   }
 
   // Make sure that there is exactly one user is a deallocation.
@@ -218,7 +219,8 @@
 void DeallocActivationInst::verify() const {
   // The operand of this instruction needs to be an AllocActivationInst.
-  assert(isa<AllocActivationInst>(getOperand(0).first) && "Invalid operand");
+  assert(llvm::isa<AllocActivationInst>(getOperand(0).first) &&
+         "Invalid operand");
 }
 
 // TODO: verify the gradient instructions.
 #define NOVERIFY(ClassName)                                                    \
diff --git a/src/glow/Importer/Caffe2.cpp b/src/glow/Importer/Caffe2.cpp
index fa2f823e50..d194a9944e 100644
--- a/src/glow/Importer/Caffe2.cpp
+++ b/src/glow/Importer/Caffe2.cpp
@@ -8,7 +8,8 @@
 #include "glow/IR/IR.h"
 #include "glow/IR/IRBuilder.h"
 #include "glow/IR/Instrs.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/Support/Casting.h"
 
 #include "caffe.pb.h"
 #include
@@ -185,14 +186,14 @@ void caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) {
 
    auto *tr = G.createTranspose(op.name(), in, NCHW2NHWC);
    auto *node = G.createConv(op.name(), tr, numFilters, kernel, stride, pad);
-    cast<Variable>(node->getFilter())->copyFrom(&wtag);
+    llvm::cast<Variable>(node->getFilter())->copyFrom(&wtag);
 
    // If we don't have a bias vector then create one that matches the weight
    // size and fill it with zeros.
    if (b) {
-      cast<Variable>(node->getBias())->copyFrom(b);
+      llvm::cast<Variable>(node->getBias())->copyFrom(b);
    } else {
-      cast<Variable>(node->getBias())->getPayload().zero();
+      llvm::cast<Variable>(node->getBias())->getPayload().zero();
    }
 
    auto *N = G.createTranspose(op.name(), node, NHWC2NCHW);
@@ -262,10 +263,10 @@ void caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) {
    auto *node = G.createBatchNormalization(op.name(), in, channel, epsilon);
 
    // Load the weights.
-    cast<Variable>(node->getScale())->copyFrom(scale);
-    cast<Variable>(node->getBias())->copyFrom(bias);
-    cast<Variable>(node->getMean())->copyFrom(mean);
-    cast<Variable>(node->getVar())->copyFrom(var);
+    llvm::cast<Variable>(node->getScale())->copyFrom(scale);
+    llvm::cast<Variable>(node->getBias())->copyFrom(bias);
+    llvm::cast<Variable>(node->getMean())->copyFrom(mean);
+    llvm::cast<Variable>(node->getVar())->copyFrom(var);
 
    for (int i = 0, e = op.output_size(); i < e; i++) {
      nodeByName_[op.output(i)] = node;
@@ -332,8 +333,8 @@ void caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) {
    auto *FC = G.createFullyConnected(op.name(), in, b->size());
 
    // Load weights.
-    cast<Variable>(FC->getFilter())->getPayload().copyFrom(w);
-    cast<Variable>(FC->getBias())->getPayload().copyFrom(b);
+    llvm::cast<Variable>(FC->getFilter())->getPayload().copyFrom(w);
+    llvm::cast<Variable>(FC->getBias())->getPayload().copyFrom(b);
 
    // Save the outputs:
    for (int i = 0, e = op.output_size(); i < e; i++) {
diff --git a/src/glow/Interpreter/Interpreter.cpp b/src/glow/Interpreter/Interpreter.cpp
index a45a191eab..85ab3c1230 100644
--- a/src/glow/Interpreter/Interpreter.cpp
+++ b/src/glow/Interpreter/Interpreter.cpp
@@ -5,7 +5,8 @@
 #include "glow/Graph/Nodes.h"
 #include "glow/IR/Instrs.h"
 #include "glow/Optimizer/Optimizer.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/Support/Casting.h"
 
 using namespace glow;
@@ -89,7 +90,7 @@ void Interpreter::doForwardPass(bool isTrain) {
 #define DEF_VALUE(CLASS, NAME)
 #define DEF_INSTR(CLASS, NAME)                                                 \
   case Kinded::Kind::CLASS##Kind: {                                            \
-    fwd##CLASS(isTrain, cast<CLASS>(I));                                       \
+    fwd##CLASS(isTrain, llvm::cast<CLASS>(I));                                 \
     break;                                                                     \
   }
   // Dispatch the interpreter on each instruction in the program:
diff --git a/src/glow/Interpreter/InterpreterNodes.cpp b/src/glow/Interpreter/InterpreterNodes.cpp
index d8af6a7389..ffecdb3784 100644
--- a/src/glow/Interpreter/InterpreterNodes.cpp
+++ b/src/glow/Interpreter/InterpreterNodes.cpp
@@ -2,7 +2,8 @@
 #include "glow/IR/Instrs.h"
 #include "glow/Interpreter/Interpreter.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/Support/Casting.h"
 
 using namespace glow;
diff --git a/src/glow/Optimizer/GraphOptimizer.cpp b/src/glow/Optimizer/GraphOptimizer.cpp
index efb8385946..af01435e85 100644
--- a/src/glow/Optimizer/GraphOptimizer.cpp
+++ b/src/glow/Optimizer/GraphOptimizer.cpp
@@ -3,7 +3,8 @@
 #include "glow/Graph/Graph.h"
 #include "glow/Graph/Node.h"
 #include "glow/Optimizer/Optimizer.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/Support/Casting.h"
 
 #include
 #include
@@ -22,7 +23,7 @@ static void DCE(Graph &G) {
    changedLocally = false;
    for (auto it = nodes.begin(), e = nodes.end(); it != e;) {
      bool used = (*it)->hasUsers();
-      if (used || isa<Variable>(*it)) {
+      if (used || llvm::isa<Variable>(*it)) {
        it++;
        continue;
      }
@@ -73,8 +74,8 @@
  // For each node:
  for (auto it = nodes.begin(), e = nodes.end(); it != e; ++it) {
    // Sink Transpose below batch normalization nodes:
-    if (auto *BN = dyn_cast<BatchNormalizationNode>(*it)) {
-      auto *TR = dyn_cast<TransposeNode>(BN->getInput());
+    if (auto *BN = llvm::dyn_cast<BatchNormalizationNode>(*it)) {
+      auto *TR = llvm::dyn_cast<TransposeNode>(BN->getInput());
 
      if (!TR) {
        continue;
@@ -97,8 +98,8 @@
 
    // Sink Transpose below batch RELU nodes.
    // TODO: support other similar activation functions, such as sigmoid, etc.
-    if (auto *RL = dyn_cast<ReluNode>(*it)) {
-      auto *TR = dyn_cast<TransposeNode>(RL->getInput());
+    if (auto *RL = llvm::dyn_cast<ReluNode>(*it)) {
+      auto *TR = llvm::dyn_cast<TransposeNode>(RL->getInput());
 
      if (!TR) {
        continue;
@@ -111,8 +112,8 @@
    }
 
    // Merge consecutive Transpose operations.
-    if (auto *TR1 = dyn_cast<TransposeNode>(*it)) {
-      auto *TR2 = dyn_cast<TransposeNode>(TR1->getInput());
+    if (auto *TR1 = llvm::dyn_cast<TransposeNode>(*it)) {
+      auto *TR2 = llvm::dyn_cast<TransposeNode>(TR1->getInput());
 
      if (!TR2) {
        continue;
@@ -131,9 +132,9 @@
    }
 
    // Sink Transpose below Arithmetic nodes.
-    if (auto *AN = dyn_cast<ArithmeticNode>(*it)) {
-      auto *LTR = dyn_cast<TransposeNode>(AN->getLHS());
-      auto *RTR = dyn_cast<TransposeNode>(AN->getRHS());
+    if (auto *AN = llvm::dyn_cast<ArithmeticNode>(*it)) {
+      auto *LTR = llvm::dyn_cast<TransposeNode>(AN->getLHS());
+      auto *RTR = llvm::dyn_cast<TransposeNode>(AN->getRHS());
 
      if (!LTR || !RTR) {
        continue;
@@ -150,9 +151,9 @@
    }
 
    // Sink RELU below batch concat nodes.
-    if (auto *CN = dyn_cast<ConcatNode>(*it)) {
-      auto *L = dyn_cast<ReluNode>(CN->getLHS());
-      auto *R = dyn_cast<ReluNode>(CN->getRHS());
+    if (auto *CN = llvm::dyn_cast<ConcatNode>(*it)) {
+      auto *L = llvm::dyn_cast<ReluNode>(CN->getLHS());
+      auto *R = llvm::dyn_cast<ReluNode>(CN->getRHS());
 
      if (L && R) {
        auto *newCN = G.createConcat(CN->getName(), L->getInput(),
@@ -163,9 +164,9 @@
    }
 
    // Sink Transpose below concat nodes.
-    if (auto *CN = dyn_cast<ConcatNode>(*it)) {
-      TransposeNode *L = dyn_cast<TransposeNode>(CN->getLHS());
-      TransposeNode *R = dyn_cast<TransposeNode>(CN->getRHS());
+    if (auto *CN = llvm::dyn_cast<ConcatNode>(*it)) {
+      TransposeNode *L = llvm::dyn_cast<TransposeNode>(CN->getLHS());
+      TransposeNode *R = llvm::dyn_cast<TransposeNode>(CN->getRHS());
 
      // Both sides must be a transpose instruction.
      if (!L || !R) {
@@ -203,8 +204,8 @@
    // nodes does not give us much. However, reordering the buffers allows us to
    // reuse the memory buffer of the pool operation and potentially save
    // memory.
-    if (auto *PL = dyn_cast<PoolNode>(*it)) {
-      auto *RL = dyn_cast<ReluNode>(PL->getInput());
+    if (auto *PL = llvm::dyn_cast<PoolNode>(*it)) {
+      auto *RL = llvm::dyn_cast<ReluNode>(PL->getInput());
 
      if (!RL) {
        continue;
@@ -238,8 +239,8 @@
  for (auto it = nodes.begin(), e = nodes.end(); it != e; ++it) {
    // Merge the Batch Normalization operation into the convolution that comes
    // before it by updating the weights of the filter.
-    if (auto *BN = dyn_cast<BatchNormalizationNode>(*it)) {
-      auto *CV = dyn_cast<ConvolutionNode>(BN->getInput());
+    if (auto *BN = llvm::dyn_cast<BatchNormalizationNode>(*it)) {
+      auto *CV = llvm::dyn_cast<ConvolutionNode>(BN->getInput());
      if (!CV) {
        continue;
      }
@@ -283,13 +284,13 @@
      // Q = W * A
      // C = b * A + B
 
-      auto filterH = cast<Variable>(CV->getFilter())->getHandle<>();
-      auto cbiasH = cast<Variable>(CV->getBias())->getHandle<>();
+      auto filterH = llvm::cast<Variable>(CV->getFilter())->getHandle<>();
+      auto cbiasH = llvm::cast<Variable>(CV->getBias())->getHandle<>();
 
-      auto scaleH = cast<Variable>(BN->getScale())->getHandle<>();
-      auto biasH = cast<Variable>(BN->getBias())->getHandle<>();
-      auto meanH = cast<Variable>(BN->getMean())->getHandle<>();
-      auto varH = cast<Variable>(BN->getVar())->getHandle<>();
+      auto scaleH = llvm::cast<Variable>(BN->getScale())->getHandle<>();
+      auto biasH = llvm::cast<Variable>(BN->getBias())->getHandle<>();
+      auto meanH = llvm::cast<Variable>(BN->getMean())->getHandle<>();
+      auto varH = llvm::cast<Variable>(BN->getVar())->getHandle<>();
 
      // Update the filater/bias variables of the Conv node.
      auto epsilon = BN->getEpsilon();
diff --git a/src/glow/Optimizer/IROptimizer.cpp b/src/glow/Optimizer/IROptimizer.cpp
index 10e5669b73..15d60dcf02 100644
--- a/src/glow/Optimizer/IROptimizer.cpp
+++ b/src/glow/Optimizer/IROptimizer.cpp
@@ -3,7 +3,8 @@
 #include "glow/IR/IR.h"
 #include "glow/IR/Instrs.h"
 #include "glow/Optimizer/Optimizer.h"
-#include "glow/Support/Casting.h"
+
+#include "llvm/Support/Casting.h"
 
 #include
 #include
@@ -21,13 +22,13 @@ static void calculateLiveness(Module &M, LivenessMap &liveness) {
  for (auto it = instrs.begin(), e = instrs.end(); it != e; ++it) {
    instIdx++;
    // Ignore deallocations in our liveness calculation.
-    if (isa<DeallocActivationInst>(*it)) {
+    if (llvm::isa<DeallocActivationInst>(*it)) {
      continue;
    }
 
    for (int i = 0, e = (*it)->getNumOperands(); i < e; i++) {
      auto op = (*it)->getOperand(i).first;
-      auto aa = dyn_cast<AllocActivationInst>(op);
+      auto aa = llvm::dyn_cast<AllocActivationInst>(op);
      if (!aa) {
        continue;
      }
@@ -54,12 +55,12 @@ static void hoistDealloc(Module &M) {
  // Record the last use of each dealloc.
  for (auto it = instrs.begin(), e = instrs.end(); it != e; ++it) {
-    if (isa<DeallocActivationInst>(*it))
+    if (llvm::isa<DeallocActivationInst>(*it))
      continue;
 
    for (int i = 0, e = (*it)->getNumOperands(); i < e; i++) {
      auto op = (*it)->getOperand(i).first;
-      if (auto alloc = dyn_cast<AllocActivationInst>(op)) {
+      if (auto alloc = llvm::dyn_cast<AllocActivationInst>(op)) {
        lastUser[alloc] = it;
      }
    }
@@ -69,13 +70,13 @@ static void hoistDealloc(Module &M) {
  for (auto it = instrs.begin(), e = instrs.end(); it != e;
       /* increment below */) {
    iterator curr = it;
-    auto *da = dyn_cast<DeallocActivationInst>(*curr);
+    auto *da = llvm::dyn_cast<DeallocActivationInst>(*curr);
    if (!da) {
      ++it;
      continue;
    }
 
-    auto *alloc = cast<AllocActivationInst>(da->getOperand(0).first);
+    auto *alloc = llvm::cast<AllocActivationInst>(da->getOperand(0).first);
    it = instrs.erase(curr);
    auto &where = lastUser[alloc];
@@ -94,7 +95,7 @@ static void sinkAllocas(Module &M) {
  // Remove all of the allocas.
  for (auto it = instrs.begin(), e = instrs.end(); it != e;) {
    iterator curr = it;
-    auto *aa = dyn_cast<AllocActivationInst>(*curr);
+    auto *aa = llvm::dyn_cast<AllocActivationInst>(*curr);
    if (!aa) {
      ++it;
      continue;
@@ -108,7 +109,7 @@ static void sinkAllocas(Module &M) {
  for (auto it = instrs.begin(), e = instrs.end(); it != e; ++it) {
    for (int i = 0, e = (*it)->getNumOperands(); i < e; i++) {
      auto op = (*it)->getOperand(i).first;
-      auto aa = dyn_cast<AllocActivationInst>(op);
+      auto aa = llvm::dyn_cast<AllocActivationInst>(op);
      if (!aa) {
        continue;
      }
@@ -133,7 +134,7 @@ static void deleteDeadAllocs(Module &M) {
      std::remove_if(instrs.begin(), instrs.end(),
                     [](const Instruction *I) -> bool {
                       if (const auto *DA =
-                               dyn_cast<DeallocActivationInst>(I)) {
+                               llvm::dyn_cast<DeallocActivationInst>(I)) {
                         return DA->getAlloc()->getNumUsers() < 2;
                       }
                       return false;
@@ -143,7 +144,7 @@
  // Remove the unused allocs.
  instrs.erase(std::remove_if(instrs.begin(), instrs.end(),
                              [](const Instruction *I) -> bool {
-                                if (isa<AllocActivationInst>(I)) {
+                                if (llvm::isa<AllocActivationInst>(I)) {
                                  return I->getNumUsers() < 2;
                                }
                                return false;
@@ -162,7 +163,7 @@ static void replaceAllNonDeallocUsersWith(Value *val, Value *with) {
  std::vector<Use> usersVec(users.begin(), users.end());
  for (auto &U : usersVec) {
    // Ignore dealloc instrs.
-    if (isa<DeallocActivationInst>(U.get())) {
+    if (llvm::isa<DeallocActivationInst>(U.get())) {
      continue;
    }
 
@@ -223,7 +224,7 @@
    // point.
    for (unsigned op = 0, ope = I->getNumOperands(); op < ope; op++) {
      auto O = I->getOperand(op);
-      auto ai = dyn_cast<AllocActivationInst>(O.first);
+      auto ai = llvm::dyn_cast<AllocActivationInst>(O.first);
      if (!ai) {
        continue;
      }
@@ -253,7 +254,7 @@
    // alive.
    for (unsigned op = 0, ope = I->getNumOperands(); op < ope; op++) {
      auto O = I->getOperand(op);
-      auto ai = dyn_cast<AllocActivationInst>(O.first);
+      auto ai = llvm::dyn_cast<AllocActivationInst>(O.first);
      if (!ai) {
        continue;
      }
@@ -275,7 +276,7 @@ static Instruction *getSingleWriter(Value *V) {
    Instruction *user = U.get();
 
    // Ignore deallocs.
-    if (isa<DeallocActivationInst>(user))
+    if (llvm::isa<DeallocActivationInst>(user))
      continue;
 
    auto op = U.getOperand();
@@ -328,7 +329,7 @@ void rematerializeCompute(Module &M) {
  // Do an initial pass that collects all of the available RELUs.
  for (auto it = instrs.begin(), e = instrs.end(); it != e; ++it) {
    instIdx++;
-    auto RL = dyn_cast<ReluInst>(*it);
+    auto RL = llvm::dyn_cast<ReluInst>(*it);
    if (!RL) {
      continue;
    }
diff --git a/tests/unittests/IRTest.cpp b/tests/unittests/IRTest.cpp
index 6f71c3e7d0..01587ff9c8 100644
--- a/tests/unittests/IRTest.cpp
+++ b/tests/unittests/IRTest.cpp
@@ -6,7 +6,7 @@
 #include "glow/IR/IRBuilder.h"
 #include "glow/IR/Instrs.h"
-#include "glow/Support/Casting.h"
+#include "llvm/Support/Casting.h"
 
 #include "gtest/gtest.h"
 
@@ -128,15 +128,15 @@ TEST(IR, casting) {
    auto *relu = bb.createRELUOp(input);
    auto *pool = bb.createPoolMaxOp(relu->getOperand(0).first, 7, 2, 3);
 
-    EXPECT_EQ(isa<PoolMaxInst>(pool), true);
-    EXPECT_EQ(isa<PoolMaxInst>(input), false);
-    EXPECT_EQ(isa<ReluInst>(relu), true);
-    EXPECT_EQ(isa<ReluInst>(pool), false);
+    EXPECT_EQ(llvm::isa<PoolMaxInst>(pool), true);
+    EXPECT_EQ(llvm::isa<PoolMaxInst>(input), false);
+    EXPECT_EQ(llvm::isa<ReluInst>(relu), true);
+    EXPECT_EQ(llvm::isa<ReluInst>(pool), false);
 
-    EXPECT_NE(dyn_cast<PoolMaxInst>(pool), nullptr);
-    EXPECT_EQ(dyn_cast<PoolMaxInst>(pool), pool);
+    EXPECT_NE(llvm::dyn_cast<PoolMaxInst>(pool), nullptr);
+    EXPECT_EQ(llvm::dyn_cast<PoolMaxInst>(pool), pool);
 
-    EXPECT_NE(dyn_cast<WeightVar>(input), nullptr);
-    EXPECT_EQ(dyn_cast<WeightVar>(input), input);
+    EXPECT_NE(llvm::dyn_cast<WeightVar>(input), nullptr);
+    EXPECT_EQ(llvm::dyn_cast<WeightVar>(input), input);
  }
 }
diff --git a/tools/ClassGen/InstrGen.cpp b/tools/ClassGen/InstrGen.cpp
index 6e7465771d..786e953111 100644
--- a/tools/ClassGen/InstrGen.cpp
+++ b/tools/ClassGen/InstrGen.cpp
@@ -30,8 +30,8 @@ int main(int argc, char **argv) {
  BB.newInstr("DeallocActivation")
      .addOperand("Src", OperandKind::Out)
      .overrideGetter("Src", "AllocActivationInst *getAlloc() const { return "
-                             "cast<AllocActivationInst>(getOperand(0).first); "
-                             "}")
+                             "llvm::cast<AllocActivationInst>(getOperand(0)."
+                             "first); }")
      .setType("Src->getType()");
 
  BB.newInstr("Copy")
diff --git a/tools/ClassGen/NodeGen.cpp b/tools/ClassGen/NodeGen.cpp
index 78ed17cc08..d9ff114570 100644
--- a/tools/ClassGen/NodeGen.cpp
+++ b/tools/ClassGen/NodeGen.cpp
@@ -30,7 +30,7 @@ int main(int argc, char **argv) {
      .addOperand("Output")
      .setType("Input->getType()")
      .overrideGetter("Output", "Variable *getOutput() const { return "
-                                "cast<Variable>(Output_.get()); };");
+                                "llvm::cast<Variable>(Output_.get()); };");
 
  //===--------------------------------------------------------------------===//
  // Convolution / Pool / FC
diff --git a/tools/loader/loader.cpp b/tools/loader/loader.cpp
index d81f66a908..a080e40b80 100644
--- a/tools/loader/loader.cpp
+++ b/tools/loader/loader.cpp
@@ -87,8 +87,8 @@ int main(int argc, char **argv) {
                          {"data", "gpu_0/data", "softmax_expected"},
                          {&data, &data, &expected_softmax}, EE);
    SM = LD.getRoot();
-    i0 = cast<Variable>(LD.getOrCreateNodeByName("gpu_0/data"));
-    i1 = cast<Variable>(LD.getOrCreateNodeByName("data"));
+    i0 = llvm::cast<Variable>(LD.getOrCreateNodeByName("gpu_0/data"));
+    i1 = llvm::cast<Variable>(LD.getOrCreateNodeByName("data"));
  }
 
  llvm::Timer timer("Infer", "Infer");
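Note on the replacement API (not part of the patch): llvm/Support/Casting.h supplies the same three helpers that the deleted glow/Support/Casting.h implemented, but it dispatches through a static classof() hook on the target class — the hook Glow's Node and Instruction hierarchies already provide. The snippet below is an illustrative sketch only (the Shape/Square types are invented and are not Glow code); it shows the calling convention the rewritten files rely on.

// Illustrative sketch only, not Glow source: behavior of the llvm casting
// helpers used throughout this patch.
#include "llvm/Support/Casting.h"

struct Shape {
  enum Kind { SquareKind, CircleKind };
  explicit Shape(Kind k) : kind_(k) {}
  Kind getKind() const { return kind_; }

private:
  Kind kind_;
};

struct Square : Shape {
  Square() : Shape(SquareKind) {}
  // Hook that llvm::isa/cast/dyn_cast call to test the dynamic kind.
  static bool classof(const Shape *S) { return S->getKind() == SquareKind; }
};

void example(Shape *S) {
  if (llvm::isa<Square>(S)) {                 // kind test only
    Square *Sq = llvm::cast<Square>(S);       // asserts if the kind is wrong
    (void)Sq;
  }
  if (auto *Sq = llvm::dyn_cast<Square>(S)) { // returns nullptr on mismatch
    (void)Sq;
  }
}

llvm::isa<T> answers the kind test, llvm::cast<T> asserts when the kind does not match, and llvm::dyn_cast<T> returns nullptr instead; llvm::cast_or_null and llvm::dyn_cast_or_null are the variants that also tolerate a null argument.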