diff --git a/docs/GraphOptimizationPipeline.md b/docs/GraphOptimizationPipeline.md
index d262d28513..86eabc0304 100644
--- a/docs/GraphOptimizationPipeline.md
+++ b/docs/GraphOptimizationPipeline.md
@@ -86,7 +86,7 @@
 Here we describe the API for `glow::optimizeFunction()` and how to use it in
 different modes.
 
 ```
-llvm::Error glow::optimizeFunction(Function *F, const Backend &B,
+Error glow::optimizeFunction(Function *F, const Backend &B,
                                    CompilationContext &cctx);
 ```
diff --git a/examples/mnist.cpp b/examples/mnist.cpp
index 0be54d4fcb..74e4f7fda6 100644
--- a/examples/mnist.cpp
+++ b/examples/mnist.cpp
@@ -256,8 +256,7 @@ void testMNISTLoadAndTraining() {
       inferMod.uniqueType(glow::ElemKind::FloatTy, {minibatchSize, 1, 28, 28});
   const char *inputName = "data";
 
-  llvm::Error errPtr = llvm::Error::success();
-  MARK_ERR_CHECKED(errPtr);
+  Error errPtr = Error::empty();
   // Load and compile LeNet MNIST model.
   glow::Caffe2ModelLoader loader("lenet_mnist/predict_net.pb",
                                  "lenet_mnist/init_net.pb", {inputName},
diff --git a/examples/resnet-runtime.cpp b/examples/resnet-runtime.cpp
index 4c05e8b221..190e79dae1 100644
--- a/examples/resnet-runtime.cpp
+++ b/examples/resnet-runtime.cpp
@@ -96,7 +96,7 @@ void dispatchClassify(unsigned int id, HostManager *hostManager,
                       std::promise<void> &finished) {
   auto runid = hostManager->runNetwork(
       "resnet50" + std::to_string(id), std::move(context),
-      [path, &returned, &finished](RunIdentifierTy runid, llvm::Error err,
+      [path, &returned, &finished](RunIdentifierTy runid, Error err,
                                    std::unique_ptr<ExecutionContext> context) {
         EXIT_ON_ERR(std::move(err));
         auto *bindings = context->getPlaceholderBindings();
diff --git a/examples/tracing-compare.cpp b/examples/tracing-compare.cpp
index f1bc200e8c..5c5d160139 100644
--- a/examples/tracing-compare.cpp
+++ b/examples/tracing-compare.cpp
@@ -90,18 +90,16 @@ std::future<void> addToDevice(unsigned int id, DeviceManager *device,
   auto compilePromise = std::make_shared<std::promise<void>>();
   auto future = compilePromise->get_future();
 
-  device->addNetwork(&module, functions,
-                     [compilePromise, id](const Module *, llvm::Error err) {
-                       if (err) {
-                         llvm::errs() << "Failed to compile model for device "
-                                      << id << ".\n";
-                         EXIT_ON_ERR(std::move(err));
-                       } else {
-                         llvm::outs()
-                             << "Successfully added to Device " << id << ".\n";
-                       }
-                       compilePromise->set_value();
-                     });
+  device->addNetwork(
+      &module, functions, [compilePromise, id](const Module *, Error err) {
+        if (err) {
+          llvm::errs() << "Failed to compile model for device " << id << ".\n";
+          EXIT_ON_ERR(std::move(err));
+        } else {
+          llvm::outs() << "Successfully added to Device " << id << ".\n";
+        }
+        compilePromise->set_value();
+      });
 
   return future;
 }
@@ -157,7 +155,7 @@ int main(int argc, char **argv) {
 
     devices[i]->runFunction(
         "resnet50", std::move(context),
-        [&promises, i](RunIdentifierTy, llvm::Error err,
+        [&promises, i](RunIdentifierTy, Error err,
                        std::unique_ptr<ExecutionContext> context) {
          EXIT_ON_ERR(std::move(err));
          promises[i].set_value(std::move(context));
diff --git a/examples/training/resnet50/main.cpp b/examples/training/resnet50/main.cpp
index 06285c6358..e5350b4f6c 100644
--- a/examples/training/resnet50/main.cpp
+++ b/examples/training/resnet50/main.cpp
@@ -154,7 +154,7 @@ int main(int argc, char **argv) {
 
   // Load ResNet model.
   llvm::outs() << "Loading resnet50 model.\n";
-  llvm::Error errPtr = llvm::Error::success();
+  Error errPtr = Error::success();
 
   // Loader has randomly initialized trainable weights.
   Caffe2ModelLoader loader(resnet50Path + "/predict_net.pbtxt",
diff --git a/include/glow/Backend/Backend.h b/include/glow/Backend/Backend.h
index 34ed7da5d6..b6be01c2f6 100644
--- a/include/glow/Backend/Backend.h
+++ b/include/glow/Backend/Backend.h
@@ -51,7 +51,7 @@ class Backend {
   /// Generate code for a vector of functions, \p functions. All compilations
   /// use the same settings provided by \p opts. This allows the compiler to
   /// support shared constants between functions.
-  virtual llvm::Expected<std::vector<std::unique_ptr<CompiledFunction>>>
+  virtual Expected<std::vector<std::unique_ptr<CompiledFunction>>>
   compileFunctions(llvm::ArrayRef<Function *> functions,
                    BackendOptions &opts) const {
     std::vector<std::unique_ptr<CompiledFunction>> compiledFunctions;
@@ -62,18 +62,18 @@
         return resOrErr.takeError();
       }
     }
-    return llvm::Expected<std::vector<std::unique_ptr<CompiledFunction>>>(
+    return Expected<std::vector<std::unique_ptr<CompiledFunction>>>(
         std::move(compiledFunctions));
   }
 
-  virtual llvm::Expected<std::unique_ptr<CompiledFunction>>
+  virtual Expected<std::unique_ptr<CompiledFunction>>
   compile(Function *F) const {
     BackendOptions opts;
     return compile(F, opts);
   }
 
   /// Generate code for input function \param F given settings in \p opts.
-  virtual llvm::Expected<std::unique_ptr<CompiledFunction>>
+  virtual Expected<std::unique_ptr<CompiledFunction>>
   compile(Function *F, const BackendOptions &opts) const = 0;
 
   /// Save the bundle for \p F for a later standalone execution in \p outputDir
diff --git a/include/glow/Backend/CompiledFunction.h b/include/glow/Backend/CompiledFunction.h
index 2724dc7cc2..fb89e2ac13 100644
--- a/include/glow/Backend/CompiledFunction.h
+++ b/include/glow/Backend/CompiledFunction.h
@@ -37,8 +37,8 @@ class CompiledFunction {
   virtual ~CompiledFunction();
   /// Execute the network and allocate Placeholder memory with given
   /// \p bindings providing mapping between Placeholder and populated tensor.
-  /// \returns an llvm::Error if an error ocurred during execution.
-  virtual llvm::Error execute(ExecutionContext *context) = 0;
+  /// \returns an Error if an error occurred during execution.
+  virtual Error execute(ExecutionContext *context) = 0;
 
   /// Getter for the runtimeBundle.
   runtime::RuntimeBundle &getRuntimeBundle() { return runtimeBundle_; }
diff --git a/include/glow/Backends/DeviceManager.h b/include/glow/Backends/DeviceManager.h
index a159737bb7..2be0aed32a 100644
--- a/include/glow/Backends/DeviceManager.h
+++ b/include/glow/Backends/DeviceManager.h
@@ -32,11 +32,10 @@ namespace glow {
 namespace runtime {
 
 /// Callback signalling success/failure of evicting a function from a Device.
-using EvictFunctionCBTy =
-    std::function<void(std::string, llvm::Error)>;
+using EvictFunctionCBTy = std::function<void(std::string, Error)>;
 
 /// Callback signalling success/failure of loading a Module onto a device.
-using ReadyCBTy = std::function<void(const Module *, llvm::Error)>;
+using ReadyCBTy = std::function<void(const Module *, Error)>;
 
 /// Map of Function name -> CompiledFunction, used when loading a network onto a
 /// device.
@@ -95,7 +94,7 @@ class DeviceManager {
   generateDeviceConfigs(llvm::StringRef backendName);
 
   /// Initialize the device.
-  virtual llvm::Error init() { return llvm::Error::success(); }
+  virtual Error init() { return Error::success(); }
 
   /// Load the provided module into the device, readyCB will be called when
   /// ready to use.
@@ -108,8 +107,8 @@
   /// up space on the device. \p evictCB will be called when the operation
   /// is completed or attempted and failed.
   virtual void evictNetwork(std::string functionName,
-                            EvictFunctionCBTy evictCB = [](std::string,
-                                                           llvm::Error) {}) = 0;
+                            EvictFunctionCBTy evictCB = [](std::string, Error) {
+                            }) = 0;
 
   /// Execute the named Function in an already provided network on the device.
   /// functionName must match the name of a function already added.
@@ -122,9 +121,7 @@ class DeviceManager {
                              runtime::ResultCBTy resultCB) = 0;
 
   /// Stops execution and shuts down the Device.
-  virtual llvm::Error stop(bool block = true) {
-    return llvm::Error::success();
-  };
+  virtual Error stop(bool block = true) { return Error::success(); };
 
   /// \returns the name of backend that powers this Device.
   llvm::StringRef getBackendName() { return config_.backendName; }
diff --git a/include/glow/Backends/DummyDeviceManager.h b/include/glow/Backends/DummyDeviceManager.h
index 69338143f7..c76e63cc8d 100644
--- a/include/glow/Backends/DummyDeviceManager.h
+++ b/include/glow/Backends/DummyDeviceManager.h
@@ -18,6 +18,7 @@
 
 #include "glow/Backends/DeviceManager.h"
 #include "glow/Runtime/RuntimeTypes.h"
+#include "llvm/Support/FormatVariadic.h"
 
 #include
 
@@ -55,7 +56,7 @@ class DummyDeviceManager : public DeviceManager {
         callback(
             module,
             MAKE_ERR(
-                GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND,
+                ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND,
                 llvm::formatv("Function {0} not found", func.first).str()));
         return;
       }
@@ -69,7 +70,7 @@
     }
 
     // Fire the ready CB.
-    callback(module, llvm::Error::success());
+    callback(module, Error::success());
   }
 
   /// Remove (and delete) the provided function, freeing
@@ -77,7 +78,7 @@
   void evictNetwork(std::string functionName,
                     EvictFunctionCBTy evictCB) override {
     functions_.erase(functionName);
-    evictCB(functionName, llvm::Error::success());
+    evictCB(functionName, Error::success());
   }
 
   /// Execute the named Function in an already provided network on the device.
@@ -92,7 +93,7 @@
     if (funcIt == functions_.end()) {
       callback(
           0,
-          MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND,
+          MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND,
                    llvm::formatv("Function {0} not found", functionName).str()),
           std::move(context));
       return 0;
diff --git a/include/glow/Backends/QueueBackedDeviceManager.h b/include/glow/Backends/QueueBackedDeviceManager.h
index aa866a6617..47bbbf003d 100644
--- a/include/glow/Backends/QueueBackedDeviceManager.h
+++ b/include/glow/Backends/QueueBackedDeviceManager.h
@@ -37,11 +37,11 @@ class QueueBackedDeviceManager : public DeviceManager {
       : DeviceManager(config), workThread_(1) {}
 
   virtual ~QueueBackedDeviceManager() {
-    llvm::toString(stop(true)); // will join workThread_
+    ERR_TO_VOID(stop(true)); // will join workThread_
   }
 
   /// Initialize the device.
-  llvm::Error init() override { return llvm::Error::success(); }
+  Error init() override { return Error::success(); }
 
   /// Load the provided module into the device, readyCB will be called when
   /// ready to use
@@ -81,9 +81,9 @@
   }
 
   /// Stops execution and shuts down the Device.
-  llvm::Error stop(bool block = true) override {
+  Error stop(bool block = true) override {
     workThread_.stop(block);
-    return llvm::Error::success();
+    return Error::success();
   }
 
 protected:
diff --git a/include/glow/ExecutionEngine/ExecutionEngine.h b/include/glow/ExecutionEngine/ExecutionEngine.h
index c820a570b8..27e5132569 100644
--- a/include/glow/ExecutionEngine/ExecutionEngine.h
+++ b/include/glow/ExecutionEngine/ExecutionEngine.h
@@ -92,7 +92,7 @@ class ExecutionEngine final {
   void clear();
 
   /// \returns the DAG for the specified \p network.
-  llvm::Expected getDAG(llvm::StringRef network) {
+  Expected getDAG(llvm::StringRef network) {
     return hostManager_->getNetworkDAG(network);
   }
 
diff --git a/include/glow/Exporter/CommonOperatorWriter.h b/include/glow/Exporter/CommonOperatorWriter.h
index 47a922c05b..95b1a55e1e 100644
--- a/include/glow/Exporter/CommonOperatorWriter.h
+++ b/include/glow/Exporter/CommonOperatorWriter.h
@@ -29,15 +29,14 @@ template <typename Traits> class CommonOperatorWriter : public ProtobufWriter {
   /// Declare pure virtual methods, one per each node kind.
   /// Derived class must to implement all of it.
 #define DEF_NODE(CLASS, NAME)                                                  \
-  virtual llvm::Error write##NAME(const CLASS *node,                           \
-                                  typename Traits::GraphProto &graph) = 0;
+  virtual Error write##NAME(const CLASS *node,                                 \
+                            typename Traits::GraphProto &graph) = 0;
 #include "glow/AutoGenNodes.def"
 
   /// Function invokes the correspondent virtual method according to \p node
   /// type to serialize node information into \p graph (protobuf), reports
-  /// visited intermediate nodes through \p reporter, \returns llvm::Error.
-  llvm::Error writeOperator(const Node *node,
-                            typename Traits::GraphProto &graph) {
+  /// visited intermediate nodes through \p reporter, \returns Error.
+  Error writeOperator(const Node *node, typename Traits::GraphProto &graph) {
     switch (node->getKind()) {
 #define DEF_NODE(CLASS, NAME)                                                  \
   case glow::Kinded::Kind::CLASS##Kind:                                        \
@@ -46,7 +45,7 @@
     default:
       llvm_unreachable(
           "Not reachable, values and instructions are not handled here");
-      return llvm::Error::success();
+      return Error::success();
     }
   }
 
diff --git a/include/glow/Exporter/ONNXModelWriter.h b/include/glow/Exporter/ONNXModelWriter.h
index d2919f4edf..9838efef67 100644
--- a/include/glow/Exporter/ONNXModelWriter.h
+++ b/include/glow/Exporter/ONNXModelWriter.h
@@ -55,12 +55,12 @@
                              ValueInfoType *valueProto);
   /// Writes all inputs and outputs with operator name \p opName from give Node
   /// \p node into protobuf \p proto.
-  static llvm::Error writeAllWithNode(const std::string &opName,
-                                      const Node *node, NodeType *proto);
+  static Error writeAllWithNode(const std::string &opName, const Node *node,
+                                NodeType *proto);
   /// Writes all inputs and outputs with operator name \p opName from give Node
   /// \p node into created node protobuf using \p graph.
-  static llvm::Error writeAll(const std::string &opName, const Node *node,
-                              GraphType &graph);
+  static Error writeAll(const std::string &opName, const Node *node,
+                        GraphType &graph);
 
   // Finds if uses of \p node have node with the provided \p kind.
   static bool hasUsesOfKind(const Node *node, Kinded::Kind kind);
@@ -76,18 +76,18 @@
   /// there otherwise if an error occurs it will abort.
   ONNXModelWriter(const std::string &modelFilename, Function &F,
                   size_t irVersion, size_t opsetVersion,
-                  llvm::Error *errPtr = nullptr, bool textMode = false);
+                  Error *errPtr = nullptr, bool textMode = false);
 
 private:
   /// \returns error for the unexpected node kind.
-  static llvm::Error writeUnexpectedKind(const Node *node) {
+  static Error writeUnexpectedKind(const Node *node) {
     RETURN_ERR(strFormat("Glow can not export node %s, unsupported kind: %s.",
                          node->getName().str().c_str(), node->getKindName()));
   }
 
   /// Declares the overriden all pure virtual methods, declared in base class.
 #define DEF_NODE(CLASS, NAME)                                                  \
-  llvm::Error write##NAME(const CLASS *, GraphType &) override;
+  Error write##NAME(const CLASS *, GraphType &) override;
 #include "glow/AutoGenNodes.def"
 };
 
diff --git a/include/glow/Exporter/ProtobufWriter.h b/include/glow/Exporter/ProtobufWriter.h
index 10c3498556..795a91155e 100644
--- a/include/glow/Exporter/ProtobufWriter.h
+++ b/include/glow/Exporter/ProtobufWriter.h
@@ -32,8 +32,8 @@ class ProtobufWriter {
   /// Output file stream.
   std::ofstream ff_;
 
-  llvm::Error writeModel(const ::google::protobuf::Message &modelProto,
-                         bool textMode = false);
+  Error writeModel(const ::google::protobuf::Message &modelProto,
+                   bool textMode = false);
 
 public:
   /// Constructs new ProtobufWriter object. It will write protopuf messages into
   /// If \p errPtr is not null then if an error occurs it will get assigned
   /// there otherwise if an error occurs it will abort.
   ProtobufWriter(const std::string &modelFilename, Function &F,
-                 llvm::Error *errPtr = nullptr);
+                 Error *errPtr = nullptr);
 };
 
 } // namespace glow
diff --git a/include/glow/Importer/Caffe2ModelLoader.h b/include/glow/Importer/Caffe2ModelLoader.h
index 9719dc1b84..dfd5838bc8 100644
--- a/include/glow/Importer/Caffe2ModelLoader.h
+++ b/include/glow/Importer/Caffe2ModelLoader.h
@@ -40,7 +40,7 @@ class Value;
 
 class Caffe2ModelLoader : public CommonOperatorLoader<caffe2::OperatorDef> {
   /// \returns True if the operator has broadcasting activated.
-  llvm::Expected<bool> getBroadcast(const ArgumentDictionaryTy &dict) override;
+  Expected<bool> getBroadcast(const ArgumentDictionaryTy &dict) override;
 
   /// \returns True if the operator with the name \p typeName has support for
   /// multidirectional broadcasting.
@@ -48,53 +48,51 @@
   /// Load the weight tensors from the 'init' file and register them in the map
   /// \p tensors.
-  llvm::Error loadWeightsFromNet(caffe2::NetDef &net);
+  Error loadWeightsFromNet(caffe2::NetDef &net);
 
   /// Loads an individual weight \p op.
-  llvm::Error loadWeight(const caffe2::OperatorDef &op);
+  Error loadWeight(const caffe2::OperatorDef &op);
 
   /// Load the structure of the network from the 'net' file.
-  llvm::Error loadNetwork(caffe2::NetDef &net);
+  Error loadNetwork(caffe2::NetDef &net);
 
   /// Load the operator \p op into the network. This creates one or more nodes
   /// in the network.
-  llvm::Error loadOperator(const caffe2::OperatorDef &op);
+  Error loadOperator(const caffe2::OperatorDef &op);
 
   /// \returns True if the operator \p op is successfully folded.
-  llvm::Expected<bool> foldOperator(const caffe2::OperatorDef &op);
+  Expected<bool> foldOperator(const caffe2::OperatorDef &op);
 
   /// Load the Conv or ConvRelu operators.
-  llvm::Error loadConv(const caffe2::OperatorDef &op,
-                       ArgumentDictionaryTy &dict);
+  Error loadConv(const caffe2::OperatorDef &op, ArgumentDictionaryTy &dict);
 
   /// Load the Int8Conv or Int8ConvRelu operators.
-  llvm::Error loadConvQuantized(const caffe2::OperatorDef &op,
-                                ArgumentDictionaryTy &dict);
+  Error loadConvQuantized(const caffe2::OperatorDef &op,
+                          ArgumentDictionaryTy &dict);
 
   /// Reads a network (weights or structure) from the serialized protocol
   /// buffer file.
-  llvm::Expected<caffe2::NetDef> loadProtoFile(const std::string &filename);
+  Expected<caffe2::NetDef> loadProtoFile(const std::string &filename);
 
   /// loadInputs calls this function for each member in its target arguments.
   /// Currently we are supporting two tensorprototypes:
   /// caffe2::TensorProto, caffe2::QTensorProto
   template <typename TensorProtoType>
-  llvm::Error loadInputsWithTensorProtoType(const caffe2::NetDef &net,
-                                            bool loadInputsAsPlaceholders,
-                                            const TensorProtoType &in);
+  Error loadInputsWithTensorProtoType(const caffe2::NetDef &net,
+                                      bool loadInputsAsPlaceholders,
+                                      const TensorProtoType &in);
 
   /// Load the inputs from the NetDef. If \p loadInputsAsPlaceholders is
   /// true then this will load each graph input as a placeholder otherwise it
   /// will create an empty tensor for each input.
-  llvm::Error loadInputs(const caffe2::NetDef &net,
-                         bool loadInputsAsPlaceholders);
+  Error loadInputs(const caffe2::NetDef &net, bool loadInputsAsPlaceholders);
 
   /// \returns Expected if a NetDef can be constructed from the
   /// in-memory serialized protobuf.
   /// Loads ModelProto from the in-memory serialized protobuf \p
   /// c2Model with the model size \p c2ModelSize.
-  static llvm::Expected<caffe2::NetDef> loadProto(const void *c2Model,
-                                                  size_t c2ModelSize);
+  static Expected<caffe2::NetDef> loadProto(const void *c2Model,
+                                            size_t c2ModelSize);
 
   /// Creates a Caffe2 model loader to build \p F.
   /// Loads the ONNIXFI \p model from memory of \p modelSize size,
@@ -106,7 +104,7 @@
                     uint32_t weightsCount,
                     const onnxTensorDescriptorV1 *weightDescriptors,
                     Function &F, bool loadInputsAsPlaceholders,
-                    llvm::Error *errPtr = nullptr);
+                    Error *errPtr = nullptr);
 
   friend class ONNXIFIModelLoader;
 
@@ -114,8 +112,8 @@
   /// \p loader is successful. The folding utility uses temporary
   /// loader \p tmpLoader, and associated temporary function \p F.
   template <class LoaderType, class OpType>
-  friend llvm::Error constantFoldInLoader(Function *F, LoaderType &tmpLoader,
-                                          LoaderType *loader, const OpType &op);
+  friend Error constantFoldInLoader(Function *F, LoaderType &tmpLoader,
+                                    LoaderType *loader, const OpType &op);
 
 public:
   /// Loads the caffe2 model that's represented by a network description file,
@@ -129,12 +127,12 @@
                     const std::string &netWeightFilename,
                     llvm::ArrayRef<const char *> names,
                     llvm::ArrayRef<TypeRef> types, Function &F,
-                    llvm::Error *errPtr = nullptr);
+                    Error *errPtr = nullptr);
 
   /// Creates a Caffe2 model loader to build \p F.
   /// If \p errPtr is not null then if an error occurs it will get assigned
   /// there otherwise if an error occurs it will abort.
-  Caffe2ModelLoader(Function &F, llvm::Error *errPtr);
+  Caffe2ModelLoader(Function &F, Error *errPtr);
 };
 
 } // namespace glow
diff --git a/include/glow/Importer/CommonOperatorLoader.h b/include/glow/Importer/CommonOperatorLoader.h
index d851715d15..6a7884e931 100644
--- a/include/glow/Importer/CommonOperatorLoader.h
+++ b/include/glow/Importer/CommonOperatorLoader.h
@@ -59,7 +59,7 @@ class CommonOperatorLoader : public ProtobufLoader {
   /// result.offsets and result.scales are the quantization scales and offsets
   /// of the onnxTensorDescriptorV1 if there were more than 1. If there is
   /// exactly 1 scale and offset then result.t will be a quantized glow tensor.
-  inline llvm::Expected<LoadWeightResult>
+  inline Expected<LoadWeightResult>
   loadWeight(const onnxTensorDescriptorV1 &in) {
     // Only support CPU memory tensors.
     if (in.memoryType != ONNXIFI_MEMORY_TYPE_CPU) {
@@ -119,7 +119,7 @@
                             static_cast(in.dataType)));
     }
 
-    return llvm::Expected<LoadWeightResult>(std::move(result));
+    return Expected<LoadWeightResult>(std::move(result));
   }
 
   // This is a caffe2 offset shift.
@@ -164,13 +164,13 @@
                             static_cast(in.dataType)));
     }
 
-    return llvm::Expected<LoadWeightResult>(std::move(result));
+    return Expected<LoadWeightResult>(std::move(result));
   }
 
   /// Merge shape \p shape into \p mergeShape, following multidirectional
   /// broadcasting rules.
-  llvm::Error mergeMultidirectionalBroadcast(std::vector<size_t> &mergeShape,
-                                             llvm::ArrayRef<size_t> shape) {
+  Error mergeMultidirectionalBroadcast(std::vector<size_t> &mergeShape,
+                                       llvm::ArrayRef<size_t> shape) {
     size_t shift = mergeShape.size() - shape.size();
     for (size_t i = 0; i < shape.size(); i++) {
       if (shape[i] != 1) {
@@ -181,7 +181,7 @@
       }
       // Otherwise, just leave mergeShape[i] as it is.
     }
-    return llvm::Error::success();
+    return Error::success();
   }
 
 protected:
@@ -193,8 +193,7 @@
       std::unordered_map;
 
   /// \returns True if the operator has broadcasting activated.
-  virtual llvm::Expected<bool>
-  getBroadcast(const ArgumentDictionaryTy &dict) = 0;
+  virtual Expected<bool> getBroadcast(const ArgumentDictionaryTy &dict) = 0;
 
   /// \returns True if the operator with the name \p typeName has support
   /// for multidirectional broadcasting.
@@ -203,31 +202,30 @@
   /// Associate the name of operation outputs to a NodeValues corresponding to
   /// node \p node. If \p numOutputs is lower than 0, then all outputs are
   /// associated. Otherwise, the first \p numOutputs outputs are associated.
-  llvm::Error addNodeAsOutput(const OpType &op, Node *node,
-                              int numOutputs = -1) {
+  Error addNodeAsOutput(const OpType &op, Node *node, int numOutputs = -1) {
     RETURN_ERR_IF_NOT(numOutputs <= op.output_size(),
                       "Can't register more than outputs in the operation.");
     numOutputs = (numOutputs < 0) ? op.output_size() : numOutputs;
     for (int i = 0; i < numOutputs; i++) {
       nodeValueByName_[op.output(i)] = NodeValue(node, i);
     }
-    return llvm::Error::success();
+    return Error::success();
   }
 
   /// Loads RELU operator, given its protobuf representation and parsed args.
-  llvm::Error loadRelu(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadRelu(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto *R = G_.createRELU(opName, in);
     RETURN_IF_ERR(addNodeAsOutput(op, R));
-    return llvm::Error::success();
+    return Error::success();
   }
 
   /// Loads PRELU operator, given its protobuf representation and parsed args.
   /// Follows undirectional broadcasting described here:
   /// https://github.com/onnx/onnx/blob/fb1a80692c1ab0bd27b1072f2e7bffacba336777/docs/Broadcasting.md
-  llvm::Error loadPRelu(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadPRelu(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
 
     NodeValue in;
@@ -244,37 +242,37 @@
     auto *finalSlope = G_.createBroadcast(opName, slope, targetDim, axis);
     auto *R = G_.createPRELU(opName, in, finalSlope);
     RETURN_IF_ERR(addNodeAsOutput(op, R));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSigmoid(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadSigmoid(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto *S = G_.createSigmoid(opName, in);
     RETURN_IF_ERR(addNodeAsOutput(op, S));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadTanh(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadTanh(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto *T = G_.createTanh(opName, in);
     RETURN_IF_ERR(addNodeAsOutput(op, T));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadExp(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadExp(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto *E = G_.createExp(opName, in);
     RETURN_IF_ERR(addNodeAsOutput(op, E));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadShape(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadShape(const OpType &op, ArgumentDictionaryTy &dict) {
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
 
@@ -285,31 +283,31 @@
     RETURN_IF_ERR(createAndRegisterConstant(op.output(0), std::move(T)));
 
-    return llvm::Error::success();
+    return Error::success();
   }
 
   /// Loads Sqrt operator, given its protobuf representation and parsed args.
-  llvm::Error loadSqrt(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadSqrt(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto *R = G_.createPow(opName, in, 0.5f);
     RETURN_IF_ERR(addNodeAsOutput(op, R));
-    return llvm::Error::success();
+    return Error::success();
   }
 
   /// Loads Reciprocal operator, given its protobuf representation and parsed
   /// args.
-  llvm::Error loadReciprocal(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadReciprocal(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto *R = G_.createPow(opName, in, -1.0f);
     RETURN_IF_ERR(addNodeAsOutput(op, R));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSum(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadSum(const OpType &op, ArgumentDictionaryTy &dict) {
     if (op.input_size() == 1) {
       NodeValue in;
       ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -336,10 +334,10 @@
       Node *node = G_.createBatchedReduceAdd(opName, concat, /* axis */ {0});
       RETURN_IF_ERR(addNodeAsOutput(op, node));
     }
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSoftmax(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadSoftmax(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
 
     NodeValue in;
@@ -368,10 +366,10 @@
     auto origInDims = in.getType()->dims();
     auto *RN = G_.createReshape("reshapeOutput", SM, origInDims);
     RETURN_IF_ERR(addNodeAsOutput(op, RN));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadLRN(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadLRN(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -395,11 +393,11 @@
     // LRN in Caffe2 has a scale_ output, but I believe it's unused for
     // inference. So explicitly only set output 0.
     nodeValueByName_[op.output(0)] = N->getResult();
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadMinMax(llvm::StringRef typeName, const OpType &op,
-                         ArgumentDictionaryTy &dict) {
+  Error loadMinMax(llvm::StringRef typeName, const OpType &op,
+                   ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in0;
     ASSIGN_VALUE_OR_RETURN_ERR(in0, getNodeValueByName(op.input(0)));
@@ -416,10 +414,10 @@
     }
 
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  static llvm::Expected<NodeValue>
+  static Expected<NodeValue>
   handleBatchMatMulTranspose(Function &F, ArgumentDictionaryTy &dict,
                              llvm::StringRef key, NodeValue input) {
     if (!dict.count(key)) {
@@ -448,8 +446,8 @@
     return input;
   }
 
-  llvm::Error loadBatchMatMul(const OpType &op, ArgumentDictionaryTy &dict,
-                              bool isBatched) {
+  Error loadBatchMatMul(const OpType &op, ArgumentDictionaryTy &dict,
+                        bool isBatched) {
     const std::string &opName = loadOperatorName(op);
     NodeValue LHS;
     ASSIGN_VALUE_OR_RETURN_ERR(LHS, getNodeValueByName(op.input(0)));
@@ -472,11 +470,11 @@
     }
 
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadArithmetic(llvm::StringRef typeName, const OpType &op,
-                             ArgumentDictionaryTy &dict) {
+  Error loadArithmetic(llvm::StringRef typeName, const OpType &op,
+                       ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in0;
     ASSIGN_VALUE_OR_RETURN_ERR(in0, getNodeValueByName(op.input(0)));
@@ -540,10 +538,10 @@
     }
 
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSplit(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadSplit(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -564,10 +562,10 @@
       // so only use 0 here as the node value result.
       nodeValueByName_[op.output(i)] = outputs[i]->getResult();
     }
-    return llvm::Error::success();
+    return Error::success();
  }
 
-  llvm::Error loadReshape(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadReshape(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -631,11 +629,11 @@
     // Caffe2 sometimes outputs old_shape which goes unused. We do not currently
     // support it, so explicitly only set the first output.
     nodeValueByName_[op.output(0)] = node->getResult();
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadTranspose(const OpType &op, ArgumentDictionaryTy &dict,
-                            llvm::StringRef permArgName) {
+  Error loadTranspose(const OpType &op, ArgumentDictionaryTy &dict,
+                      llvm::StringRef permArgName) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -654,10 +652,10 @@
     auto *T = G_.createTranspose(opName, in, perm);
 
     RETURN_IF_ERR(addNodeAsOutput(op, T));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadFlatten(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadFlatten(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -667,17 +665,17 @@
     }
     auto *node = G_.createFlatten(opName, in, axis);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadIdentity(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadIdentity(const OpType &op, ArgumentDictionaryTy &dict) {
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     nodeValueByName_[op.output(0)] = in;
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadTopK(const OpType &op, ArgumentDictionaryTy &dict) {
+  Error loadTopK(const OpType &op, ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -708,11 +706,11 @@
 
     auto *R = G_.createTopK(opName, in, k);
     RETURN_IF_ERR(addNodeAsOutput(op, R));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadReduceOp(llvm::StringRef typeName, const OpType &op,
-                           ArgumentDictionaryTy &dict) {
+  Error loadReduceOp(llvm::StringRef typeName, const OpType &op,
+                     ArgumentDictionaryTy &dict) {
     const std::string &opName = loadOperatorName(op);
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
@@ -734,7 +732,7 @@
       auto it = std::unique(shapeAxes.begin(), shapeAxes.end());
       if (it != shapeAxes.end()) {
         RETURN_ERR("Axes values are not unique.",
-                   GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE);
+                   ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE);
       }
     }
 
@@ -769,10 +767,10 @@
     }
 
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadBatchOneHot(const OpType &op) {
+  Error loadBatchOneHot(const OpType &op) {
     const std::string &opName = loadOperatorName(op);
     NodeValue data;
     ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
@@ -783,10 +781,10 @@
 
     auto *node = G_.createBatchOneHot(opName, data, lengths, values);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSparseLengthsSum(const OpType &op) {
+  Error loadSparseLengthsSum(const OpType &op) {
     NodeValue in0;
     ASSIGN_VALUE_OR_RETURN_ERR(in0, getNodeValueByName(op.input(0)));
     NodeValue in1;
@@ -795,10 +793,10 @@ class CommonOperatorLoader : public ProtobufLoader {
     ASSIGN_VALUE_OR_RETURN_ERR(in2, getNodeValueByName(op.input(2)));
     auto *node = G_.createSparseLengthsSum(loadOperatorName(op), in0, in1, in2);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSparseLengthsWeightedSum(const OpType &op) {
+  Error loadSparseLengthsWeightedSum(const OpType &op) {
     NodeValue in0;
     ASSIGN_VALUE_OR_RETURN_ERR(in0, getNodeValueByName(op.input(0)));
     NodeValue in1;
@@ -810,18 +808,18 @@
     auto *node = G_.createSparseLengthsWeightedSum(loadOperatorName(op), in0,
                                                    in1, in2, in3);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadLengthsToRanges(const OpType &op) {
+  Error loadLengthsToRanges(const OpType &op) {
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto *node = G_.createLengthsToRanges(loadOperatorName(op), in);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadBatchBoxCox(const OpType &op) {
+  Error loadBatchBoxCox(const OpType &op) {
     NodeValue data;
     ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
     NodeValue lambda1;
@@ -831,21 +829,20 @@
     auto *node =
         G_.createBatchBoxCox(loadOperatorName(op), data, lambda1, lambda2);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadDotProduct(const OpType &op) {
+  Error loadDotProduct(const OpType &op) {
     NodeValue X;
     ASSIGN_VALUE_OR_RETURN_ERR(X, getNodeValueByName(op.input(0)));
     NodeValue Y;
     ASSIGN_VALUE_OR_RETURN_ERR(Y, getNodeValueByName(op.input(1)));
     auto *node = G_.createDotProduct(loadOperatorName(op), X, Y);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadReplaceNaN(const OpType &op,
-                             const ArgumentDictionaryTy &dict) {
+  Error loadReplaceNaN(const OpType &op, const ArgumentDictionaryTy &dict) {
     // Load the input and NaN replacement value:
     NodeValue input;
     ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0)));
@@ -856,10 +853,10 @@
     }
     auto *node = G_.createReplaceNaN(loadOperatorName(op), input, value);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadLengthsSum(const OpType &op) {
+  Error loadLengthsSum(const OpType &op) {
     const std::string &opName = loadOperatorName(op);
     NodeValue data;
     ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
@@ -871,11 +868,10 @@
 
     auto *node = G_.createLengthsSum(opName, data, lengths);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadExpandDims(const OpType &op,
-                             const ArgumentDictionaryTy &dict) {
+  Error loadExpandDims(const OpType &op, const ArgumentDictionaryTy &dict) {
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     auto dims = dict.find("dims");
@@ -886,10 +882,10 @@
         G_.createExpandDims(loadOperatorName(op), in, getShape(dims->second));
     RETURN_IF_ERR(addNodeAsOutput(op, node));
 
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadClip(const OpType &op, const ArgumentDictionaryTy &dict) {
+  Error loadClip(const OpType &op, const ArgumentDictionaryTy &dict) {
     NodeValue in;
     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
     float cmin = std::numeric_limits<float>::lowest();
@@ -904,11 +900,10 @@
 
     auto *node = G_.createClip(loadOperatorName(op), in, cmin, cmax);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSparseToDense(const OpType &op,
-                                const ArgumentDictionaryTy &dict) {
+  Error loadSparseToDense(const OpType &op, const ArgumentDictionaryTy &dict) {
     if (op.input_size() != 3) {
       RETURN_ERR("SparseToDense operator must have three inputs.");
     }
@@ -923,11 +918,11 @@
     auto *node = G_.createSparseToDense(loadOperatorName(op), indices, values,
                                         dataToInferDim);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadSparseToDenseMask(const OpType &op,
-                                    const ArgumentDictionaryTy &dict) {
+  Error loadSparseToDenseMask(const OpType &op,
+                              const ArgumentDictionaryTy &dict) {
     size_t inputSize = op.input_size();
     if (inputSize != 3 && inputSize != 4) {
       RETURN_ERR("SparseToDenseMask operator must have 3 or 4 inputs.");
@@ -961,11 +956,11 @@
     auto *node = G_.createSparseToDenseMask(
         loadOperatorName(op), indices, values, defaultValue, lengths, mask);
     RETURN_IF_ERR(addNodeAsOutput(op, node));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadGatherOps(const std::string &typeName, const OpType &op,
-                            const ArgumentDictionaryTy &dict) {
+  Error loadGatherOps(const std::string &typeName, const OpType &op,
+                      const ArgumentDictionaryTy &dict) {
     NodeValue data;
     ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
 
@@ -985,11 +980,11 @@
 
     Node *GN = G_.createGather(loadOperatorName(op), data, indices, batchDims);
     RETURN_IF_ERR(addNodeAsOutput(op, GN));
-    return llvm::Error::success();
+    return Error::success();
   }
 
-  llvm::Error loadGatherRanges(const std::string &typeName, const OpType &op,
-                               const ArgumentDictionaryTy &dict) {
+  Error loadGatherRanges(const std::string &typeName, const OpType &op,
+                         const ArgumentDictionaryTy &dict) {
     NodeValue data;
     ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0)));
     RETURN_ERR_IF_NOT(data.dims().size() == 1, "Data must be a 1D vector.");
@@ -1009,11 +1004,11 @@
     Node *GR = G_.createGatherRanges(loadOperatorName(op), data, ranges,
                                      maxOutputSize);
     RETURN_IF_ERR(addNodeAsOutput(op, GR));
-    return llvm::Error::success();
+    return Error::success();
   }
 
   // Loads Less operator. Internally it's a cmpLT Node.
-  llvm::Error loadLess(const OpType &op, const ArgumentDictionaryTy &dict) {
+  Error loadLess(const OpType &op, const ArgumentDictionaryTy &dict) {
     // Input Type.
     NodeValue xNV;
     ASSIGN_VALUE_OR_RETURN_ERR(xNV, getNodeValueByName(op.input(0)));
@@ -1029,7 +1024,7 @@
                            xNode, yNode);
 
     RETURN_IF_ERR(addNodeAsOutput(op, N));
-    return llvm::Error::success();
+    return Error::success();
   }
 
   using ProtobufLoader::ProtobufLoader;
 
   /// If operator type is supported, returns Expected and creates new
   /// operator. Returns Operator if operator type is not supported.
   /// Returns Error if an error occurred
-  llvm::Expected<bool> tryLoadCommonOperator(llvm::StringRef typeName,
-                                             const OpType &op,
-                                             ArgumentDictionaryTy &dict) {
+  Expected<bool> tryLoadCommonOperator(llvm::StringRef typeName,
+                                       const OpType &op,
+                                       ArgumentDictionaryTy &dict) {
     if (typeName == "Relu") {
       RETURN_IF_ERR(loadRelu(op, dict));
       return true;
@@ -1194,7 +1189,7 @@
 
   /// Utility function which computes the resulting shape in case of
   /// multidirectional broadcasting.
-  llvm::Expected<std::vector<size_t>>
+  Expected<std::vector<size_t>>
   computeMultidirectionalBroadcast(llvm::ArrayRef<size_t> shape0,
                                    llvm::ArrayRef<size_t> shape1) {
     size_t numDims0 = shape0.size();
@@ -1214,18 +1209,17 @@
   /// Associate all outputs of \p op with nodes in \p NVs. Number of outputs of
   /// \p op should match the number of elements of \p NVs.
   /// \returns error code in case of error.
-  llvm::Error assignNodeOutputs(const OpType &op,
-                                llvm::ArrayRef<NodeValue> NVs) {
+  Error assignNodeOutputs(const OpType &op, llvm::ArrayRef<NodeValue> NVs) {
     RETURN_ERR_IF_NOT(NVs.size() == op.output_size(), "Output size mismatch.");
     for (size_t i = 0; i < NVs.size(); i++) {
       nodeValueByName_[op.output(i)] = NVs[i];
     }
-    return llvm::Error::success();
+    return Error::success();
   }
 
   /// Load pre-trained weights from \p weightDescriptors.
-  llvm::Error loadWeights(uint32_t weightsCount,
-                          const onnxTensorDescriptorV1 *weightDescriptors) {
+  Error loadWeights(uint32_t weightsCount,
+                    const onnxTensorDescriptorV1 *weightDescriptors) {
     for (uint32_t i = 0; i < weightsCount; ++i) {
       const char *name = weightDescriptors[i].name;
 
@@ -1251,7 +1245,7 @@
       }
     }
 
-    return llvm::Error::success();
+    return Error::success();
   }
 };
 
diff --git a/include/glow/Importer/ONNXIFIModelLoader.h b/include/glow/Importer/ONNXIFIModelLoader.h
index 4b2b7011f2..e3e5345c53 100644
--- a/include/glow/Importer/ONNXIFIModelLoader.h
+++ b/include/glow/Importer/ONNXIFIModelLoader.h
@@ -52,7 +52,7 @@ class ONNXIFIModelLoader {
   /// provided such as when the graph being loaded is actually a small patch of
   /// a larger graph because the graph inputs in this case may represent
   /// internal values for the larger graph.
-  static llvm::Expected<std::unique_ptr<ONNXIFIModelLoader>>
+  static Expected<std::unique_ptr<ONNXIFIModelLoader>>
   parse(const void *onnxModel, uint32_t onnxModelSize, uint32_t weightsCount,
         const onnxTensorDescriptorV1 *weightDescriptors, Function &F,
         bool loadInputsAsPlaceholders = true, bool use_onnx = true);
diff --git a/include/glow/Importer/ONNXModelLoader.h b/include/glow/Importer/ONNXModelLoader.h
index b96558bddb..e7abfdbd92 100644
--- a/include/glow/Importer/ONNXModelLoader.h
+++ b/include/glow/Importer/ONNXModelLoader.h
@@ -41,7 +41,7 @@
 class ONNXModelLoader
     : public CommonOperatorLoader<ONNX_NAMESPACE::NodeProto> {
   /// \returns True if the operator has broadcasting activated.
-  llvm::Expected<bool> getBroadcast(const ArgumentDictionaryTy &dict) override;
+  Expected<bool> getBroadcast(const ArgumentDictionaryTy &dict) override;
 
   /// \returns True if the operator with the name \p typeName has support for
   /// multidirectional broadcasting.
@@ -49,15 +49,15 @@
   /// Converts a ONNX TensorProto DataType enum to the Glow element type.
   /// Supports only non quantized and signed types.
-  llvm::Expected<ElemKind>
+  Expected<ElemKind>
   convertTensorProtoDataType(ONNX_NAMESPACE::TensorProto_DataType t);
 
   /// Load the operator \p op into the network. This creates one or more nodes
   /// in the network. \returns Error if operator \p op cannot be loaded.
-  llvm::Error loadOperator(const ONNX_NAMESPACE::NodeProto &op);
+  Error loadOperator(const ONNX_NAMESPACE::NodeProto &op);
 
   /// \returns True if the operator\ op is successfully folded.
-  llvm::Expected<bool> foldOperator(const ONNX_NAMESPACE::NodeProto &op);
+  Expected<bool> foldOperator(const ONNX_NAMESPACE::NodeProto &op);
 
   /// ONNX model ir_version;
   size_t irVersion_;
@@ -66,204 +66,201 @@
   size_t opsetVersion_;
 
   /// Load Constant ONNX operator.
-  llvm::Error loadConstant(const ONNX_NAMESPACE::NodeProto &op,
-                           const ArgumentDictionaryTy &dict);
+  Error loadConstant(const ONNX_NAMESPACE::NodeProto &op,
+                     const ArgumentDictionaryTy &dict);
 
   /// Load Slice ONNX operator.
-  llvm::Error loadSlice(const ONNX_NAMESPACE::NodeProto &op,
-                        const ArgumentDictionaryTy &dict);
+  Error loadSlice(const ONNX_NAMESPACE::NodeProto &op,
+                  const ArgumentDictionaryTy &dict);
 
   /// Load Conv ONNX operator.
-  llvm::Error loadConv(const ONNX_NAMESPACE::NodeProto &op,
-                       const ArgumentDictionaryTy &dict);
+  Error loadConv(const ONNX_NAMESPACE::NodeProto &op,
+                 const ArgumentDictionaryTy &dict);
 
   /// Load MaxPool or AveragePool ONNX operator. \p typeName is the name of the
   /// ONNX operator being loaded, either MaxPool or AveragePool.
-  llvm::Error loadPool(const ONNX_NAMESPACE::NodeProto &op,
-                       const ArgumentDictionaryTy &dict,
-                       llvm::StringRef typeName);
+  Error loadPool(const ONNX_NAMESPACE::NodeProto &op,
+                 const ArgumentDictionaryTy &dict, llvm::StringRef typeName);
 
   /// Load GlobalAveragePool ONNX operator.
-  llvm::Error loadGlobalAveragePool(const ONNX_NAMESPACE::NodeProto &op,
-                                    const ArgumentDictionaryTy &dict);
+  Error loadGlobalAveragePool(const ONNX_NAMESPACE::NodeProto &op,
+                              const ArgumentDictionaryTy &dict);
 
   /// Load Squeeze ONNX operator.
-  llvm::Error loadSqueeze(const ONNX_NAMESPACE::NodeProto &op,
-                          const ArgumentDictionaryTy &dict);
+  Error loadSqueeze(const ONNX_NAMESPACE::NodeProto &op,
+                    const ArgumentDictionaryTy &dict);
 
   /// Load Unsqueeze ONNX operator.
-  llvm::Error loadUnsqueeze(const ONNX_NAMESPACE::NodeProto &op,
-                            const ArgumentDictionaryTy &dict);
+  Error loadUnsqueeze(const ONNX_NAMESPACE::NodeProto &op,
+                      const ArgumentDictionaryTy &dict);
 
   /// Load ArgMax ONNX operator.
-  llvm::Error loadArgMax(const ONNX_NAMESPACE::NodeProto &op,
-                         const ArgumentDictionaryTy &dict);
+  Error loadArgMax(const ONNX_NAMESPACE::NodeProto &op,
+                   const ArgumentDictionaryTy &dict);
 
   /// Load BatchNormalization ONNX operator.
-  llvm::Error loadBatchNormalization(const ONNX_NAMESPACE::NodeProto &op,
-                                     const ArgumentDictionaryTy &dict);
+  Error loadBatchNormalization(const ONNX_NAMESPACE::NodeProto &op,
+                               const ArgumentDictionaryTy &dict);
 
   /// Load Concat ONNX operator.
-  llvm::Error loadConcat(const ONNX_NAMESPACE::NodeProto &op,
-                         const ArgumentDictionaryTy &dict);
+  Error loadConcat(const ONNX_NAMESPACE::NodeProto &op,
+                   const ArgumentDictionaryTy &dict);
 
   /// Load FCTransposed ONNX operator.
-  llvm::Error loadFCTransposed(const ONNX_NAMESPACE::NodeProto &op,
-                               const ArgumentDictionaryTy &dict);
+  Error loadFCTransposed(const ONNX_NAMESPACE::NodeProto &op,
+                         const ArgumentDictionaryTy &dict);
 
   /// Load Gemm ONNX operator.
-  llvm::Error loadGemm(const ONNX_NAMESPACE::NodeProto &op,
-                       const ArgumentDictionaryTy &dict);
+  Error loadGemm(const ONNX_NAMESPACE::NodeProto &op,
+                 const ArgumentDictionaryTy &dict);
 
   /// Load MatMul ONNX operator.
-  llvm::Error loadMatMul(const ONNX_NAMESPACE::NodeProto &op,
-                         const ArgumentDictionaryTy &dict);
+  Error loadMatMul(const ONNX_NAMESPACE::NodeProto &op,
+                   const ArgumentDictionaryTy &dict);
 
   /// Load Pad ONNX operator.
-  llvm::Error loadPad(const ONNX_NAMESPACE::NodeProto &op,
-                      const ArgumentDictionaryTy &dict);
+  Error loadPad(const ONNX_NAMESPACE::NodeProto &op,
+                const ArgumentDictionaryTy &dict);
 
   /// Load Cast ONNX operator.
-  llvm::Error loadCast(const ONNX_NAMESPACE::NodeProto &op,
-                       const ArgumentDictionaryTy &dict);
+  Error loadCast(const ONNX_NAMESPACE::NodeProto &op,
+                 const ArgumentDictionaryTy &dict);
 
   /// Load LeakyRelu ONNX operator.
-  llvm::Error loadLeakyRelu(const ONNX_NAMESPACE::NodeProto &op,
-                            const ArgumentDictionaryTy &dict);
+  Error loadLeakyRelu(const ONNX_NAMESPACE::NodeProto &op,
+                      const ArgumentDictionaryTy &dict);
 
   /// Load SpaceToDepth ONNX operator.
-  llvm::Error loadSpaceToDepth(const ONNX_NAMESPACE::NodeProto &op,
-                               const ArgumentDictionaryTy &dict);
+  Error loadSpaceToDepth(const ONNX_NAMESPACE::NodeProto &op,
+                         const ArgumentDictionaryTy &dict);
 
   /// Load ConstantOfShape ONNX operator.
-  llvm::Error loadConstantOfShape(const ONNX_NAMESPACE::NodeProto &op,
-                                  const ArgumentDictionaryTy &dict,
-                                  bool isSplat);
+  Error loadConstantOfShape(const ONNX_NAMESPACE::NodeProto &op,
+                            const ArgumentDictionaryTy &dict, bool isSplat);
 
   /// Load Tile ONNX operator.
-  llvm::Error loadTile(const ONNX_NAMESPACE::NodeProto &op,
-                       const ArgumentDictionaryTy &dict);
+  Error loadTile(const ONNX_NAMESPACE::NodeProto &op,
+                 const ArgumentDictionaryTy &dict);
 
   /// Load Where ONNX operator.
-  llvm::Error loadWhere(const ONNX_NAMESPACE::NodeProto &op,
-                        const ArgumentDictionaryTy &dict);
+  Error loadWhere(const ONNX_NAMESPACE::NodeProto &op,
+                  const ArgumentDictionaryTy &dict);
 
   /// Load Glow specific operators, not defined in ONNX format
   /// Load Glow CmpEQ operator.
-  llvm::Error loadCmpEQ(const ONNX_NAMESPACE::NodeProto &op,
-                        const ArgumentDictionaryTy &dict);
+  Error loadCmpEQ(const ONNX_NAMESPACE::NodeProto &op,
+                  const ArgumentDictionaryTy &dict);
 
   /// Load Glow CmpLTE operator.
-  llvm::Error loadCmpLTE(const ONNX_NAMESPACE::NodeProto &op,
-                         const ArgumentDictionaryTy &dict);
+  Error loadCmpLTE(const ONNX_NAMESPACE::NodeProto &op,
+                   const ArgumentDictionaryTy &dict);
 
   /// Load Glow Select operator.
-  llvm::Error loadSelect(const ONNX_NAMESPACE::NodeProto &op,
-                         const ArgumentDictionaryTy &dict);
+  Error loadSelect(const ONNX_NAMESPACE::NodeProto &op,
+                   const ArgumentDictionaryTy &dict);
 
   /// Load Glow Quantize operator.
-  llvm::Error loadQuantize(const ONNX_NAMESPACE::NodeProto &op,
-                           const ArgumentDictionaryTy &dict);
+  Error loadQuantize(const ONNX_NAMESPACE::NodeProto &op,
+                     const ArgumentDictionaryTy &dict);
 
   /// Load Glow ConvertTo operator.
-  llvm::Error loadConvertTo(const ONNX_NAMESPACE::NodeProto &op,
-                            const ArgumentDictionaryTy &dict);
+  Error loadConvertTo(const ONNX_NAMESPACE::NodeProto &op,
+                      const ArgumentDictionaryTy &dict);
 
   /// Load Glow Dequantize operator.
-  llvm::Error loadDequantize(const ONNX_NAMESPACE::NodeProto &op,
-                             const ArgumentDictionaryTy &dict);
+  Error loadDequantize(const ONNX_NAMESPACE::NodeProto &op,
+                       const ArgumentDictionaryTy &dict);
 
   /// Load Glow Regression operator.
-  llvm::Error loadRegression(const ONNX_NAMESPACE::NodeProto &op,
-                             const ArgumentDictionaryTy &dict);
+  Error loadRegression(const ONNX_NAMESPACE::NodeProto &op,
+                       const ArgumentDictionaryTy &dict);
 
   /// Load Glow BatchedAdd operator.
-  llvm::Error loadBatchedAdd(const ONNX_NAMESPACE::NodeProto &op,
-                             const ArgumentDictionaryTy &dict);
+  Error loadBatchedAdd(const ONNX_NAMESPACE::NodeProto &op,
+                       const ArgumentDictionaryTy &dict);
 
   /// Load Glow ScatterAssign operator.
-  llvm::Error loadScatterAssign(const ONNX_NAMESPACE::NodeProto &op,
-                                const ArgumentDictionaryTy &dict);
+  Error loadScatterAssign(const ONNX_NAMESPACE::NodeProto &op,
+                          const ArgumentDictionaryTy &dict);
 
   /// Load Glow IntLookupTable operator.
-  llvm::Error loadIntLookupTable(const ONNX_NAMESPACE::NodeProto &op,
-                                 const ArgumentDictionaryTy &dict);
+  Error loadIntLookupTable(const ONNX_NAMESPACE::NodeProto &op,
+                           const ArgumentDictionaryTy &dict);
 
   /// Load Glow LengthsRangeFill operator.
-  llvm::Error loadLengthsRangeFill(const ONNX_NAMESPACE::NodeProto &op,
-                                   const ArgumentDictionaryTy &dict);
+  Error loadLengthsRangeFill(const ONNX_NAMESPACE::NodeProto &op,
+                             const ArgumentDictionaryTy &dict);
 
   /// Load Glow RescaleQuantized operator.
-  llvm::Error loadRescaleQuantized(const ONNX_NAMESPACE::NodeProto &op,
-                                   const ArgumentDictionaryTy &dict);
+  Error loadRescaleQuantized(const ONNX_NAMESPACE::NodeProto &op,
+                             const ArgumentDictionaryTy &dict);
 
   /// Load Glow RowwiseQuantizedSparseLengthsWeightedSum operator.
-  llvm::Error loadRowwiseQuantizedSparseLengthsWeightedSum(
+  Error loadRowwiseQuantizedSparseLengthsWeightedSum(
       const ONNX_NAMESPACE::NodeProto &op, const ArgumentDictionaryTy &dict);
 
   /// Load Glow FusedRowwiseQuantizedSparseLengthsWeightedSum operator.
-  llvm::Error loadFusedRowwiseQuantizedSparseLengthsWeightedSum(
+  Error loadFusedRowwiseQuantizedSparseLengthsWeightedSum(
      const ONNX_NAMESPACE::NodeProto &op, const ArgumentDictionaryTy &dict);
 
   /// Load Glow RowwiseQuantizedFullyConnected operator.
-  llvm::Error
-  loadRowwiseQuantizedFullyConnected(const ONNX_NAMESPACE::NodeProto &op,
-                                     const ArgumentDictionaryTy &dict);
+  Error loadRowwiseQuantizedFullyConnected(const ONNX_NAMESPACE::NodeProto &op,
+                                           const ArgumentDictionaryTy &dict);
 
   /// Load Glow FullyConnected operator.
-  llvm::Error loadFullyConnected(const ONNX_NAMESPACE::NodeProto &op,
-                                 const ArgumentDictionaryTy &dict);
+  Error loadFullyConnected(const ONNX_NAMESPACE::NodeProto &op,
+                           const ArgumentDictionaryTy &dict);
 
   /// Load Glow Splat operator.
-  llvm::Error loadSplat(const ONNX_NAMESPACE::NodeProto &op,
-                        const ArgumentDictionaryTy &dict);
+  Error loadSplat(const ONNX_NAMESPACE::NodeProto &op,
+                  const ArgumentDictionaryTy &dict);
 
 protected:
   /// Load the network operators from the GraphProto.
   /// \returns Error if network cannot be loaded.
-  llvm::Error loadNetwork(ONNX_NAMESPACE::GraphProto &net);
+  Error loadNetwork(ONNX_NAMESPACE::GraphProto &net);
 
   /// Set the output nodes of the network \p net. Initializes the map from the
   /// names of the outputs to the save nodes that save each output.
   /// \returns Error if network cannot be loaded.
-  llvm::Error setOutputNodes(ONNX_NAMESPACE::GraphProto &net);
+  Error setOutputNodes(ONNX_NAMESPACE::GraphProto &net);
 
   /// Set ir verion and op version.
-  llvm::Error setVersion(ONNX_NAMESPACE::ModelProto MP);
+  Error setVersion(ONNX_NAMESPACE::ModelProto MP);
 
   /// \returns Expected if a ModelProto can be loaded from the
   /// stream \p iStream.
-  static llvm::Expected<ONNX_NAMESPACE::ModelProto>
+  static Expected<ONNX_NAMESPACE::ModelProto>
   loadProto(google::protobuf::io::ZeroCopyInputStream &iStream);
 
   /// Load the network initializers from the GraphProto.
-  llvm::Error loadInitializers(ONNX_NAMESPACE::GraphProto &net);
+  Error loadInitializers(ONNX_NAMESPACE::GraphProto &net);
 
   /// Load the inputs from the GraphProto. If \p loadInputsAsPlaceholders is
   /// true then this will load each graph input as a placeholder otherwise it
   /// will create an empty tensor for each input.
-  llvm::Error loadInputs(ONNX_NAMESPACE::GraphProto &net,
-                         bool loadInputsAsPlaceholders);
+  Error loadInputs(ONNX_NAMESPACE::GraphProto &net,
+                   bool loadInputsAsPlaceholders);
 
   /// \returns Expected if a ModelProto can be constructed from the
   /// contents of the file \p filename and Error otherwise.
   /// Loads ModelProto from the file containing serialized protobuf.
-  static llvm::Expected<ONNX_NAMESPACE::ModelProto>
+  static Expected<ONNX_NAMESPACE::ModelProto>
   loadProto(const std::string &filename);
 
   /// \returns Expected if a ModelProto can be constructed from the
   /// in-memory serialized protobuf.
   /// Loads ModelProto from the in-memory serialized protobuf \p
   /// onnxModel with the model size \p onnxModelSize.
-  static llvm::Expected<ONNX_NAMESPACE::ModelProto>
-  loadProto(const void *onnxModel, size_t onnxModelSize);
+  static Expected<ONNX_NAMESPACE::ModelProto> loadProto(const void *onnxModel,
+                                                        size_t onnxModelSize);
 
   /// Checks that the inputs tensors are compatible with the inputs declared in
   /// the ONNX model. The input types in \p types match the list of names
   /// \p tensorNames.
-  llvm::Error checkInputs(ONNX_NAMESPACE::GraphProto &net,
-                          llvm::ArrayRef<const char *> tensorNames,
-                          llvm::ArrayRef<TypeRef> types);
+  Error checkInputs(ONNX_NAMESPACE::GraphProto &net,
+                    llvm::ArrayRef<const char *> tensorNames,
+                    llvm::ArrayRef<TypeRef> types);
 
   /// Creates a ONNX model loader to build \p F.
   /// Loads the ONNIXFI \p model from memory of \p modelSize size,
@@ -273,7 +270,7 @@ class ONNXModelLoader
   /// parameter \p errPtr.
   ONNXModelLoader(const void *model, uint32_t modelSize, uint32_t weightsCount,
                   const onnxTensorDescriptorV1 *weightDescriptors, Function &F,
-                  bool loadInputsAsPlaceholders, llvm::Error *errPtr = nullptr);
+                  bool loadInputsAsPlaceholders, Error *errPtr = nullptr);
 
   friend class ONNXIFIModelLoader;
 
@@ -281,8 +278,8 @@ class ONNXModelLoader
   /// \p loader is successful. The folding utility uses temporary
   /// loader \p tmpLoader, and associated temporary function \p F.
   template <class LoaderType, class OpType>
-  friend llvm::Error constantFoldInLoader(Function *F, LoaderType &tmpLoader,
-                                          LoaderType *loader, const OpType &op);
+  friend Error constantFoldInLoader(Function *F, LoaderType &tmpLoader,
+                                    LoaderType *loader, const OpType &op);
 
 public:
   /// \returns ONNX model ir_version;
@@ -294,7 +291,7 @@ class ONNXModelLoader
   /// Creates a ONNX model loader to build \p F.
   /// If \p errPtr is not null then if an error occurs it will get assigned
   /// there otherwise if an error occurs it will abort.
-  ONNXModelLoader(Function &F, llvm::Error *errPtr = nullptr);
+  ONNXModelLoader(Function &F, Error *errPtr = nullptr);
 
   /// Loads the ONNX model that's represented by a model description file,
   /// serialized in \p modelDescFilename and populates the network into \p F.
@@ -306,7 +303,7 @@ class ONNXModelLoader
   ONNXModelLoader(const std::string &modelDescFilename,
                   llvm::ArrayRef<const char *> tensorNames,
                   llvm::ArrayRef<TypeRef> types, Function &F,
-                  llvm::Error *errPtr = nullptr);
+                  Error *errPtr = nullptr);
 };
 
 } // namespace glow
diff --git a/include/glow/Importer/ProtobufLoader.h b/include/glow/Importer/ProtobufLoader.h
index 8088a84d55..86f06c6cfe 100644
--- a/include/glow/Importer/ProtobufLoader.h
+++ b/include/glow/Importer/ProtobufLoader.h
@@ -59,20 +59,19 @@ std::string unexpectedNodeErrorMessage(const T &node, llvm::StringRef message) {
 }
 
 /// Reads a single integer.
-template static llvm::Expected loadInt(const T *arg) { +template static Expected loadInt(const T *arg) { RETURN_ERR_IF_NOT(arg->has_i(), "Node has no Int value"); return arg->i(); } /// Reads a single float. -template static llvm::Expected loadFloat(const T *arg) { +template static Expected loadFloat(const T *arg) { RETURN_ERR_IF_NOT(arg->has_f(), "Node has no float value"); return arg->f(); } /// Reads a single string. -template -static llvm::Expected loadStr(const T *arg) { +template static Expected loadStr(const T *arg) { RETURN_ERR_IF_NOT(arg->has_s(), "Node has no str value"); return arg->s(); } @@ -130,12 +129,12 @@ class ProtobufLoader { /// under the name \p name. If an existing Placeholder is already registered /// under the same name then the tensor is thrown out and no new Constant /// is created. - llvm::Error createAndRegisterConstant(llvm::StringRef name, Tensor &&tensor); + Error createAndRegisterConstant(llvm::StringRef name, Tensor &&tensor); /// Create a new Placeholder of type \p T, and register it /// under the name \p name. \returns The newly created placeholder. - llvm::Expected - createAndRegisterPlaceholder(llvm::StringRef name, TypeRef T); + Expected createAndRegisterPlaceholder(llvm::StringRef name, + TypeRef T); /// \returns the NodeValue that was registered with the name \p name or /// a nullptr wrapped in a NodeValue if no node has been registered with this @@ -146,10 +145,10 @@ class ProtobufLoader { /// no Constant has been registered with this name. Constant *getConstantByNameOrNull(llvm::StringRef name) const; - /// \returns an llvm::Expected of the Constant registered with the given \p + /// \returns an Expected of the Constant registered with the given \p /// name and returns and Error if no Constant has been registered with this /// name. - llvm::Expected getConstantByName(llvm::StringRef name) const; + Expected getConstantByName(llvm::StringRef name) const; /// \returns whether or not a Constant has been registered with the given \p /// name. @@ -158,7 +157,7 @@ class ProtobufLoader { public: /// \returns the NodeValue that was registered with the name \p name. /// \pre hasNodeByName(name) - llvm::Expected getNodeValueByName(llvm::StringRef name) const; + Expected getNodeValueByName(llvm::StringRef name) const; /// \returns True if the node that's registered using \p name exists. bool hasNodeByName(llvm::StringRef name) const; @@ -170,7 +169,7 @@ class ProtobufLoader { /// occurs it will abort. ProtobufLoader(llvm::ArrayRef tensorNames, llvm::ArrayRef types, Function &F, - llvm::Error *errPtr = nullptr); + Error *errPtr = nullptr); ProtobufLoader(const ProtobufLoader &other) = delete; ProtobufLoader &operator=(const ProtobufLoader &) = delete; @@ -190,7 +189,7 @@ class ProtobufLoader { /// that there is only one output, returns Error otherwise. For image /// classification, this single final output is usually the result of the /// last softmax or regression layer. - llvm::Expected getSingleOutput() { + Expected getSingleOutput() { RETURN_ERR_IF_NOT(outputVarsByName_.size() == 1, "There must be only one output."); return outputVarsByName_.begin()->second; @@ -198,7 +197,7 @@ class ProtobufLoader { /// \returns the Placeholder for the external output with \p name. 
/// \pre outputVarsByName_.find(name) != outputVarsByName_.end() - llvm::Expected getOutputByName(llvm::StringRef name) const; + Expected getOutputByName(llvm::StringRef name) const; /// \returns True if the operator with name \p typeName having input node /// list as \p inputs is constant foldable. @@ -210,8 +209,8 @@ class ProtobufLoader { /// \p loader is successful. The folding utility uses temporary /// loader \p tmpLoader, and associated temporary function \p F. template -llvm::Error constantFoldInLoader(Function *F, LoaderType &tmpLoader, - LoaderType *loader, const OpType &op) { +Error constantFoldInLoader(Function *F, LoaderType &tmpLoader, + LoaderType *loader, const OpType &op) { PlaceholderBindings bindings; std::vector outTensors; Module *mod = F->getParent(); @@ -252,7 +251,7 @@ llvm::Error constantFoldInLoader(Function *F, LoaderType &tmpLoader, std::move(*outTensors[i]))); } - return llvm::Error::success(); + return Error::success(); } } // namespace glow diff --git a/include/glow/LLVMIRCodeGen/LLVMBackend.h b/include/glow/LLVMIRCodeGen/LLVMBackend.h index e8f4bbb256..bbd006a5cc 100644 --- a/include/glow/LLVMIRCodeGen/LLVMBackend.h +++ b/include/glow/LLVMIRCodeGen/LLVMBackend.h @@ -63,7 +63,7 @@ class LLVMBackend : public BackendUsingGlowIR { virtual std::unique_ptr compileIRWithoutConstants(IRFunction *IR) const; - virtual llvm::Expected> + virtual Expected> compile(Function *F, const BackendOptions &opts) const override; virtual void save(Function *F, llvm::StringRef outputDir, diff --git a/include/glow/LLVMIRCodeGen/LLVMCompiledFunction.h b/include/glow/LLVMIRCodeGen/LLVMCompiledFunction.h index db7063e0d9..6a154c54b1 100644 --- a/include/glow/LLVMIRCodeGen/LLVMCompiledFunction.h +++ b/include/glow/LLVMIRCodeGen/LLVMCompiledFunction.h @@ -30,7 +30,7 @@ class LLVMCompiledFunction : public CompiledFunction { /// \name CompiledFunction interface ///@{ - virtual llvm::Error execute(ExecutionContext *context) override; + virtual Error execute(ExecutionContext *context) override; virtual void collectConstants(const Module *module) override; diff --git a/include/glow/Optimizer/GraphOptimizer/CompilationContext.h b/include/glow/Optimizer/GraphOptimizer/CompilationContext.h index 9332bb65a5..f86d63898f 100644 --- a/include/glow/Optimizer/GraphOptimizer/CompilationContext.h +++ b/include/glow/Optimizer/GraphOptimizer/CompilationContext.h @@ -96,7 +96,7 @@ struct CompilationContext { /// \returns an error if the CompilationContext is malformed for whatever /// configuration it is set up for, otherwise returns success. 
- llvm::Error verify() const { + Error verify() const { RETURN_ERR_IF_NOT(!precisionConfig.useSetAsWhitelist || precisionConfig.convertToFP16, "Can only use the precisionModeKindSet as a whitelist in " @@ -104,21 +104,22 @@ struct CompilationContext { switch (precisionConfig.quantMode) { case QuantizationMode::Profile: - RETURN_ERR_IF_NOT(bindings, GlowErr::ErrorCode::COMPILE_CONTEXT_MALFORMED, + RETURN_ERR_IF_NOT(bindings, + ErrorValue::ErrorCode::COMPILE_CONTEXT_MALFORMED, "In Profiling mode, but bindings was not set.\n"); RETURN_ERR_IF_NOT(loweredInfoMap, - GlowErr::ErrorCode::COMPILE_CONTEXT_MALFORMED, + ErrorValue::ErrorCode::COMPILE_CONTEXT_MALFORMED, "In Profiling mode, but loweredInfoMap was not set.\n"); RETURN_ERR_IF_NOT(!precisionConfig.convertToFP16, - GlowErr::ErrorCode::COMPILE_CONTEXT_MALFORMED, + ErrorValue::ErrorCode::COMPILE_CONTEXT_MALFORMED, "Converting to FP16 while profiling is unsupported.\n"); break; case QuantizationMode::Quantize: RETURN_ERR_IF_NOT( - loweredInfoMap, GlowErr::ErrorCode::COMPILE_CONTEXT_MALFORMED, + loweredInfoMap, ErrorValue::ErrorCode::COMPILE_CONTEXT_MALFORMED, "In Quantization mode, but loweredInfoMap was not set.\n"); break; @@ -126,7 +127,7 @@ struct CompilationContext { break; } - return llvm::Error::success(); + return Error::success(); } }; diff --git a/include/glow/Optimizer/GraphOptimizer/GraphOptimizer.h b/include/glow/Optimizer/GraphOptimizer/GraphOptimizer.h index 31638f07b3..5e1566b24c 100644 --- a/include/glow/Optimizer/GraphOptimizer/GraphOptimizer.h +++ b/include/glow/Optimizer/GraphOptimizer/GraphOptimizer.h @@ -63,15 +63,13 @@ void profileQuantization(PlaceholderBindings &bindings, Function *F); /// Optimize the Function \p F given compilation options \p cctx for Backend \B. /// \returns success if all nodes in the final resulting optimized Function are /// supported by \p B; if not, this represents a compiler error. -llvm::Error optimizeFunction(Function *F, const Backend &B, - CompilationContext &cctx); +Error optimizeFunction(Function *F, const Backend &B, CompilationContext &cctx); /// Optimize the Function \p F given compilation options \p cctx performing /// backend-independent optimizations that can be done before lowering. /// \returns success if there were no compiler errors; if not, this represents a /// compiler error. -llvm::Error optimizeFunctionBeforeLowering(Function *F, - CompilationContext &cctx); +Error optimizeFunctionBeforeLowering(Function *F, CompilationContext &cctx); /// Perform a compile-time constant folding of the node \p N. /// \returns list of constants which are the result of the constant-folding. @@ -82,9 +80,9 @@ std::vector constantFold(Node *N); /// Execute function \p F by the \p backend using the provided \p bindings and /// the compilation context \p cctx. /// \returns error if function is not a constant function. -llvm::Error executeConstantFunction(Backend &backend, Function &F, - PlaceholderBindings &bindings, - CompilationContext &cctx); +Error executeConstantFunction(Backend &backend, Function &F, + PlaceholderBindings &bindings, + CompilationContext &cctx); /// Perform vertical split of FC weights in a given function. 
 /// Optimization could facilitate parallel execution of FCs on multiple device
diff --git a/include/glow/Optimizer/GraphOptimizer/TrainingPreparation.h b/include/glow/Optimizer/GraphOptimizer/TrainingPreparation.h
index 2eaf6529d6..e24dc8ebb5 100644
--- a/include/glow/Optimizer/GraphOptimizer/TrainingPreparation.h
+++ b/include/glow/Optimizer/GraphOptimizer/TrainingPreparation.h
@@ -38,7 +38,7 @@ TensorInitializer getDefaultTensorInitializer();

 /// Function takes glow::Function \p F, \p bindings, \p selected placeholder,
 /// and \p initializer for the input weights.
-llvm::Error prepareFunctionForTraining(
+Error prepareFunctionForTraining(
     Function *F, PlaceholderBindings &bindings, Placeholder *&selected,
     TensorInitializer &&initializer = getDefaultTensorInitializer());
 } // namespace glow
diff --git a/include/glow/Partitioner/Partitioner.h b/include/glow/Partitioner/Partitioner.h
index 9fd30c8a09..c54933e44a 100644
--- a/include/glow/Partitioner/Partitioner.h
+++ b/include/glow/Partitioner/Partitioner.h
@@ -81,8 +81,7 @@ class Partitioner final : public PartitionerBase {
   /// Verify the generated functions in module, and \returns error if any
   /// function is invalid. Dump partition logs from \p partitions and \p
   /// mapping.
-  llvm::Error finalize(const DAGListTy &partitions,
-                       const NodeToFunctionMap &mapping);
+  Error finalize(const DAGListTy &partitions, const NodeToFunctionMap &mapping);

   /// After getting the initial partitions, adjust the partitions to minimize
   /// communication and computation cost.
@@ -104,7 +103,7 @@ class Partitioner final : public PartitionerBase {
   /// Partition a function \p F based on backends \p backends. \returns the
   /// final partition result (or an error) and a map between partitions and
   /// backend names. \p cctx is used for function optimization.
-  llvm::Expected<DAGListTy>
+  Expected<DAGListTy>
   backendBasedPartition(FunctionToBackendNameMap &funcToBackend, Function *F,
                         std::vector<Backend *> &backends,
                         CompilationContext &cctx);
@@ -113,7 +112,7 @@ class Partitioner final : public PartitionerBase {
   /// on current functions in this module for backend \p backendName found in \p
   /// backendMap. \p cctx is used for function optimization. \returns the
   /// partition result or an error.
-  llvm::Expected<DAGListTy>
+  Expected<DAGListTy>
   createDAGWithoutPartition(llvm::StringRef backendName,
                             std::map<std::string, BackendInfo> &backendMap,
                             CompilationContext &cctx);
@@ -150,7 +149,7 @@ class Partitioner final : public PartitionerBase {
   /// Based on \p partitionConfig passed into Partitioner, do user-defined
   /// partition.
-  llvm::Expected<DAGListTy>
+  Expected<DAGListTy>
   partitionFromConfig(const PartitionConfig &partitionConfig);

   /// This partition approach is used in Glow Quantization Profiling flow. The
@@ -158,13 +157,12 @@ class Partitioner final : public PartitionerBase {
   /// backends. Then each sub-function will be compiled and run in CPU backend
   /// for profiling. \p cctx is used for function optimization. \returns the
   /// partition result or an error.
-  llvm::Expected<DAGListTy>
-  quantizationProfilingPartition(CompilationContext &cctx);
+  Expected<DAGListTy> quantizationProfilingPartition(CompilationContext &cctx);

   /// This partition approach first does the partition based on backend types,
   /// and then based on cost models (memory usage and performance). \p cctx is
   /// used for function optimization. \returns the partition result or an error.
-  llvm::Expected<DAGListTy> heterogeneousPartition(CompilationContext &cctx);
+  Expected<DAGListTy> heterogeneousPartition(CompilationContext &cctx);
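[Editorial note: all of these entry points now return `Expected<DAGListTy>` rather than `llvm::Expected`. A short sketch of how a caller can unwrap that result; the wrapper function itself is illustrative, not part of the patch.]

```cpp
// Illustrative caller: unwrap the partition result or propagate the error.
Expected<DAGListTy> partitionModule(Partitioner &partitioner,
                                    CompilationContext &cctx) {
  DAGListTy partitions;
  // On failure, the contained ErrorValue is returned to our caller.
  ASSIGN_VALUE_OR_RETURN_ERR(partitions, partitioner.partition(cctx));
  // ... inspect or log the partitions here ...
  return std::move(partitions);
}
```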
   /// This partition approach is an experimental one. It tries to balance the
   /// workloads of each accelerator/device in addition to respecting memory
@@ -173,15 +171,15 @@ class Partitioner final : public PartitionerBase {
   /// \p numDevices sub-networks. Now it is overwritten inside of
   /// loadBalancedPartition. But in the future, it can be manually defined by
   /// users.
-  llvm::Expected<DAGListTy> loadBalancedPartition(CompilationContext &cctx,
-                                                  size_t numDevices = 0);
+  Expected<DAGListTy> loadBalancedPartition(CompilationContext &cctx,
+                                            size_t numDevices = 0);

   /// Decompose each function in a module. Given the parameters, this function
   /// will choose different partition approaches supported in this class:
   /// heterogeneous partition, user-defined partition or quantization profiling.
   /// \p cctx is used for function optimization. \returns the partition result
   /// or an error.
-  llvm::Expected<DAGListTy> partition(CompilationContext &cctx) override;
+  Expected<DAGListTy> partition(CompilationContext &cctx) override;
 };
 } // namespace glow
 #endif // GLOW_PARTITIONER_PARTITIONER_H
diff --git a/include/glow/Partitioner/PartitionerBase.h b/include/glow/Partitioner/PartitionerBase.h
index 3766550a86..d8f173ca97 100644
--- a/include/glow/Partitioner/PartitionerBase.h
+++ b/include/glow/Partitioner/PartitionerBase.h
@@ -30,7 +30,7 @@ class PartitionerBase {

   /// Decompose each function in a module. \p cctx is used in function
   /// optimization. \returns the partition result.
-  virtual llvm::Expected<DAGListTy> partition(CompilationContext &cctx) = 0;
+  virtual Expected<DAGListTy> partition(CompilationContext &cctx) = 0;

   /// Dump the partition result \p partitions to a dot file with name \p
   /// dotFilename. Since now all functions belong to a function family and they
diff --git a/include/glow/Partitioner/PartitionerValidation.h b/include/glow/Partitioner/PartitionerValidation.h
index 4fd766d311..72f3a8c9c2 100644
--- a/include/glow/Partitioner/PartitionerValidation.h
+++ b/include/glow/Partitioner/PartitionerValidation.h
@@ -22,19 +22,19 @@ namespace glow {
 /// Check if \p partitions satisfies number of physical devices restriction.
 /// I.e. check if the number of logical devices is less than the given
 /// physical devices.
-llvm::Error
-logicalDevicesValidation(const NodeToFunctionMap &partitions,
-                         const std::map<std::string, BackendInfo> &backendMap);
+Error logicalDevicesValidation(
+    const NodeToFunctionMap &partitions,
+    const std::map<std::string, BackendInfo> &backendMap);

 /// Check if the memory usage of each partition meets the physical device
 /// memory restriction.
-llvm::Error
-memoryUsageValidation(const NodeToFunctionMap &partitions,
-                      const std::map<std::string, BackendInfo> &backendMap);
+Error memoryUsageValidation(
+    const NodeToFunctionMap &partitions,
+    const std::map<std::string, BackendInfo> &backendMap);

 /// Check if the current partition is a valid DAG. This check can only be called
 /// after a real partition is created and the DAG is generated.
-llvm::Error dagValidation(const DAG &dag);
+Error dagValidation(const DAG &dag);
 } // namespace glow
 #endif // GLOW_PARTITIONER_PARTITIONERVALIDATION_H
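[Editorial note: taken together, these helpers let post-partition checks run as one short chain; the first failing check short-circuits. A sketch under that assumption; the wrapper name is hypothetical.]

```cpp
// Sketch: chain the validation helpers declared in PartitionerValidation.h.
Error validatePartitions(const NodeToFunctionMap &partitions,
                         const std::map<std::string, BackendInfo> &backendMap) {
  RETURN_IF_ERR(logicalDevicesValidation(partitions, backendMap));
  RETURN_IF_ERR(memoryUsageValidation(partitions, backendMap));
  return Error::success();
}
```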
diff --git a/include/glow/Runtime/Executor/ThreadPoolExecutor.h b/include/glow/Runtime/Executor/ThreadPoolExecutor.h
index 9f2b2a9421..690f758c45 100644
--- a/include/glow/Runtime/Executor/ThreadPoolExecutor.h
+++ b/include/glow/Runtime/Executor/ThreadPoolExecutor.h
@@ -80,14 +80,14 @@ class ThreadPoolExecutor final : public Executor {

   /// Handle the result returned asynchronously by the DeviceManager.
   /// \p executionState tracks the state of the run that the node that
-  /// finished executing belongs to, \p err is the llvm::Error returned by the
+  /// finished executing belongs to, \p err is the Error returned by the
   /// DeviceManager, \p ctx is the ExecutionContext that contains the outputs
   /// produced by \p node during the run.
   ///
   /// The main purpose of this function is to help move computation off of the
   /// DeviceManager thread pool and onto the one owned by this class.
   void handleDeviceManagerResult(std::shared_ptr<ExecutionState> executionState,
-                                 llvm::Error err,
+                                 Error err,
                                  std::unique_ptr<ExecutionContext> ctx,
                                  const DAGNode *node);
diff --git a/include/glow/Runtime/HostManager/HostManager.h b/include/glow/Runtime/HostManager/HostManager.h
index 5af8d78a6f..4de55a3117 100644
--- a/include/glow/Runtime/HostManager/HostManager.h
+++ b/include/glow/Runtime/HostManager/HostManager.h
@@ -158,24 +158,24 @@ class HostManager final {
   /// Adds the network to the host and does the necessary setup work. This
   /// includes partitioning, provisioning, compiling and initializing
   /// backends. Additionally DAGs are created for each function and stored in
-  /// networks_. \returns an llvm::Error containing the results of the
+  /// networks_. \returns an Error containing the results of the
   /// operation. This function consumes the \p module so any pointers to data
   /// contained within the module should be considered invalid. The function is
   /// optimized based on \p cctx. If \p saturateHost is set to true the
   /// HostManager will try to use all available devices on the host.
-  llvm::Error addNetwork(std::unique_ptr<Module> module,
-                         CompilationContext &cctx, bool saturateHost = false);
+  Error addNetwork(std::unique_ptr<Module> module, CompilationContext &cctx,
+                   bool saturateHost = false);

   /// Given \p networkName removes that network from the host. This also
   /// removes the network from any backends setup to execute it.
-  /// \returns an llvm::Error indicating success or failure of the operation.
-  llvm::Error removeNetwork(llvm::StringRef networkName);
+  /// \returns an Error indicating success or failure of the operation.
+  Error removeNetwork(llvm::StringRef networkName);

   /// Returns true if \p networkName is already added to the host.
   bool networkAdded(llvm::StringRef networkName);

   /// Removes all networks from the host, and stops execution on all devices.
-  llvm::Error clearHost();
+  Error clearHost();

   /// Runs the network specified by \p networkName using
   /// the provided \p context, returns a runIdentifier which refers to the
@@ -193,23 +193,23 @@ class HostManager final {
   /// A wrapper around runNetwork that provides a blocking interface for an
   /// inference request. Runs the network provided in \p networkName using \p
-  /// context. \returns an llvm::Error indicating success or failure.
-  llvm::Error runNetworkBlocking(llvm::StringRef networkName,
-                                 std::unique_ptr<ExecutionContext> context);
+  /// context. \returns an Error indicating success or failure.
+  Error runNetworkBlocking(llvm::StringRef networkName,
+                           std::unique_ptr<ExecutionContext> context);

   /// A wrapper around runNetwork that provides a blocking interface for an
   /// inference request. Runs the network provided in \p networkName using \p
-  /// bindings for placeholder bindings. \returns an llvm::Error indicating
+  /// bindings for placeholder bindings. \returns an Error indicating
   /// success or failure.
- llvm::Error runNetworkBlocking(llvm::StringRef networkName, - PlaceholderBindings &bindings); + Error runNetworkBlocking(llvm::StringRef networkName, + PlaceholderBindings &bindings); /// Initialize the HostManager with the given \p configs creating one /// DeviceManager for each config listed. - llvm::Error init(std::vector> configs); + Error init(std::vector> configs); /// Get the network DAG for \p network if it exists. - llvm::Expected getNetworkDAG(llvm::StringRef network); + Expected getNetworkDAG(llvm::StringRef network); ~HostManager(); }; diff --git a/include/glow/Runtime/Provisioner/Provisioner.h b/include/glow/Runtime/Provisioner/Provisioner.h index 703680c869..a9bd747092 100644 --- a/include/glow/Runtime/Provisioner/Provisioner.h +++ b/include/glow/Runtime/Provisioner/Provisioner.h @@ -37,12 +37,12 @@ class Provisioner final { /// 1. Retrieves each node's Function from the provided \p module. /// 2. Compiles it using the provided CompilationContext \p cctx. /// 3. Assigns a device and calls addNetwork on the chosen device(s). - /// \returns a GlowErr indicating if the operation was a success. - llvm::Error provision(DAGListTy &networks, Module &module, - CompilationContext &cctx); + /// \returns a Error indicating if the operation was a success. + Error provision(DAGListTy &networks, Module &module, + CompilationContext &cctx); /// Remove stored compiledFunction. - llvm::Error removeFunction(llvm::StringRef name); + Error removeFunction(llvm::StringRef name); private: /// Pointer to backend used for compilation. This currently gets reset per diff --git a/include/glow/Runtime/RuntimeTypes.h b/include/glow/Runtime/RuntimeTypes.h index 0f96455f7f..3208374f76 100644 --- a/include/glow/Runtime/RuntimeTypes.h +++ b/include/glow/Runtime/RuntimeTypes.h @@ -42,7 +42,7 @@ using DeviceManagerMapTy = std::map>; /// Callback type used by HostManager and DeviceManager, used to pass results of /// an inference request back to the caller. -using ResultCBTy = std::function)>; /// Data structure that contains device constraint information for each device. diff --git a/include/glow/Support/Error.h b/include/glow/Support/Error.h index cdc74ba7b7..43782db173 100644 --- a/include/glow/Support/Error.h +++ b/include/glow/Support/Error.h @@ -16,40 +16,219 @@ #ifndef GLOW_SUPPORT_ERROR_H #define GLOW_SUPPORT_ERROR_H +#include +#include #include #include -#include "llvm/Support/Error.h" -#include "llvm/Support/FormatVariadic.h" - #include +/// NOTE: please only use code and macros that resides outside of the detail +/// namespace in Error.h and Error.cpp so as to preserve a layer of +/// abstraction between Error/Expected types and the specific classes that +/// implement them. + namespace glow { -/// NOTE This should not be used directly, instead use EXIT_ON_ERR or -/// TEMP_EXIT_ON_ERR. Callable that takes an llvm::Error or llvm::Expected -/// and exits the program if the Error is not equivalent llvm::Error::success() -/// or the Expected contains an error that is not equivalent -/// llvm::Error::success() -/// TODO: replace this with a function that will print file and -/// line numbers also. -extern llvm::ExitOnError exitOnErr; - -/// Is true_type only if applied to llvm::Error or a descendant. -template -struct IsLLVMError : public std::is_base_of {}; +/// Consumes an Error \p err and \returns true iff the error contained an +/// ErrorValue. Calls the log method on ErrorValue if the optional argument \p +/// log is passed. +#define ERR_TO_BOOL(...) 
\ + (glow::detail::errorToBool(__FILE__, __LINE__, __VA_ARGS__)) + +/// Consumes an Error \p err and \returns "success" if it does not contain an +/// ErrorValue or the result of calling the log() if it does. +#define ERR_TO_STRING(err) (glow::detail::errorToString((err))) + +/// Consumes an Error \p err. Calls the log method on the ErrorValue if the +/// optional argument \p log is passed. +#define ERR_TO_VOID(...) \ + (glow::detail::errorToVoid(__FILE__, __LINE__, __VA_ARGS__)) + +/// Unwraps the T from within an Expected. If the Expected contains +/// an ErrorValue, the program will exit. +#define EXIT_ON_ERR(...) \ + (glow::detail::exitOnError(__FILE__, __LINE__, __VA_ARGS__)) + +/// A temporary placeholder for EXIT_ON_ERR. This should be used only during +/// refactoring to temporarily place an EXIT_ON_ERR and should eventually be +/// replaced with either an actual EXIT_ON_ERR or code that will propogate +/// potential errors up the stack. +#define TEMP_EXIT_ON_ERR(...) (EXIT_ON_ERR(__VA_ARGS__)) + +/// Makes a new Error. +#define MAKE_ERR(...) glow::detail::makeError(__FILE__, __LINE__, __VA_ARGS__) + +/// Makes a new Error and \returns that Error. +#define RETURN_ERR(...) \ + do { \ + return MAKE_ERR(__VA_ARGS__); \ + } while (0) + +/// Takes an Expected \p rhsOrErr and if it is an Error then returns +/// it, otherwise takes the value from rhsOrErr and assigns it to \p lhs. +#define ASSIGN_VALUE_OR_RETURN_ERR(lhs, rhsOrErr) \ + do { \ + auto rhsOrErrV = (rhsOrErr); \ + static_assert(glow::detail::IsExpected(), \ + "Expected value to be a Expected"); \ + if (rhsOrErrV) { \ + lhs = std::move(rhsOrErrV.get()); \ + } else { \ + return rhsOrErrV.takeError(); \ + } \ + } while (0) + +/// Takes an Expected \p rhsOrErr and if it is an Error then calls FAIL() +/// otherwise takes the value from rhsOrErr and assigns it to \p lhs. +#define ASSIGN_VALUE_OR_FAIL_TEST(lhs, rhsOrErr) \ + do { \ + auto rhsOrErrV = (rhsOrErr); \ + static_assert(glow::detail::IsExpected(), \ + "Expected value to be a Expected"); \ + if (rhsOrErrV) { \ + lhs = std::move(rhsOrErrV.get()); \ + } else { \ + FAIL() << errorToString(rhsOrErr.takeError()); \ + } \ + } while (0) -/// Is true_type only if applied to llvm::Expected. -template struct IsLLVMExpected : public std::false_type {}; +/// Takes an Error and returns it if it's not success. +// TODO: extend this to work with Expected as well. +#define RETURN_IF_ERR(err) \ + do { \ + if (auto errV = std::forward(err)) { \ + static_assert(glow::detail::IsError::value, \ + "Expected value to be a Error"); \ + return std::forward(errV); \ + } \ + } while (0) + +/// Takes an Error and if it contains an ErrorValue then calls FAIL(). +#define FAIL_TEST_IF_ERR(err) \ + do { \ + if (auto errV = std::forward(err)) { \ + static_assert(glow::detail::IsError::value, \ + "Expected value to be a Error"); \ + FAIL() << errorToString(std::move(errV)); \ + } \ + } while (0) + +/// Takes a predicate \p and if it is false then creates a new Error +/// and returns it. +#define RETURN_ERR_IF_NOT(p, ...) \ + do { \ + if (!(p)) { \ + RETURN_ERR(__VA_ARGS__); \ + } \ + } while (0) + +/// Forward declarations. +namespace detail { +class GlowError; +class GlowErrorSuccess; +class GlowErrorEmpty; +class GlowErrorValue; +template class GlowExpected; +} // namespace detail + +/// Type aliases to decouple Error and Expected from underlying implementation. 
+using Error = detail::GlowError; +using ErrorSuccess = detail::GlowErrorSuccess; +using ErrorEmpty = detail::GlowErrorEmpty; +using ErrorValue = detail::GlowErrorValue; +template using Expected = detail::GlowExpected; + +/// NOTE: detail namespace contains code that should not be used outside of +/// Error.h and Error.cpp. Please instead use types and macros defined above. +namespace detail { +/// enableCheckingErrors is used to enable assertions that every Error and +/// Expected has its status checked before it is destroyed. This should be +/// enabled in debug builds but turned off otherwise. +#ifndef NDEBUG +static constexpr bool enableCheckingErrors = true; +#else +static constexpr bool enableCheckingErrors = false; +#endif + +/// Is true_type only if applied to Error or a descendant. +template struct IsError : public std::is_base_of {}; + +/// Is true_type only if applied to Expected. +template struct IsExpected : public std::false_type {}; template -struct IsLLVMExpected> : public std::true_type {}; +struct IsExpected> : public std::true_type {}; + +/// CheckState is a common base class for Error and Expected that +/// tracks whether their state has been checked or not if DoChecks is true +/// and otherwise it does nothing and has no members so as to not take extra +/// space. This is used to ensure that all Errors and Expecteds are checked +/// before they are destroyed. +template class CheckState; + +/// Specialization of CheckState with checking enabled. +template <> class CheckState { + /// Whether or not the a check has occurred. + bool checked_ = false; -/// Represents errors in Glow. GlowErr track the file name and line number of -/// where they were created as well as a textual message and/or a error code to -/// help identify the type of error the occurred programtically. -class GlowErr final : public llvm::ErrorInfo { public: - /// Used by ErrorInfo::classID. - static const uint8_t ID; + /// Set the state of checked. + inline void setChecked(bool checked) { checked_ = checked; } + + /// Asserts that the state has been checked. + inline void ensureChecked() const { + assert(checked_ && "Unchecked Error or Expected"); + } + CheckState() : checked_(false) {} + + /// Destructor that is used to ensure that base classes have been checked. + ~CheckState() { ensureChecked(); } +}; + +/// Specialization of CheckState with checking disabled. +template <> class CheckState { +public: + inline void setChecked(bool checked) {} + inline void ensureChecked() const {} +}; + +/// Opaque is an aligned opaque container for some type T. It holds a T in-situ +/// but will not destroy it automatically when the Opaque is destroyed but +/// instead only when the destroy() method is called. +template class Opaque { +private: + alignas(T) char payload_[sizeof(T)]; + +public: + /// Sets the value within this Opaque container. + void set(T t) { new (payload_) T(std::forward(t)); } + + /// Gets the value within this Opaque container. + T &get() { return *reinterpret_cast(payload_); } + + /// Gets the value within this Opaque container. + const T &get() const { return *reinterpret_cast(payload_); } + + /// Call the destructor of the value in this container. + void destroy() { get().~T(); } +}; + +/// This method is the only way to destroy an Error \p error and mark it as +/// checked when it contains an ErrorValue. It \returns the contained +/// ErrorValue. +/// NOTE: This method should not be used directly, use one of the methods that +/// calls this. 
+std::unique_ptr takeErrorValue(GlowError error); + +/// Takes an Error \p error and asserts that it does not contain an ErrorValue. +/// Uses \p fileName and \p lineNumber for logging. +void exitOnError(const char *fileName, size_t lineNumber, GlowError error); + +/// ErrorValue contains information about an error that occurs at runtime. It is +/// not used directly but instead is passed around inside of the Error and +/// Expected containers. It should only be constructed using the makeError +/// method. +class GlowErrorValue final { +public: /// An enumeration of error codes representing various possible errors that /// could occur. /// NOTE: when updating this enum, also update ErrorCodeToString function @@ -97,89 +276,40 @@ class GlowErr final : public llvm::ErrorInfo { COMPILE_UNSUPPORTED_IR_AFTER_OPTIMIZE, }; - /// GlowErr is not convertable to std::error_code. This is included for - /// compatiblity with ErrorInfo. - virtual std::error_code convertToErrorCode() const override { - return llvm::inconvertibleErrorCode(); - } - - /// Log to \p OS relevant error information including the file name and - /// line number the GlowErr was created on as well as the message and/or error - /// code the GlowErr was created with. - void log(llvm::raw_ostream &OS) const override { - OS << "location: " << fileName_ << ":" << lineNumber_; + /// Log to \p os relevant error information including the file name and + /// line number the ErrorValue was created on as well as the message and/or + /// error code the ErrorValue was created with. + template void log(StreamT &os) const { + os << "location: " << fileName_ << ":" << lineNumber_; if (ec_ != ErrorCode::UNKNOWN) { - OS << " error code: " << errorCodeToString(ec_); + os << " error code: " << errorCodeToString(ec_); } if (!message_.empty()) { - OS << " message: " << message_; + os << " message: " << message_; } } - GlowErr(llvm::StringRef fileName, size_t lineNumber, llvm::StringRef message, - ErrorCode ec) + std::string logToString() const; + + GlowErrorValue(const char *fileName, size_t lineNumber, std::string message, + ErrorCode ec) : lineNumber_(lineNumber), fileName_(fileName), message_(message), ec_(ec) {} - GlowErr(llvm::StringRef fileName, size_t lineNumber, ErrorCode ec, - llvm::StringRef message) + GlowErrorValue(const char *fileName, size_t lineNumber, ErrorCode ec, + std::string message) : lineNumber_(lineNumber), fileName_(fileName), message_(message), ec_(ec) {} - GlowErr(llvm::StringRef fileName, size_t lineNumber, ErrorCode ec) + GlowErrorValue(const char *fileName, size_t lineNumber, ErrorCode ec) : lineNumber_(lineNumber), fileName_(fileName), ec_(ec) {} - GlowErr(llvm::StringRef fileName, size_t lineNumber, llvm::StringRef message) + GlowErrorValue(const char *fileName, size_t lineNumber, std::string message) : lineNumber_(lineNumber), fileName_(fileName), message_(message) {} private: /// Convert ErrorCode values to string. 
- static std::string errorCodeToString(const ErrorCode &ec) { - switch (ec) { - case ErrorCode::UNKNOWN: - return "UNKNOWN"; - case ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE: - return "MODEL_LOADER_UNSUPPORTED_SHAPE"; - case ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR: - return "MODEL_LOADER_UNSUPPORTED_OPERATOR"; - case ErrorCode::MODEL_LOADER_UNSUPPORTED_ATTRIBUTE: - return "MODEL_LOADER_UNSUPPORTED_ATTRIBUTE"; - case ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE: - return "MODEL_LOADER_UNSUPPORTED_DATATYPE"; - case ErrorCode::MODEL_LOADER_UNSUPPORTED_ONNX_VERSION: - return "MODEL_LOADER_UNSUPPORTED_ONNX_VERSION"; - case ErrorCode::MODEL_LOADER_INVALID_PROTOBUF: - return "MODEL_LOADER_INVALID_PROTOBUF"; - case ErrorCode::PARTITIONER_ERROR: - return "PARTITIONER_ERROR"; - case ErrorCode::RUNTIME_ERROR: - return "RUNTIME_ERROR"; - case ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY: - return "RUNTIME_OUT_OF_DEVICE_MEMORY"; - case ErrorCode::RUNTIME_NET_NOT_FOUND: - return "RUNTIME_NET_NOT_FOUND"; - case ErrorCode::RUNTIME_REQUEST_REFUSED: - return "RUNTIME_REQUEST_REFUSED"; - case ErrorCode::RUNTIME_DEVICE_NOT_FOUND: - return "RUNTIME_DEVICE_NOT_FOUND"; - case ErrorCode::RUNTIME_NET_BUSY: - return "RUNTIME_NET_BUSY"; - case ErrorCode::COMPILE_UNSUPPORTED_NODE_AFTER_OPTIMIZE: - return "COMPILE_UNSUPPORTED_NODE_AFTER_OPTIMIZE"; - case ErrorCode::COMPILE_CONTEXT_MALFORMED: - return "COMPILE_CONTEXT_MALFORMED"; - case ErrorCode::MODEL_WRITER_INVALID_FILENAME: - return "MODEL_WRITER_INVALID_FILENAME"; - case ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR: - return "MODEL_WRITER_SERIALIZATION_ERROR"; - case ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_GENERATE: - return "COMPILE_UNSUPPORTED_IR_AFTER_GENERATE"; - case ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_OPTIMIZE: - return "COMPILE_UNSUPPORTED_IR_AFTER_OPTIMIZE"; - }; - - llvm_unreachable("unsupported ErrorCode"); - } + static std::string errorCodeToString(const ErrorCode &ec); /// The line number the error was generated on. size_t lineNumber_; @@ -191,43 +321,434 @@ class GlowErr final : public llvm::ErrorInfo { ErrorCode ec_ = ErrorCode::UNKNOWN; }; -/// Marks the Error \p err as as checked. \returns true if it contains an -/// error value and prints the message in the error value, returns false -/// otherwise. -inline bool errToBool(llvm::Error err) { - if (static_cast(err)) { - LOG(ERROR) << "Converting error to boolean: " - << llvm::toString(std::move(err)); - return true; - } - return false; +/// Overload for operator<< for logging an ErrorValue \p errorValue to a stream +/// \p os. +template +StreamT &operator<<(StreamT &os, const GlowErrorValue &errorValue) { + errorValue.log(os); + return os; } -template llvm::Error takeErr(llvm::Expected e) { - if (!bool(e)) { - return e.takeError(); +/// Error is a container for pointers to ErrorValues. If an ErrorValue is +/// contained Error ensures that it is checked before being destroyed. +class GlowError : protected detail::CheckState { + template friend class GlowExpected; + friend std::unique_ptr detail::takeErrorValue(GlowError); + + /// Pointer to ErrorValue managed by this Error. Can be null if no error + /// occurred. Use getters and setters defined below to access this since they + /// also will modify the CheckState. + std::unique_ptr errorValue_; + + /// \return true if an ErrorValue is contained. 
+  inline bool hasErrorValue() const { return nullptr != errorValue_; }
+
+  /// Sets the value of errorValue_ to \p errorValue ensuring not to overwrite
+  /// any previously contained ErrorValues that were unchecked. This is skipped
+  /// however if \p skipCheck is passed.
+  /// NOTE: skipCheck should only be used by constructors.
+  inline void setErrorValue(std::unique_ptr<GlowErrorValue> errorValue,
+                            bool skipCheck = false) {
+    // Can't overwrite an existing error unless we say not to check.
+    if (skipCheck) {
+      assert(errorValue_ == nullptr &&
+             "Trying to skip state check on an Error that "
+             "contains an ErrorValue is a bug because this should only happen "
+             "in a constructor and then no ErrorValue should be contained.");
+    } else {
+      ensureChecked();
+    }
+
+    errorValue_ = std::move(errorValue);
+    setChecked(false);
+  }
+
+  /// \returns the contents of errorValue_ by moving them. Marks the Error as
+  /// checked no matter what.
+  /// NOTE: This is the only way to mark an Error that contains an ErrorValue as
+  /// checked.
+  inline std::unique_ptr<GlowErrorValue> takeErrorValue() {
+    setChecked(true);
+    return std::move(errorValue_);
+  }
+
+protected:
+  /// Construct a new empty Error.
+  explicit GlowError() { setErrorValue(nullptr, /*skipCheck*/ true); }
+
+public:
+  /// Construct an Error from an ErrorValue \p errorValue.
+  GlowError(std::unique_ptr<GlowErrorValue> errorValue) {
+    assert(errorValue &&
+           "Cannot construct an Error from a null ErrorValue ptr");
+    setErrorValue(std::move(errorValue), /*skipCheck*/ true);
+  }
+
+  /// Move construct an Error from another Error \p other.
+  GlowError(GlowError &&other) {
+    setErrorValue(std::move(other.errorValue_), /*skipCheck*/ true);
+    other.setChecked(true);
+  }
+
+  /// Construct an Error from an ErrorEmpty \p other. This is a special case
+  /// constructor that will mark the constructed Error as being checked. This
+  /// should only be used for creating Errors that will be passed into things
+  /// like fallible constructors of other classes to be written to.
+  GlowError(GlowErrorEmpty &&other);
+
+  /// Move assign Error from another Error \p other.
+  GlowError &operator=(GlowError &&other) {
+    setErrorValue(std::move(other.errorValue_));
+    other.setChecked(true);
+    return *this;
+  }
+
+  /// Create an Error not containing an ErrorValue that signifies success
+  /// instead of failure of an operation.
+  /// NOTE: this Error must still be checked before being destroyed.
+  static GlowErrorSuccess success();
+
+  /// Create an empty Error that signifies that an operation has not yet
+  /// occurred. This should only be used when another Error will be assigned to
+  /// this Error for example when calling a fallible constructor that takes an
+  /// Error reference as a parameter.
+  /// NOTE: this Error is considered to be "pre-checked" and therefore can be
+  /// destroyed at any time.
+  static GlowErrorEmpty empty();
+
+  // Disable copying Errors.
+  GlowError(const GlowError &) = delete;
+  GlowError &operator=(const GlowError &) = delete;
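[Editorial note: the setter/getter discipline above is what gives Error its "must check" semantics in debug builds. A minimal sketch of the resulting caller-side contract; `doWork` is a stand-in for any fallible routine.]

```cpp
// Sketch: every Error must be tested or consumed before destruction.
Error doWork(); // hypothetical fallible routine

Error caller() {
  Error err = doWork();
  if (err) {
    // err still holds an unchecked ErrorValue; moving it out transfers
    // the check obligation to our caller.
    return err;
  }
  // operator bool already marked the success case as checked.
  return Error::success();
}
```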
+  /// Overload of operator bool() that \returns true if no ErrorValue is
+  /// contained.
+  /// NOTE: This marks the Error as checked only if no ErrorValue is contained.
+  /// If an ErrorValue is contained then that ErrorValue must be handled in
+  /// order to mark as checked.
+  explicit operator bool() {
+    // Only mark as checked when there isn't an ErrorValue contained.
+    bool hasError = hasErrorValue();
+    if (!hasError) {
+      setChecked(true);
+    }
+    return hasError;
+  }
+};
+
+/// ErrorSuccess is a special Error that is used to mark the absence of an
+/// error. It shouldn't be constructed directly but instead using
+/// Error::success().
+class GlowErrorSuccess final : public GlowError {};
+
+/// See declaration in Error for details.
+inline GlowErrorSuccess GlowError::success() { return GlowErrorSuccess(); }
+
+/// ErrorEmpty is a special Error that is used to contain the future state of
+/// a fallible process that hasn't yet occurred. It shouldn't be
+/// constructed directly but instead using Error::empty(). See comments on
+/// Error::empty() method for more details.
+class GlowErrorEmpty final : public GlowError {};
+
+/// See declaration in Error for details.
+inline GlowErrorEmpty GlowError::empty() { return GlowErrorEmpty(); }
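[Editorial note: `Error::empty()` exists for the fallible-constructor pattern used by the model loaders earlier in this patch: construct a pre-checked placeholder, pass its address in, then inspect it. A sketch; `FallibleThing` is hypothetical.]

```cpp
// Sketch of the Error::empty() out-parameter pattern.
Error errPtr = Error::empty();          // pre-checked; safe to destroy as-is
FallibleThing thing(/*...,*/ &errPtr);  // constructor may assign a real Error
if (ERR_TO_BOOL(std::move(errPtr))) {
  // construction failed; ERR_TO_BOOL consumed and logged the ErrorValue
}
```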
+/// Expected is a templated container for either a value of type T or an
+/// ErrorValue. It is used for fallible processes that may return either a value
+/// or encounter an error. Expected ensures that its state has been checked for
+/// errors before destruction.
+template <typename T>
+class GlowExpected final
+    : protected detail::CheckState<enableCheckingErrors> {
+  template <typename TT>
+  friend TT detail::exitOnError(const char *fileName, size_t lineNumber,
+                                GlowExpected<TT> expected);
+
+  /// Union type between ErrorValue and T. Holds both in Opaque containers so
+  /// that lifetime management is manual and tied to the lifetime of Expected.
+  union Payload {
+    detail::Opaque<std::unique_ptr<GlowErrorValue>> asErrorValue;
+    detail::Opaque<T> asValue;
+  };
+
+  /// A union that contains either an ErrorValue if an error occurred
+  /// or a value of type T.
+  Payload payload_;
+
+  /// Whether or not payload_ contains an Error. Expected cannot be constructed
+  /// from ErrorSuccess so if an ErrorValue is contained it is a legitimate
+  /// Error.
+  bool isError_;
+
+  /// Getter for isError_.
+  inline bool getIsError() const { return isError_; }
+
+  /// Setter for isError_.
+  inline void setIsError(bool isError) { isError_ = isError; }
+
+  /// Asserts that an ErrorValue is contained not a Value.
+  inline void ensureError() {
+    assert(getIsError() && "Trying to get an ErrorValue of an Expected that "
+                           "doesn't contain an ErrorValue");
+  }
+
+  /// Asserts that a Value is contained not an ErrorValue.
+  inline void ensureValue() {
+    assert(
+        !getIsError() &&
+        "Trying to get a Value of an Expected that doesn't contain a Value");
+  }
+
+  /// Setter for payload_ that inserts an ErrorValue \p errorValue. If \p
+  /// skipCheck is true then don't check that the current payload has been
+  /// checked before setting otherwise do check.
+  /// NOTE: Only constructors of Expected should use skipCheck.
+  inline void setErrorValue(std::unique_ptr<GlowErrorValue> errorValue,
+                            bool skipCheck = false) {
+    if (!skipCheck) {
+      ensureChecked();
+    }
+    setIsError(true);
+    return payload_.asErrorValue.set(std::move(errorValue));
+  }
+
+  /// Getter for payload_ to retrieve an ErrorValue. Ensures that an ErrorValue
+  /// is contained and that it has been checked.
+  inline GlowErrorValue *getErrorValue() {
+    ensureError();
+    ensureChecked();
+    return payload_.asErrorValue.get().get();
+  }
+
+  /// Getter for payload_ to retrieve an ErrorValue. Ensures that an ErrorValue
+  /// is contained and that it has been checked.
+  inline const GlowErrorValue *getErrorValue() const {
+    ensureError();
+    ensureChecked();
+    return payload_.asErrorValue.get().get();
+  }
+
+  /// \returns the ErrorValue contents of payload_ by moving them. Marks the
+  /// Expected as checked no matter what.
+  /// NOTE: This is the only way to mark an Expected that contains an ErrorValue
+  /// as checked.
+  inline std::unique_ptr<GlowErrorValue> takeErrorValue() {
+    ensureError();
+    setChecked(true);
+    return std::move(payload_.asErrorValue.get());
+  }
+
+  /// Sets payload_ with a value of type T \p value. If \p skipCheck is true
+  /// then don't check that the current payload has been checked before setting
+  /// otherwise do check.
+  /// NOTE: Only constructors of Expected should use skipCheck.
+  inline void setValue(T value, bool skipCheck = false) {
+    static_assert(!std::is_reference<T>::value,
+                  "Expected has not been equipped to hold references yet.");
+
+    if (!skipCheck) {
+      ensureChecked();
+    }
+    setIsError(false);
+    return payload_.asValue.set(std::move(value));
+  }
+
+  /// \returns a value T contained in payload_. Ensures that value is contained
+  /// by payload_ and that it has been checked already.
+  inline T *getValue() {
+    ensureValue();
+    ensureChecked();
+    return &payload_.asValue.get();
+  }
+
+  /// \returns a value T contained in payload_. Ensures that value is contained
+  /// by payload_ and that it has been checked already.
+  inline const T *getValue() const {
+    ensureValue();
+    ensureChecked();
+    return &payload_.asValue.get();
+  }
+
+  /// \returns the value contents of payload_ by moving them. Marks the Expected
+  /// as checked no matter what.
+  inline T takeValue() {
+    ensureValue();
+    setChecked(true);
+    return std::move(payload_.asValue.get());
+  }
+
+public:
+  /// Construct an Expected from an Error. The error must contain an ErrorValue.
+  /// Marks the Error as checked.
+  GlowExpected(GlowError error) {
+    assert(error.hasErrorValue() &&
+           "Must have an ErrorValue to construct an Expected from an Error");
+    setErrorValue(std::move(error.takeErrorValue()), /*skipCheck*/ true);
+  }
+
+  /// Disallow construction of Expected from ErrorSuccess and ErrorEmpty.
+  GlowExpected(GlowErrorSuccess) = delete;
+  GlowExpected(GlowErrorEmpty) = delete;
+
+  /// Move construct Expected<T> from a value of type OtherT as long as OtherT
+  /// is convertible to T.
+  template <typename OtherT>
+  GlowExpected(
+      OtherT &&other,
+      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+          nullptr) {
+    setValue(std::forward<OtherT>(other), /*skipCheck*/ true);
+  }
+
+  /// Move construct Expected from another Expected.
+  GlowExpected(GlowExpected &&other) {
+    if (other.getIsError()) {
+      setErrorValue(std::move(other.takeErrorValue()),
+                    /*skipCheck*/ true);
+    } else {
+      setValue(std::move(other.takeValue()), /*skipCheck*/ true);
+    }
+  }
+
+  /// Move construct Expected<T> from Expected<OtherT> as long as OtherT is
+  /// convertible to T.
+  template <typename OtherT>
+  GlowExpected(
+      GlowExpected<OtherT> &&other,
+      typename std::enable_if<std::is_convertible<OtherT, T>::value>::type * =
+          nullptr) {
+    if (other.getIsError()) {
+      setErrorValue(std::move(other.takeErrorValue()),
+                    /*skipCheck*/ true);
+    } else {
+      setValue(std::move(other.takeValue()), /*skipCheck*/ true);
+    }
+  }
+
+  /// Move assign Expected from another Expected.
+  GlowExpected &operator=(GlowExpected &&other) {
+    if (other.getIsError()) {
+      setErrorValue(std::move(other.takeErrorValue()));
+    } else {
+      setValue(std::move(other.takeValue()));
+    }
+    return *this;
+  }
+
+  /// Destructor for Expected, manually destroys the contents of payload_.
+  ~GlowExpected() {
+    if (getIsError()) {
+      payload_.asErrorValue.destroy();
+    } else {
+      payload_.asValue.destroy();
+    }
+  }
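[Editorial note: these accessors are exactly what `ASSIGN_VALUE_OR_RETURN_ERR` builds on. Conceptually the macro expands to something like the following hand-written approximation (the `static_assert` is omitted; `getInt()` returning `Expected<int>` is hypothetical).]

```cpp
// Approximate expansion of ASSIGN_VALUE_OR_RETURN_ERR(lhs, getInt()).
int lhs;
auto rhsOrErrV = getInt();
if (rhsOrErrV) {                     // true when a value is held; marks checked
  lhs = std::move(rhsOrErrV.get());  // move the value out
} else {
  return rhsOrErrV.takeError();      // forward the ErrorValue to the caller
}
```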
+  /// Overload for operator bool that returns true if no ErrorValue is
+  /// contained. Marks the state as checked if no ErrorValue is contained.
+  explicit operator bool() {
+    bool isError = getIsError();
+    if (!isError) {
+      setChecked(true);
+    }
+    return !isError;
+  }
+
+  /// Get a reference to a value contained by payload_.
+  T &get() { return *getValue(); }
+
+  /// Get a const reference to a value contained by payload_.
+  const T &get() const { return *getValue(); }
+
+  /// Construct and \returns an Error and takes ownership of any ErrorValue in
+  /// payload_. If no ErrorValue is in payload_ then return Error::success().
+  /// Marks the Expected as checked no matter what.
+  GlowError takeError() {
+    if (getIsError()) {
+      return GlowError(takeErrorValue());
+    }
+    setChecked(true);
+    return GlowError::success();
+  }
+
+  T *operator->() { return getValue(); }
+
+  const T *operator->() const { return getValue(); }
+
+  T &operator*() { return *getValue(); }
+
+  const T &operator*() const { return *getValue(); }
+};
+
+/// Given an Expected, asserts that it contains a value T and \returns it. If
+/// an ErrorValue is contained in the expected then logs this along with \p
+/// fileName and \p lineNumber and aborts.
+template <typename T>
+T exitOnError(const char *fileName, size_t lineNumber,
+              GlowExpected<T> expected) {
+  if (expected) {
+    return expected.takeValue();
   } else {
-    return llvm::Error::success();
+    auto error = expected.takeError();
+    std::unique_ptr<GlowErrorValue> errorValue =
+        detail::takeErrorValue(std::move(error));
+    assert(errorValue != nullptr && "Expected should have a non-null "
+                                    "ErrorValue if bool(expected) is false");
+    LOG(FATAL) << "exitOnError(Expected<T>) at " << fileName << ":"
+               << lineNumber
+               << " got an unexpected ErrorValue: " << (*errorValue);
   }
 }

-/// This class holds an llvm::Error provided via the add method. If an Error is
+/// Constructs an ErrorValue from \p args then wraps and \returns it in an
+/// Error.
+/// NOTE: this should not be used directly, use macros defined at the top of
+/// Error.h instead.
+template <typename... Args> GlowError makeError(Args &&... args) {
+  auto errorValue = std::unique_ptr<GlowErrorValue>(
+      new GlowErrorValue(std::forward<Args>(args)...));
+  return GlowError(std::move(errorValue));
+}
+
+/// Given an Error \p error, destroys the Error and returns true if an
+/// ErrorValue was contained. Logs if \p log is true and uses \p fileName and \p
+/// lineNumber for additional logging information.
+/// NOTE: this should not be used directly, use macros defined at the top of
+/// Error.h instead.
+bool errorToBool(const char *fileName, size_t lineNumber, GlowError error,
+                 bool log = true);
+
+/// Given an Error \p error, destroys the Error and returns a string that is the
+/// result of calling log() on the ErrorValue it contained if any and "success"
+/// otherwise.
+/// NOTE: this should not be used directly, use macros defined at the top of
+/// Error.h instead.
+std::string errorToString(GlowError error);
+
+/// Given an Error \p error, destroys the Error. Logs if \p log is true and uses
+/// \p fileName and \p lineNumber for additional logging information.
+/// NOTE: this should not be used directly, use macros defined at the top of
+/// Error.h instead.
+void errorToVoid(const char *fileName, size_t lineNumber, GlowError error,
+                 bool log = true);
+} // namespace detail
+
+/// This class holds an Error provided via the add method. If an Error is
 /// added when the class already holds an Error, it will discard the new Error
 /// in favor of the original one. All methods in OneErrOnly are thread-safe.
class OneErrOnly { - llvm::Error err_ = llvm::Error::success(); + Error err_ = Error::success(); std::mutex m_; public: - /// Add a new llvm::Error \p err to be stored. If an existing Error has + /// Add a new Error \p err to be stored. If an existing Error has /// already been added then the contents of the new error will be logged and /// the new err will be discarded. \returns true if \p err was stored and /// \returns false otherwise. If \p err is an empty Error then does nothing /// and \returns false; - bool set(llvm::Error err); + bool set(Error err); - /// \returns the stored llvm:Error clearing out the storage of the class. - llvm::Error get(); + /// \returns the stored Error clearing out the storage of the class. + Error get(); /// \returns true if contains an Error and false otherwise. bool containsErr(); @@ -235,84 +756,4 @@ class OneErrOnly { } // end namespace glow -/// Unwraps the T from within an llvm::Expected. If the Expected contains -/// an error, the program will exit. -#define EXIT_ON_ERR(...) (glow::exitOnErr(__VA_ARGS__)) - -/// A temporary placeholder for EXIT_ON_ERR. This should be used only during -/// refactoring to temporarily place an EXIT_ON_ERR and should eventually be -/// replaced with either an actual EXIT_ON_ERR or code that will propogate -/// potential errors up the stack. -#define TEMP_EXIT_ON_ERR(...) (EXIT_ON_ERR(__VA_ARGS__)) - -/// Make a new GlowErr. -#define MAKE_ERR(...) \ - llvm::make_error(__FILE__, __LINE__, __VA_ARGS__) - -/// Makes a new GlowErr and returns it. -#define RETURN_ERR(...) \ - do { \ - return MAKE_ERR(__VA_ARGS__); \ - } while (0) - -/// Takes an llvm::Expected \p lhsOrErr and if it is an Error then returns -/// it, otherwise takes the value from lhsOrErr and assigns it to \p rhs. -#define ASSIGN_VALUE_OR_RETURN_ERR(rhs, lhsOrErr) \ - do { \ - auto lhsOrErrV = (lhsOrErr); \ - static_assert(glow::IsLLVMExpected(), \ - "Expected value to be a llvm::Expected"); \ - if (lhsOrErrV) { \ - rhs = std::move(lhsOrErrV.get()); \ - } else { \ - return lhsOrErrV.takeError(); \ - } \ - } while (0) - -/// Takes an llvm::Expected \p lhsOrErr and if it is an Error then returns -/// false, otherwise takes the value from lhsOrErr and assigns it to \p rhs. -#define ASSIGN_VALUE_OR_RETURN_FALSE(rhs, lhsOrErr) \ - do { \ - auto lhsOrErrV = (lhsOrErr); \ - static_assert(glow::IsLLVMExpected(), \ - "Expected value to be a llvm::Expected"); \ - if (lhsOrErrV) { \ - rhs = std::move(lhsOrErrV.get()); \ - } else { \ - return false; \ - } \ - } while (0) - -/// Takes an llvm::Error and returns it if it's not success. -// TODO: extend this to work with llvm::Expected as well. -#define RETURN_IF_ERR(err) \ - do { \ - if (auto errV = std::forward(err)) { \ - static_assert(glow::IsLLVMError::value, \ - "Expected value to be a llvm::Error"); \ - return std::forward(errV); \ - } \ - } while (0) - -/// Takes a predicate \p and if it is false then creates a new GlowErr -/// and returns it. -#define RETURN_ERR_IF_NOT(p, ...) \ - do { \ - if (!(p)) { \ - RETURN_ERR(__VA_ARGS__); \ - } \ - } while (0) - -/// Marks the given llvm::Error as checked as long as it's value is equal to -/// llvm::Error::success(). This macro should be used as little as possible but -/// but is useful for example for creating dummy Errors that can be passed into -/// fallible constructor by reference to be filled in the event an Error occurs. 
-#define MARK_ERR_CHECKED(err) \ - do { \ - bool success = !(err); \ - (void)success; \ - assert(success && "MARK_ERR_CHECKED should not be called on an " \ - "llvm::Error that contains an actual error."); \ - } while (0) - #endif // GLOW_SUPPORT_ERROR_H diff --git a/lib/Backends/CPU/CPUDeviceManager.cpp b/lib/Backends/CPU/CPUDeviceManager.cpp index 50d43e74f5..929f441192 100644 --- a/lib/Backends/CPU/CPUDeviceManager.cpp +++ b/lib/Backends/CPU/CPUDeviceManager.cpp @@ -17,6 +17,7 @@ #include "CPUFunction.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/FormatVariadic.h" #include "llvm/Support/raw_ostream.h" namespace glow { @@ -97,8 +98,9 @@ void CPUDeviceManager::addNetworkImpl(const Module *module, } if (usedMemoryBytes_ + allFunctionsMemoryBytes > maxMemoryBytes_) { - readyCB(module, MAKE_ERR(GlowErr::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY, - "Failed to add network: not enough memory")); + readyCB(module, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY, + "Failed to add network: not enough memory")); return; } @@ -117,7 +119,7 @@ void CPUDeviceManager::addNetworkImpl(const Module *module, exportMemoryCounters(); // Fire the ready CB. - readyCB(module, llvm::Error::success()); + readyCB(module, Error::success()); } void CPUDeviceManager::evictNetworkImpl(std::string functionName, @@ -130,7 +132,7 @@ void CPUDeviceManager::evictNetworkImpl(std::string functionName, functions_.erase(it); } else { evictCB(functionName, - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND, strFormat("Could not find function with name %s to evict", functionName.c_str()))); return; @@ -138,7 +140,7 @@ void CPUDeviceManager::evictNetworkImpl(std::string functionName, // Export change in memory usage. exportMemoryCounters(); - evictCB(functionName, llvm::Error::success()); + evictCB(functionName, Error::success()); } void CPUDeviceManager::runFunctionImpl( @@ -153,7 +155,7 @@ void CPUDeviceManager::runFunctionImpl( dmRun.addArg("reason", "function not found"); TRACE_EVENT_SCOPE_END_NAMED(dmRun); resultCB(id, - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND, llvm::formatv("Function {0} not found", function).str()), std::move(context)); return; diff --git a/lib/Backends/CPU/CPUFunction.cpp b/lib/Backends/CPU/CPUFunction.cpp index d554dff9ec..bb7f425a28 100644 --- a/lib/Backends/CPU/CPUFunction.cpp +++ b/lib/Backends/CPU/CPUFunction.cpp @@ -25,6 +25,6 @@ CPUFunction::CPUFunction(std::unique_ptr JIT, runtime::RuntimeBundle &&runtimeBundle) : LLVMCompiledFunction(std::move(JIT), std::move(runtimeBundle)) {} -llvm::Error CPUFunction::execute(ExecutionContext *context) { +Error CPUFunction::execute(ExecutionContext *context) { return LLVMCompiledFunction::execute(context); } diff --git a/lib/Backends/CPU/CPUFunction.h b/lib/Backends/CPU/CPUFunction.h index c475cc2ac8..802de71efc 100644 --- a/lib/Backends/CPU/CPUFunction.h +++ b/lib/Backends/CPU/CPUFunction.h @@ -33,7 +33,7 @@ class CPUFunction final : public LLVMCompiledFunction { /// \name CompiledFunction interface ///@{ ~CPUFunction() override = default; - llvm::Error execute(ExecutionContext *context) override; + Error execute(ExecutionContext *context) override; /// \returns the backend used to compile this function. 
virtual std::string getCompileBackendName() const override { return "CPU"; } diff --git a/lib/Backends/Habana/Habana.cpp b/lib/Backends/Habana/Habana.cpp index 7e547200c9..b83735e3e6 100644 --- a/lib/Backends/Habana/Habana.cpp +++ b/lib/Backends/Habana/Habana.cpp @@ -24,6 +24,7 @@ #include "synapse.h" #include "llvm/Support/FileSystem.h" +#include "llvm/Support/FormatVariadic.h" #include #include @@ -32,7 +33,7 @@ using namespace glow; /// Get a path to a temporary file for the compiled recipe. -static llvm::Expected getRecipeFile() { +static Expected getRecipeFile() { llvm::SmallString<64> path; auto err = llvm::sys::fs::createTemporaryFile("glow", "recipe", path); RETURN_ERR_IF_NOT( @@ -447,7 +448,7 @@ allocateGraphTensors(Function *F) { return tensors; } -llvm::Expected> +Expected> HabanaBackend::compile(Function *F, const BackendOptions &opts) const { chk(synCreateGraph(synDeviceGoya)); @@ -1070,7 +1071,7 @@ HabanaBackend::compile(Function *F, const BackendOptions &opts) const { LOG(INFO) << "Compilation took " << duration / 1000.0 << " [ms]"; chk(synDestroyGraph()); - return llvm::Expected>( + return Expected>( llvm::make_unique(runtime::RuntimeBundle::create(*F), recipeName, F)); } diff --git a/lib/Backends/Habana/Habana.h b/lib/Backends/Habana/Habana.h index c34578b4f3..d2b366aaa0 100644 --- a/lib/Backends/Habana/Habana.h +++ b/lib/Backends/Habana/Habana.h @@ -42,7 +42,7 @@ class HabanaBackend final : public Backend { std::string getBackendName() const override { return getName(); } static std::string getName() { return "Habana"; } - llvm::Expected> + Expected> compile(Function *F, const BackendOptions &opts) const override; bool isOpSupported(const NodeInfo &NI) const override; diff --git a/lib/Backends/Habana/HabanaDeviceManager.cpp b/lib/Backends/Habana/HabanaDeviceManager.cpp index 638ad1467d..7b6067f982 100644 --- a/lib/Backends/Habana/HabanaDeviceManager.cpp +++ b/lib/Backends/Habana/HabanaDeviceManager.cpp @@ -19,6 +19,7 @@ #include "glow/Runtime/StatsExporter.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/FormatVariadic.h" #include "llvm/Support/raw_ostream.h" #include "synapse.h" @@ -77,7 +78,7 @@ HabanaDeviceManager::~HabanaDeviceManager() { } } -llvm::Error HabanaDeviceManager::init() { +Error HabanaDeviceManager::init() { std::lock_guard lock(synapseMtx_); // If this is the first HabanaDeviceManager to be created, initialize the @@ -111,10 +112,10 @@ llvm::Error HabanaDeviceManager::init() { RETURN_ERR("Failed to create HabanaDeviceManager thread pools"); } - return llvm::Error::success(); + return Error::success(); } -llvm::Error HabanaDeviceManager::updateMemoryUsage() { +Error HabanaDeviceManager::updateMemoryUsage() { // TODO: Use synGetMemInfo once implemented. 
 // Use GlowHabanaMemory if it is defined from GFLAGS or llvm params,
@@ -135,7 +136,7 @@ llvm::Error HabanaDeviceManager::updateMemoryUsage() {
     freeMemory_ -= runtimeBundle.getMutableWeightSize();
   }
-  return llvm::Error::success();
+  return Error::success();
 }
 void HabanaDeviceManager::addNetwork(const Module *module,
@@ -214,7 +215,7 @@ void HabanaDeviceManager::addNetwork(const Module *module,
     return;
   }
-  readyCB(module, llvm::Error::success());
+  readyCB(module, Error::success());
 }
 void HabanaDeviceManager::evictNetwork(std::string functionName,
@@ -272,7 +273,7 @@ void HabanaDeviceManager::evictNetwork(std::string functionName,
     return;
   }
-  evictCB(functionName, llvm::Error::success());
+  evictCB(functionName, Error::success());
 }
 void HabanaDeviceManager::runFunctionImpl(RunIdentifierTy runId,
@@ -418,7 +419,7 @@ void HabanaDeviceManager::runFunctionImpl(RunIdentifierTy runId,
       // Return the IO buffer to the IO buffer pool.
       ioBufferPool->put(std::move(ioBuffer));
-      resultCB(runId, llvm::Error::success(), std::move(ctx));
+      resultCB(runId, Error::success(), std::move(ctx));
     }
   });
 }
@@ -439,10 +440,10 @@ HabanaDeviceManager::runFunction(std::string functionName,
   return runId;
 }
-llvm::Error HabanaDeviceManager::stop(bool block) {
+Error HabanaDeviceManager::stop(bool block) {
   runPool_->stop(block);
   waitPool_->stop(block);
-  return llvm::Error::success();
+  return Error::success();
 }
 uint64_t HabanaDeviceManager::getMaximumMemory() const { return totalMemory_; }
diff --git a/lib/Backends/Habana/HabanaDeviceManager.h b/lib/Backends/Habana/HabanaDeviceManager.h
index e4c46f3305..0307409699 100644
--- a/lib/Backends/Habana/HabanaDeviceManager.h
+++ b/lib/Backends/Habana/HabanaDeviceManager.h
@@ -113,7 +113,7 @@ class HabanaDeviceManager : public DeviceManager {
   /// Update the totalMemory_ and freeMemory_ counts for the device based on
   /// per-function memory estimates. This function is not thread safe and
   /// should only be invoked while holding synapseLock.
-  llvm::Error updateMemoryUsage();
+  Error updateMemoryUsage();
 public:
   /// Constructor.
@@ -126,7 +126,7 @@ class HabanaDeviceManager : public DeviceManager {
   /// See DeviceManager and QueueBackedDeviceManager for the documentation of
   /// the interface below.
-  llvm::Error init() override;
+  Error init() override;
   void addNetwork(const Module *module, FunctionMapTy functions,
                   ReadyCBTy readyCB) override;
@@ -138,7 +138,7 @@ class HabanaDeviceManager : public DeviceManager {
               std::unique_ptr<ExecutionContext> ctx,
               runtime::ResultCBTy resultCB) override;
-  llvm::Error stop(bool block) override;
+  Error stop(bool block) override;
   uint64_t getMaximumMemory() const override;
   uint64_t getAvailableMemory() const override;
diff --git a/lib/Backends/Habana/HabanaFunction.cpp b/lib/Backends/Habana/HabanaFunction.cpp
index d5e3f3eb72..77b4c5acf5 100644
--- a/lib/Backends/Habana/HabanaFunction.cpp
+++ b/lib/Backends/Habana/HabanaFunction.cpp
@@ -30,7 +30,7 @@ HabanaIOBuffer::HabanaIOBuffer(
     const std::unordered_map<const Placeholder *, size_t> &offsets)
     : deviceId_(deviceId), buffer_(buffer), offsets_(offsets) {}
-llvm::Expected<uint8_t *> HabanaIOBuffer::get(const Placeholder *p) const {
+Expected<uint8_t *> HabanaIOBuffer::get(const Placeholder *p) const {
   RETURN_ERR_IF_NOT(offsets_.count(p) > 0, "Placeholder not in IO buffer!");
   return buffer_ + offsets_.find(p)->second;
 }
@@ -206,7 +206,7 @@ void HabanaFunction::findIOPlaceholders(Function *F) {
 }
 /// Retrieve and dump debug info about a topology.
-static llvm::Error dumpTopologyInfo(uint32_t deviceId, uint64_t topologyId) {
+static Error dumpTopologyInfo(uint32_t deviceId, uint64_t topologyId) {
   uint32_t numOfInputs;
   uint32_t numOfOutputs;
   uint32_t numOfIntermediates;
@@ -233,7 +233,7 @@ static llvm::Error dumpTopologyInfo(uint32_t deviceId, uint64_t topologyId) {
     VLOG(1) << "Topology intermediates: " << intermediateTensorNames[i];
   }
-  return llvm::Error::success();
+  return Error::success();
 }
 HabanaFunction::~HabanaFunction() {
@@ -243,7 +243,7 @@ HabanaFunction::~HabanaFunction() {
       << "Failed to remove file at " << recipeName_ << ".bin";
 }
-llvm::Error HabanaFunction::execute(ExecutionContext *context) {
+Error HabanaFunction::execute(ExecutionContext *context) {
   auto *tc = context->getTraceContext();
   TRACE_EVENT_SCOPE_NAMED(tc, TraceLevel::RUNTIME, "execute", exEvent);
   exEvent.addArg("recipe", recipeName_);
@@ -357,7 +357,7 @@ llvm::Error HabanaFunction::execute(ExecutionContext *context) {
   static_cast<HabanaBindings *>(context->getDeviceBindings())
       ->setHandle(HabanaWaitHandle(deviceId, handle, std::move(inputInfo),
                                    std::move(outputInfo)));
-  return llvm::Error::success();
+  return Error::success();
 }
 } // namespace glow
diff --git a/lib/Backends/Habana/HabanaFunction.h b/lib/Backends/Habana/HabanaFunction.h
index b3b2c6ff57..b19ca47bca 100644
--- a/lib/Backends/Habana/HabanaFunction.h
+++ b/lib/Backends/Habana/HabanaFunction.h
@@ -46,8 +46,8 @@ class HabanaIOBuffer {
   HabanaIOBuffer &operator=(HabanaIOBuffer &&src) = delete;
   /// Get a pointer to the buffer at which to read/store Placeholder \p p.
-  /// \returns a GlowErr if an error occurred.
-  llvm::Expected<uint8_t *> get(const Placeholder *p) const;
+  /// \returns an Error if an error occurred.
+  Expected<uint8_t *> get(const Placeholder *p) const;
 private:
   /// The device that this buffer is located on.
@@ -201,7 +201,7 @@ class HabanaFunction final : public CompiledFunction {
   const std::string &getRecipeName() const { return recipeName_; }
-  llvm::Error execute(ExecutionContext *context) override;
+  Error execute(ExecutionContext *context) override;
   ///@}
   /// \returns the backend used to compile this function.
diff --git a/lib/Backends/Habana/HabanaUtils.h b/lib/Backends/Habana/HabanaUtils.h
index 22bcad8835..aa6e6e41ee 100644
--- a/lib/Backends/Habana/HabanaUtils.h
+++ b/lib/Backends/Habana/HabanaUtils.h
@@ -22,17 +22,17 @@
 #include
 namespace glow {
-/// Given a synStatus \p status, evaluates to llvm::Error::success() if status
-/// is synSuccess and evaluates to an llvm::Error otherwise.
+/// Given a synStatus \p status, evaluates to Error::success() if status
+/// is synSuccess and evaluates to an Error otherwise.
 #define chk_make_err(status) \
   status == synSuccess \
-      ? llvm::Error::success() \
+      ? Error::success() \
       : MAKE_ERR( \
             strFormat("Expected synStatus be synSuccess (%d), instead got %d", \
                       synSuccess, status))
-/// Given a synStatus \p status, returns llvm::Error::success() if status is
-/// synSuccess and returns an llvm::Error otherwise.
+/// Given a synStatus \p status, returns Error::success() if status is
+/// synSuccess and returns an Error otherwise.
 #define chk(status) \
   do { \
     auto res = (status); \
diff --git a/lib/Backends/Interpreter/Interpreter.cpp b/lib/Backends/Interpreter/Interpreter.cpp
index 514ff8b217..7ae1e097f9 100644
--- a/lib/Backends/Interpreter/Interpreter.cpp
+++ b/lib/Backends/Interpreter/Interpreter.cpp
@@ -27,7 +27,7 @@
 using namespace glow;
-llvm::Expected<std::unique_ptr<CompiledFunction>>
+Expected<std::unique_ptr<CompiledFunction>>
 Interpreter::compile(Function *F, const BackendOptions &opts) const {
   TraceInfo traceInfo = buildManualTraceInfo(F);
   auto IR = generateAndOptimizeIR(F, *this, shouldShareBuffers());
@@ -44,8 +44,7 @@ Interpreter::compile(Function *F, const BackendOptions &opts) const {
   }
   compiledFunc->setTraceInfo(std::move(traceInfo));
-  return llvm::Expected<std::unique_ptr<CompiledFunction>>(
-      std::move(compiledFunc));
+  return Expected<std::unique_ptr<CompiledFunction>>(std::move(compiledFunc));
 }
 std::unique_ptr<CompiledFunction>
diff --git a/lib/Backends/Interpreter/Interpreter.h b/lib/Backends/Interpreter/Interpreter.h
index a60d19af8d..5c6acd5d07 100644
--- a/lib/Backends/Interpreter/Interpreter.h
+++ b/lib/Backends/Interpreter/Interpreter.h
@@ -44,7 +44,7 @@ class Interpreter final : public BackendUsingGlowIR {
   std::unique_ptr<CompiledFunction>
   compileIRWithoutConstants(std::unique_ptr<IRFunction> IR) const;
-  llvm::Expected<std::unique_ptr<CompiledFunction>>
+  Expected<std::unique_ptr<CompiledFunction>>
   compile(Function *F, const BackendOptions &opts) const override;
   bool isOpSupported(const NodeInfo &NI) const override;
diff --git a/lib/Backends/Interpreter/InterpreterDeviceManager.cpp b/lib/Backends/Interpreter/InterpreterDeviceManager.cpp
index 329d5efce8..1acd2f7607 100644
--- a/lib/Backends/Interpreter/InterpreterDeviceManager.cpp
+++ b/lib/Backends/Interpreter/InterpreterDeviceManager.cpp
@@ -17,6 +17,7 @@
 #include "Interpreter.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormatVariadic.h"
 #include "llvm/Support/raw_ostream.h"
 static llvm::cl::OptionCategory
@@ -95,8 +96,9 @@ void InterpreterDeviceManager::addNetworkImpl(const Module *module,
   }
   if (usedMemoryBytes_ + allFunctionsMemoryBytes > maxMemoryBytes_) {
-    readyCB(module, MAKE_ERR(GlowErr::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY,
-                             "Failed to add network: not enough memory"));
+    readyCB(module,
+            MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY,
+                     "Failed to add network: not enough memory"));
     return;
   }
@@ -114,7 +116,7 @@ void InterpreterDeviceManager::addNetworkImpl(const Module *module,
   // Export changes to memory use.
   exportMemoryCounters();
   // Fire the ready CB.
- readyCB(module, llvm::Error::success()); + readyCB(module, Error::success()); } void InterpreterDeviceManager::evictNetworkImpl(std::string functionName, @@ -128,13 +130,13 @@ void InterpreterDeviceManager::evictNetworkImpl(std::string functionName, functions_.erase(it); } else { evictCB(functionName, - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND, strFormat("Could not find function with name %s to evict", functionName.c_str()))); return; } exportMemoryCounters(); - evictCB(functionName, llvm::Error::success()); + evictCB(functionName, Error::success()); } void InterpreterDeviceManager::runFunctionImpl( @@ -149,7 +151,7 @@ void InterpreterDeviceManager::runFunctionImpl( dmRun.addArg("reason", "function not found"); TRACE_EVENT_SCOPE_END_NAMED(dmRun); resultCB(id, - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND, llvm::formatv("Function {0} not found", function).str()), std::move(context)); return; diff --git a/lib/Backends/Interpreter/InterpreterFunction.cpp b/lib/Backends/Interpreter/InterpreterFunction.cpp index 8ea6570288..35177d2151 100644 --- a/lib/Backends/Interpreter/InterpreterFunction.cpp +++ b/lib/Backends/Interpreter/InterpreterFunction.cpp @@ -49,7 +49,7 @@ void InterpreterFunction::collectConstants(const Module *module) { } } -llvm::Error InterpreterFunction::execute(ExecutionContext *context) { +Error InterpreterFunction::execute(ExecutionContext *context) { BoundInterpreterFunction boundFunc(constants_); auto res = boundFunc.execute(F_.get(), context); { @@ -177,8 +177,8 @@ void BoundInterpreterFunction::deleteTensor(const Value *v) { tensors_.erase(it); } -llvm::Error BoundInterpreterFunction::execute(IRFunction *F, - ExecutionContext *context) { +Error BoundInterpreterFunction::execute(IRFunction *F, + ExecutionContext *context) { { TRACE_EVENT_SCOPE(context, TraceLevel::RUNTIME, "registerTensors"); @@ -238,5 +238,5 @@ llvm::Error BoundInterpreterFunction::execute(IRFunction *F, } } - return llvm::Error::success(); + return Error::success(); } diff --git a/lib/Backends/Interpreter/InterpreterFunction.h b/lib/Backends/Interpreter/InterpreterFunction.h index 74c228166e..180ed5bf67 100644 --- a/lib/Backends/Interpreter/InterpreterFunction.h +++ b/lib/Backends/Interpreter/InterpreterFunction.h @@ -57,7 +57,7 @@ class InterpreterFunction final : public CompiledFunction { ///@{ ~InterpreterFunction() override; - llvm::Error execute(ExecutionContext *context) override; + Error execute(ExecutionContext *context) override; /// Collects constants for runtime. void collectConstants(const Module *module) override; @@ -93,7 +93,7 @@ class BoundInterpreterFunction { ~BoundInterpreterFunction(); - llvm::Error execute(IRFunction *F, ExecutionContext *context); + Error execute(IRFunction *F, ExecutionContext *context); private: /// \returns a pointer to the tensor that is saved under \p v. 
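Taken together, the backend changes above all follow one convention: `CompiledFunction::execute` and the `DeviceManager` hooks now return the project's own `Error` instead of `llvm::Error`. A minimal sketch of the shape such an implementation takes after the rename (the function name and failure condition here are illustrative, not part of the patch):

```
// Illustrative only: the post-rename error-handling shape of an execute()
// implementation. MAKE_ERR and ErrorValue::ErrorCode are used exactly as in
// the device managers above.
Error myExecute(ExecutionContext *context) {
  if (context == nullptr) {
    return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_ERROR,
                    "null ExecutionContext");
  }
  // ... run the compiled artifact ...
  return Error::success();
}
```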
diff --git a/lib/Backends/OpenCL/OpenCL.cpp b/lib/Backends/OpenCL/OpenCL.cpp
index 60bdd687ed..c63796c1fa 100644
--- a/lib/Backends/OpenCL/OpenCL.cpp
+++ b/lib/Backends/OpenCL/OpenCL.cpp
@@ -548,7 +548,7 @@ static void topK(Tensor &outW, Tensor &indW, Tensor &inW, size_t k) {
   }
 }
-llvm::Error OpenCLFunction::execute(ExecutionContext *context) {
+Error OpenCLFunction::execute(ExecutionContext *context) {
   auto clBindings = static_cast<runtime::OpenCLDeviceBindings *>(
       context->getDeviceBindings());
@@ -1441,7 +1441,7 @@ llvm::Error OpenCLFunction::execute(ExecutionContext *context) {
     kernelLaunches.clear();
   }
-  return llvm::Error::success();
+  return Error::success();
 }
 uint64_t OpenCLFunction::copyValueToDevice(
@@ -1727,7 +1727,7 @@ OCLBackend::compileIR(std::unique_ptr<IRFunction> IR) const {
   return function;
 }
-llvm::Expected<std::unique_ptr<CompiledFunction>>
+Expected<std::unique_ptr<CompiledFunction>>
 OCLBackend::compile(Function *F, const BackendOptions &opts) const {
   TraceInfo traceInfo = buildManualTraceInfo(F);
@@ -1749,8 +1749,7 @@ OCLBackend::compile(Function *F, const BackendOptions &opts) const {
       llvm::make_unique<OpenCLFunction>(std::move(IR), std::move(bundle),
                                         std::move(traceInfo));
-  return llvm::Expected<std::unique_ptr<CompiledFunction>>(
-      std::move(compiledFunc));
+  return Expected<std::unique_ptr<CompiledFunction>>(std::move(compiledFunc));
 }
 bool OCLBackend::isOpSupported(const NodeInfo &NI) const {
diff --git a/lib/Backends/OpenCL/OpenCL.h b/lib/Backends/OpenCL/OpenCL.h
index 525b1f4794..b1239c1f3e 100644
--- a/lib/Backends/OpenCL/OpenCL.h
+++ b/lib/Backends/OpenCL/OpenCL.h
@@ -109,7 +109,7 @@ class OpenCLFunction final : public CompiledFunction {
   ///@{
   ~OpenCLFunction() override;
-  llvm::Error execute(ExecutionContext *context) override;
+  Error execute(ExecutionContext *context) override;
   /// Collects constants for runtime.
   void collectConstants(const Module *module) override;
@@ -205,7 +205,7 @@ class OCLBackend final : public BackendUsingGlowIR {
   std::unique_ptr<CompiledFunction>
   compileIR(std::unique_ptr<IRFunction> IR) const override;
-  llvm::Expected<std::unique_ptr<CompiledFunction>>
+  Expected<std::unique_ptr<CompiledFunction>>
   compile(Function *F, const BackendOptions &opts) const override;
   bool transformPostLowering(Function *F,
diff --git a/lib/Backends/OpenCL/OpenCLDeviceManager.cpp b/lib/Backends/OpenCL/OpenCLDeviceManager.cpp
index 5b8172680a..6705905b5e 100644
--- a/lib/Backends/OpenCL/OpenCLDeviceManager.cpp
+++ b/lib/Backends/OpenCL/OpenCLDeviceManager.cpp
@@ -27,6 +27,7 @@
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormatVariadic.h"
 #include "llvm/Support/raw_ostream.h"
 #define DEBUG_TYPE "opencl"
@@ -56,12 +57,12 @@ OpenCLBuffer::~OpenCLBuffer() { clReleaseMemObject(buffer_); }
 } // namespace glow
 /// Helper method to parse a string parameter to an unsigned. \returns
-/// llvm::Expected with either the value or an error.
-static llvm::Expected<unsigned> parseInputAsUnsigned(std::string input) {
+/// Expected with either the value or an error.
+static Expected<unsigned> parseInputAsUnsigned(std::string input) {
   char *end;
   auto parsed = strtol(input.c_str(), &end, 10);
   if (end == input.c_str() || *end != '\0') {
-    return MAKE_ERR(GlowErr::ErrorCode::RUNTIME_ERROR,
+    return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_ERROR,
                     "Invalid input expected integer got: " + input);
   }
   return parsed;
@@ -83,7 +84,7 @@ OpenCLCommandQueuePool::~OpenCLCommandQueuePool() {
   }
 }
-llvm::Expected<OpenCLCommandQueue> OpenCLCommandQueuePool::requestCommandQueue(
+Expected<OpenCLCommandQueue> OpenCLCommandQueuePool::requestCommandQueue(
     cl_command_queue_properties properties) {
   OpenCLCommandQueue ret;
   // Get the vector that has queues with the desired properties.
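`requestCommandQueue` now returns `Expected<OpenCLCommandQueue>`. Call sites unpack such values with the `ASSIGN_VALUE_OR_RETURN_ERR` macro, as seen later in this patch in `Caffe2ModelLoader`; a hedged sketch of that pattern against this pool API (the helper function itself is hypothetical):

```
// Hypothetical caller: propagates a failed queue request to its own caller.
// The surrounding function must itself return Error (or Expected<...>) for
// ASSIGN_VALUE_OR_RETURN_ERR to work.
Error runOnPooledQueue(OpenCLCommandQueuePool &pool) {
  OpenCLCommandQueue queue;
  ASSIGN_VALUE_OR_RETURN_ERR(queue, pool.requestCommandQueue());
  // ... enqueue and finish work on the queue ...
  pool.returnCommandQueue(queue);
  return Error::success();
}
```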
@@ -138,7 +139,7 @@ unsigned OpenCLCommandQueuePool::getNumQueuesAvailableForProperties(
   return it != queuesAvailableByProps_.end() ? it->second : 0;
 }
-llvm::Expected<cl_mem> OpenCLDeviceManager::allocDeviceBuffer(uint64_t size) {
+Expected<cl_mem> OpenCLDeviceManager::allocDeviceBuffer(uint64_t size) {
   const uint64_t alignment = 128;
   // Always allocate buffers properly aligned to hold values of any type.
   size = alignedSize(size, alignment);
@@ -150,7 +151,7 @@ llvm::Expected<cl_mem> OpenCLDeviceManager::allocDeviceBuffer(uint64_t size) {
 OpenCLDeviceManager::OpenCLDeviceManager(const DeviceConfig &config)
     : QueueBackedDeviceManager(config) {}
-llvm::Error OpenCLDeviceManager::parseConfig() {
+Error OpenCLDeviceManager::parseConfig() {
   auto it = config_.parameters.find("deviceId");
   unsigned value;
   if (it != config_.parameters.end()) {
@@ -169,15 +170,15 @@ llvm::Error OpenCLDeviceManager::parseConfig() {
     } else if (it->second == "false") {
       clDoProfile = false;
     } else {
-      return MAKE_ERR(GlowErr::ErrorCode::RUNTIME_ERROR,
+      return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_ERROR,
                       "Invalid input expected true or false got: " + it->second);
     }
   }
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Error OpenCLDeviceManager::init() {
+Error OpenCLDeviceManager::init() {
   // The OpenCL Backend defines three command line options: doProfile, deviceId,
   // and platformId. If the parameter is not provided we use the CL
   // options from the OpenCL Backend.
@@ -238,7 +239,7 @@ llvm::Error OpenCLDeviceManager::init() {
   Stats()->incrementCounter(kDevicesUsedOpenCL);
   exportMemoryCounters();
-  return llvm::Error::success();
+  return Error::success();
 }
 OpenCLDeviceManager::~OpenCLDeviceManager() {
@@ -296,8 +297,9 @@ void OpenCLDeviceManager::addNetworkImpl(const Module *module,
   if (usedMemoryBytes_ + sizeInBytes > maxMemoryBytes_) {
     // Free the constants.
     bundle.freeConstants();
-    readyCB(module, MAKE_ERR(GlowErr::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY,
-                             "Failed to add network: not enough memory"));
+    readyCB(module,
+            MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY,
+                     "Failed to add network: not enough memory"));
     return;
   }
@@ -309,7 +311,7 @@ void OpenCLDeviceManager::addNetworkImpl(const Module *module,
   if (!commands) {
     readyCB(module,
             MAKE_ERR(
-                GlowErr::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY,
+                ErrorValue::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY,
                 "Failed to add network: could not create CL command queue."));
     return;
   }
@@ -371,7 +373,7 @@ void OpenCLDeviceManager::addNetworkImpl(const Module *module,
   exportMemoryCounters();
   // Fire the ready CB.
-  readyCB(module, llvm::Error::success());
+  readyCB(module, Error::success());
 }
 void OpenCLDeviceManager::evictNetworkImpl(std::string functionName,
@@ -389,16 +391,16 @@ void OpenCLDeviceManager::evictNetworkImpl(std::string functionName,
     }
   } else {
     evictCB(functionName,
-            MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND,
+            MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND,
                      strFormat("Could not find function with name %s to evict",
                                functionName.c_str())));
     return;
   }
   exportMemoryCounters();
-  evictCB(functionName, llvm::Error::success());
+  evictCB(functionName, Error::success());
 }
-llvm::Expected<OpenCLCommandQueue>
+Expected<OpenCLCommandQueue>
 OpenCLDeviceManager::requestRunCommandQueue(CompiledFunction *function) {
   auto traceInfo = function->getTraceInfo();
   cl_command_queue_properties props =
@@ -422,7 +424,7 @@ void OpenCLDeviceManager::runFunctionImpl(
     dmRun.addArg("reason", "function not found");
     TRACE_EVENT_SCOPE_END_NAMED(dmRun);
     resultCB(id,
-             MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND,
+             MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND,
                       llvm::formatv("Function {0} not found", function).str()),
             std::move(context));
     return;
diff --git a/lib/Backends/OpenCL/OpenCLDeviceManager.h b/lib/Backends/OpenCL/OpenCLDeviceManager.h
index 2ab142a86e..07eacb8ff4 100644
--- a/lib/Backends/OpenCL/OpenCLDeviceManager.h
+++ b/lib/Backends/OpenCL/OpenCLDeviceManager.h
@@ -70,7 +70,7 @@ class OpenCLCommandQueuePool {
   void setDevice(const cl_device_id device) { device_ = device; }
   /// Request a command queue from the pool that has the properties specified
   /// in \p properties.
-  llvm::Expected<OpenCLCommandQueue>
+  Expected<OpenCLCommandQueue>
   requestCommandQueue(cl_command_queue_properties properties = 0);
   /// Return the command queue \p queue to the pool.
   void returnCommandQueue(OpenCLCommandQueue &queue);
@@ -145,7 +145,7 @@ class OpenCLDeviceManager : public QueueBackedDeviceManager {
   std::map<std::string, std::shared_ptr<OpenCLBuffer>> buffers_;
   /// Allocate a device buffer of required \p size.
-  llvm::Expected<cl_mem> allocDeviceBuffer(uint64_t size);
+  Expected<cl_mem> allocDeviceBuffer(uint64_t size);
   /// Device name.
   std::string name_;
@@ -154,7 +154,7 @@ class OpenCLDeviceManager : public QueueBackedDeviceManager {
   OpenCLCommandQueuePool commandQueuePool_;
   /// Requests a command queue for the current run.
-  llvm::Expected<OpenCLCommandQueue>
+  Expected<OpenCLCommandQueue>
   requestRunCommandQueue(CompiledFunction *function);
   /// Returns a command queue.
@@ -165,11 +165,11 @@ class OpenCLDeviceManager : public QueueBackedDeviceManager {
   ~OpenCLDeviceManager();
-  llvm::Error init() override;
+  Error init() override;
-  /// Parse config object provided at initialization \returns llvm::Error
+  /// Parse config object provided at initialization \returns Error
   /// indicating success/failure.
-  llvm::Error parseConfig();
+  Error parseConfig();
   /// Returns the amount of memory in bytes available on the device when no
   /// models are loaded.
 uint64_t getMaximumMemory() const override;
diff --git a/lib/CodeGen/MemoryAllocator.cpp b/lib/CodeGen/MemoryAllocator.cpp
index 74484cd2fa..92ce47eb06 100644
--- a/lib/CodeGen/MemoryAllocator.cpp
+++ b/lib/CodeGen/MemoryAllocator.cpp
@@ -21,7 +21,6 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 #define DEBUG_TYPE "memory-allocator"
diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp
index 55ff9e1293..f4f4a522ca 100644
--- a/lib/ExecutionEngine/ExecutionEngine.cpp
+++ b/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -104,11 +104,10 @@ void ExecutionEngine::runInternal(ExecutionContext &context,
   std::unique_ptr<ExecutionContext> contextPtr(&context);
   std::promise<void> runPromise;
   auto fut = runPromise.get_future();
-  llvm::Error runErr = llvm::Error::success();
-  MARK_ERR_CHECKED(runErr);
+  Error runErr = Error::empty();
   hostManager_->runNetwork(
       name, std::move(contextPtr),
-      [&runPromise, &runErr](runtime::RunIdentifierTy, llvm::Error err,
+      [&runPromise, &runErr](runtime::RunIdentifierTy, Error err,
                              std::unique_ptr<ExecutionContext> contextPtr) {
         // Don't delete context.
         contextPtr.release();
diff --git a/lib/Exporter/ONNXModelWriter.cpp b/lib/Exporter/ONNXModelWriter.cpp
index 6ccef4ec15..a47537fbad 100644
--- a/lib/Exporter/ONNXModelWriter.cpp
+++ b/lib/Exporter/ONNXModelWriter.cpp
@@ -184,8 +184,8 @@ void inputsToProto(const Node *node, ONNX_NAMESPACE::NodeProto *proto) {
 /// visited, signaling that such nodes must be ignored,
 /// \returns error.
 template <typename T>
-llvm::Error writeMatMulKind(const T *node, ONNX_TRAITS::GraphProto &graph,
-                            ReportedNodes &reporter) {
+Error writeMatMulKind(const T *node, ONNX_TRAITS::GraphProto &graph,
+                      ReportedNodes &reporter) {
   auto *proto = graph.add_node();
   proto->set_name(node->getName());
   proto->set_op_type("MatMul");
@@ -212,7 +212,7 @@ llvm::Error writeMatMulKind(const T *node, ONNX_TRAITS::GraphProto &graph,
   }
   outputsToProto(node, proto);
-  return llvm::Error::success();
+  return Error::success();
 }
 /// Writes Arithmetic operators with name \p opName from Node \p node into
 /// provided graph protobuf \p graph, optionally reports intermediate nodes as
 /// visited, signaling that such nodes must be ignored,
 /// \returns error.
 template <typename T>
-llvm::Error writeArithmetic(const std::string &opName, const T *node,
-                            ONNX_TRAITS::GraphProto &graph,
-                            ReportedNodes &reporter) {
+Error writeArithmetic(const std::string &opName, const T *node,
+                      ONNX_TRAITS::GraphProto &graph, ReportedNodes &reporter) {
   auto *proto = graph.add_node();
   proto->set_name(node->getName());
   proto->set_op_type(opName);
@@ -290,7 +289,7 @@ llvm::Error writeArithmetic(const std::string &opName, const T *node,
     addValueAttribute(proto, "broadcast", 1UL);
   }
-  return llvm::Error::success();
+  return Error::success();
 }
 void tensorShapeFromInput(const std::string &name, TypeRef ty,
@@ -311,7 +310,7 @@ void tensorShapeFromInput(const std::string &name, TypeRef ty,
 ONNXModelWriter::ONNXModelWriter(const std::string &modelFilename, Function &F,
                                  size_t irVersion, size_t opsetVersion,
-                                 llvm::Error *errPtr, bool textMode)
+                                 Error *errPtr, bool textMode)
     : CommonOperatorWriter(modelFilename, F, errPtr),
       opsetVersion_(opsetVersion) {
   // If errPtr already contains an error then don't continue with constructor.
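The `errPtr` parameter threads constructor failures out of `ONNXModelWriter` without exceptions; the next hunk shows the setup lambda this relies on. The overall idiom, sketched with a hypothetical writer class (the else branch mirrors the `EXIT_ON_ERR` usage visible in `ProtobufWriter` later in this patch):

```
// Sketch of the constructor error idiom; MyWriter is a placeholder class,
// not part of this patch.
MyWriter::MyWriter(const std::string &filename, Function &F, Error *errPtr) {
  auto setup = [&]() -> Error {
    RETURN_ERR_IF_NOT(!filename.empty(), "empty output filename");
    return Error::success();
  };
  if (errPtr) {
    *errPtr = setup(); // Report to the caller, who must check the Error.
  } else {
    EXIT_ON_ERR(setup()); // No out-param: abort on failure.
  }
}
```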
@@ -319,9 +318,9 @@ ONNXModelWriter::ONNXModelWriter(const std::string &modelFilename, Function &F, return; } - // Lambda to setup the ONNXModelWriter and return any llvm::Errors that were + // Lambda to setup the ONNXModelWriter and return any Errors that were // raised. - auto setup = [&]() -> llvm::Error { + auto setup = [&]() -> Error { // Loop through all nodes, output Graph to Model protobuf. ONNX_NAMESPACE::ModelProto modelProto; modelProto.set_ir_version(irVersion); @@ -432,18 +431,17 @@ void ONNXModelWriter::tensorShapeFromPlaceholder(const Placeholder *PH, tensorShapeFromInput(PH->getName(), PH->getType(), valueProto); } -llvm::Error ONNXModelWriter::writeAllWithNode(const std::string &opName, - const Node *node, - NodeType *proto) { +Error ONNXModelWriter::writeAllWithNode(const std::string &opName, + const Node *node, NodeType *proto) { proto->set_name(node->getName()); proto->set_op_type(opName); inputsToProto(node, proto); outputsToProto(node, proto); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeAll(const std::string &opName, - const Node *node, GraphType &graph) { +Error ONNXModelWriter::writeAll(const std::string &opName, const Node *node, + GraphType &graph) { return writeAllWithNode(opName, node, graph.add_node()); } @@ -459,7 +457,7 @@ bool ONNXModelWriter::hasUsesOfKind(const Node *node, Kinded::Kind kind) { //===-----------------------------------------------------------------===// // Operators Supported by ONNX //===-----------------------------------------------------------------===// -llvm::Error ONNXModelWriter::writePad(const PadNode *node, GraphType &graph) { +Error ONNXModelWriter::writePad(const PadNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. switch (node->getMode()) { @@ -474,7 +472,7 @@ llvm::Error ONNXModelWriter::writePad(const PadNode *node, GraphType &graph) { break; default: RETURN_ERR("Pad: Invalid mode", - GlowErr::ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR); + ErrorValue::ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR); } addValueAttribute(proto, "pads", node->getPads()); @@ -486,8 +484,7 @@ llvm::Error ONNXModelWriter::writePad(const PadNode *node, GraphType &graph) { return writeAllWithNode("Pad", node, proto); } -llvm::Error ONNXModelWriter::writeConcat(const ConcatNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeConcat(const ConcatNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "axis", node->getDim()); @@ -495,15 +492,15 @@ llvm::Error ONNXModelWriter::writeConcat(const ConcatNode *node, return writeAllWithNode("Concat", node, proto); } -llvm::Error ONNXModelWriter::writeTranspose(const TransposeNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeTranspose(const TransposeNode *node, + GraphType &graph) { // Some nodes create transpose for outputs. 
 auto *input = node->getInput().getNode();
   if (llvm::dyn_cast<ConvolutionNode>(input) ||
       llvm::dyn_cast<AvgPoolNode>(input) ||
       llvm::dyn_cast<MaxPoolNode>(input) ||
       llvm::dyn_cast<SpaceToDepthNode>(input)) {
-    return llvm::Error::success();
+    return Error::success();
   }
   auto *proto = graph.add_node();
@@ -513,8 +510,8 @@ llvm::Error ONNXModelWriter::writeTranspose(const TransposeNode *node,
   return writeAllWithNode("Transpose", node, proto);
 }
-llvm::Error ONNXModelWriter::writeConvolution(const ConvolutionNode *node,
-                                              GraphType &graph) {
+Error ONNXModelWriter::writeConvolution(const ConvolutionNode *node,
+                                        GraphType &graph) {
   assert(node->getLayout() == NHWC && "can only write NHWC Convolutions");
   auto *proto = graph.add_node();
   // Add dictionary entries.
@@ -547,12 +544,11 @@ llvm::Error ONNXModelWriter::writeConvolution(const ConvolutionNode *node,
   // Use the output of transpose node.
   outputKindToProto(Kinded::Kind::TransposeNodeKind, node, proto);
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Error
-ONNXModelWriter::writeBatchedReduceMean(const BatchedReduceMeanNode *node,
-                                        GraphType &graph) {
+Error ONNXModelWriter::writeBatchedReduceMean(const BatchedReduceMeanNode *node,
+                                              GraphType &graph) {
   auto *proto = graph.add_node();
   // Add dictionary entries.
   addValueAttribute(proto, "axes", node->getAxes());
@@ -567,12 +563,11 @@ ONNXModelWriter::writeBatchedReduceMean(const BatchedReduceMeanNode *node,
     addValueAttribute(proto, "keepdims", 1);
   }
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Error
-ONNXModelWriter::writeBatchedReduceAdd(const BatchedReduceAddNode *node,
-                                       GraphType &graph) {
+Error ONNXModelWriter::writeBatchedReduceAdd(const BatchedReduceAddNode *node,
+                                             GraphType &graph) {
   auto *proto = graph.add_node();
   // Add dictionary entries.
   unsigned_t axis = node->getAxis();
@@ -589,12 +584,11 @@ ONNXModelWriter::writeBatchedReduceAdd(const BatchedReduceAddNode *node,
     addValueAttribute(proto, "keepdims", 1);
   }
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Error
-ONNXModelWriter::writeBatchedReduceMin(const BatchedReduceMinNode *node,
-                                       GraphType &graph) {
+Error ONNXModelWriter::writeBatchedReduceMin(const BatchedReduceMinNode *node,
+                                             GraphType &graph) {
   auto *proto = graph.add_node();
   // Find dictionary entries.
   addValueAttribute(proto, "axes", node->getAxes());
@@ -602,9 +596,8 @@ ONNXModelWriter::writeBatchedReduceMin(const BatchedReduceMinNode *node,
   return writeAllWithNode("ReduceMin", node, proto);
 }
-llvm::Error
-ONNXModelWriter::writeBatchNormalization(const BatchNormalizationNode *node,
-                                         GraphType &graph) {
+Error ONNXModelWriter::writeBatchNormalization(
+    const BatchNormalizationNode *node, GraphType &graph) {
   auto *proto = graph.add_node();
   // Add dictionary entries.
   addValueAttribute(proto, "epsilon", node->getEpsilon());
@@ -620,12 +613,11 @@ ONNXModelWriter::writeBatchNormalization(const BatchNormalizationNode *node,
   proto->add_input(node->getVar().getNode()->getName());
   outputsToProto(node, proto);
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Error
-ONNXModelWriter::writeMeanVarNormalization(const MeanVarNormalizationNode *node,
-                                           GraphType &graph) {
+Error ONNXModelWriter::writeMeanVarNormalization(
    const MeanVarNormalizationNode *node, GraphType &graph) {
   auto *proto = graph.add_node();
   // Add dictionary entries.
addValueAttribute(proto, "channel", node->getChannelIdx()); @@ -636,11 +628,10 @@ ONNXModelWriter::writeMeanVarNormalization(const MeanVarNormalizationNode *node, inputsToProto(node, proto); outputsToProto(node, proto); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeSlice(const SliceNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeSlice(const SliceNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. auto starts = node->getStart(); @@ -683,10 +674,10 @@ llvm::Error ONNXModelWriter::writeSlice(const SliceNode *node, attrEnds->add_ints(outs[b] + starts[b]); } } - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writePow(const PowNode *node, GraphType &graph) { +Error ONNXModelWriter::writePow(const PowNode *node, GraphType &graph) { auto *proto = graph.add_node(); proto->set_name(node->getName()); proto->add_input(node->getLHS().getNode()->getName()); @@ -713,10 +704,10 @@ llvm::Error ONNXModelWriter::writePow(const PowNode *node, GraphType &graph) { } reportedNodes_.insert(RHSN); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeTopK(const TopKNode *node, GraphType &graph) { +Error ONNXModelWriter::writeTopK(const TopKNode *node, GraphType &graph) { auto *proto = graph.add_node(); Tensor scalar(ElemKind::Int64ITy, {1}); @@ -730,11 +721,10 @@ llvm::Error ONNXModelWriter::writeTopK(const TopKNode *node, GraphType &graph) { RETURN_IF_ERR(writeAllWithNode("TopK", node, proto)); proto->add_input("k"); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeArgMax(const ArgMaxNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeArgMax(const ArgMaxNode *node, GraphType &graph) { auto *proto = graph.add_node(); Tensor axis(ElemKind::Int64ITy, {1}); @@ -753,11 +743,10 @@ llvm::Error ONNXModelWriter::writeArgMax(const ArgMaxNode *node, writeTensor(keepDims, tensorProto); RETURN_IF_ERR(writeAllWithNode("ArgMax", node, proto)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writePRelu(const PReluNode *node, - GraphType &graph) { +Error ONNXModelWriter::writePRelu(const PReluNode *node, GraphType &graph) { auto *proto = graph.add_node(); proto->set_name(node->getName()); proto->set_op_type("PRelu"); @@ -784,11 +773,10 @@ llvm::Error ONNXModelWriter::writePRelu(const PReluNode *node, } outputsToProto(node, proto); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeGather(const GatherNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeGather(const GatherNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. 
 auto batchDims = node->getBatchDims();
@@ -801,25 +789,23 @@ llvm::Error ONNXModelWriter::writeGather(const GatherNode *node,
   }
 }
-llvm::Error ONNXModelWriter::writeMatMul(const MatMulNode *node,
-                                         GraphType &graph) {
+Error ONNXModelWriter::writeMatMul(const MatMulNode *node, GraphType &graph) {
   return writeMatMulKind(node, graph, reportedNodes_);
 }
-llvm::Error ONNXModelWriter::writeBatchMatMul(const BatchMatMulNode *node,
-                                              GraphType &graph) {
+Error ONNXModelWriter::writeBatchMatMul(const BatchMatMulNode *node,
+                                        GraphType &graph) {
   return writeMatMulKind(node, graph, reportedNodes_);
 }
-llvm::Error ONNXModelWriter::writeReshape(const ReshapeNode *node,
-                                          GraphType &graph) {
+Error ONNXModelWriter::writeReshape(const ReshapeNode *node, GraphType &graph) {
   // ReduceMean/ReduceSum nodes create reshape for the output.
   // Therefore check if this reshape has BatchedReduceMean/BatchedReduceAdd
   // node as input.
   const Node *input = node->getInput().getNode();
   if (llvm::dyn_cast<BatchedReduceMeanNode>(input) ||
       llvm::dyn_cast<BatchedReduceAddNode>(input)) {
-    return llvm::Error::success();
+    return Error::success();
   }
   auto *proto = graph.add_node();
@@ -830,8 +816,8 @@ llvm::Error ONNXModelWriter::writeReshape(const ReshapeNode *node,
   return writeAllWithNode("Reshape", node, proto);
 }
-llvm::Error ONNXModelWriter::writeBucketize(const BucketizeNode *node,
-                                            GraphType &graph) {
+Error ONNXModelWriter::writeBucketize(const BucketizeNode *node,
+                                      GraphType &graph) {
   auto *proto = graph.add_node();
   // Add dictionary entries.
   addValueAttribute(proto, "boundaries", node->getBoundaries());
@@ -839,8 +825,8 @@ llvm::Error ONNXModelWriter::writeBucketize(const BucketizeNode *node,
   return writeAllWithNode("Bucketize", node, proto);
 }
-llvm::Error ONNXModelWriter::writeResizeNearest(const ResizeNearestNode *node,
-                                                GraphType &graph) {
+Error ONNXModelWriter::writeResizeNearest(const ResizeNearestNode *node,
+                                          GraphType &graph) {
   auto *proto = graph.add_node();
   // Find dictionary entries.
   addValueAttribute(proto, "height_scale", node->getHeightScale());
@@ -849,19 +835,18 @@ llvm::Error ONNXModelWriter::writeResizeNearest(const ResizeNearestNode *node,
   return writeAllWithNode(node->getName(), node, proto);
 }
-llvm::Error ONNXModelWriter::writeSoftMax(const SoftMaxNode *node,
-                                          GraphType &graph) {
+Error ONNXModelWriter::writeSoftMax(const SoftMaxNode *node, GraphType &graph) {
   auto *proto = graph.add_node();
   proto->set_name(node->getName());
   proto->set_op_type("Softmax");
   outputsToProto(node, proto);
   // Find input from Reshape node
   proto->add_input(node->getInput().getNode()->getName());
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Error ONNXModelWriter::writeReplaceNaN(const ReplaceNaNNode *node,
-                                             GraphType &graph) {
+Error ONNXModelWriter::writeReplaceNaN(const ReplaceNaNNode *node,
+                                       GraphType &graph) {
   auto *proto = graph.add_node();
   // Add dictionary entries.
   float value = node->getValue();
@@ -871,8 +856,8 @@ llvm::Error ONNXModelWriter::writeReplaceNaN(const ReplaceNaNNode *node,
   return writeAllWithNode("ReplaceNaN", node, proto);
 }
-llvm::Error ONNXModelWriter::writeGatherRanges(const GatherRangesNode *node,
-                                               GraphType &graph) {
+Error ONNXModelWriter::writeGatherRanges(const GatherRangesNode *node,
+                                         GraphType &graph) {
   auto *proto = graph.add_node();
   // Add dictionary entries.
addValueAttribute(proto, "maxOutputSize", node->getOutput().dims()[0]); @@ -880,9 +865,8 @@ llvm::Error ONNXModelWriter::writeGatherRanges(const GatherRangesNode *node, return writeAllWithNode("GatherRanges", node, proto); } -llvm::Error -ONNXModelWriter::writeSparseToDenseMask(const SparseToDenseMaskNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeSparseToDenseMask(const SparseToDenseMaskNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "mask", node->getMask()); @@ -890,9 +874,8 @@ ONNXModelWriter::writeSparseToDenseMask(const SparseToDenseMaskNode *node, return writeAllWithNode("SparseToDenseMask", node, proto); } -llvm::Error -ONNXModelWriter::writeAdaptiveAvgPool(const AdaptiveAvgPoolNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeAdaptiveAvgPool(const AdaptiveAvgPoolNode *node, + GraphType &graph) { auto *proto = graph.add_node(); const auto outShape = ShapeNHWC(node->getResult().dims()); @@ -902,7 +885,7 @@ ONNXModelWriter::writeAdaptiveAvgPool(const AdaptiveAvgPoolNode *node, return writeAllWithNode("AdaptiveAvgPool", node, proto); } -llvm::Error ONNXModelWriter::writeLocalResponseNormalization( +Error ONNXModelWriter::writeLocalResponseNormalization( const LocalResponseNormalizationNode *node, GraphType &graph) { auto *proto = graph.add_node(); proto->set_name(node->getName()); @@ -922,11 +905,11 @@ llvm::Error ONNXModelWriter::writeLocalResponseNormalization( addValueAttribute(proto, "beta", node->getBeta()); addValueAttribute(proto, "bias", node->getK()); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeBatchBoxCox(const BatchBoxCoxNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeBatchBoxCox(const BatchBoxCoxNode *node, + GraphType &graph) { auto *proto = graph.add_node(); addValueAttribute(proto, "epsilon", node->getEpsilon()); return writeAllWithNode("BatchBoxCox", node, proto); @@ -935,8 +918,7 @@ llvm::Error ONNXModelWriter::writeBatchBoxCox(const BatchBoxCoxNode *node, //===-----------------------------------------------------------------===// // Operators Supported by Glow only //===-----------------------------------------------------------------===// -llvm::Error ONNXModelWriter::writeModulo(const ModuloNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeModulo(const ModuloNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. 
addValueAttribute(proto, "divisor", node->getDivisor()); @@ -969,24 +951,22 @@ void writePool(const T *node, ONNX_NAMESPACE::NodeProto *proto) { } } // namespace -llvm::Error ONNXModelWriter::writeAvgPool(const AvgPoolNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeAvgPool(const AvgPoolNode *node, GraphType &graph) { auto *proto = graph.add_node(); proto->set_op_type("AveragePool"); writePool(node, proto); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeMaxPool(const MaxPoolNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeMaxPool(const MaxPoolNode *node, GraphType &graph) { auto *proto = graph.add_node(); proto->set_op_type("MaxPool"); writePool(node, proto); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeConvolution3D(const Convolution3DNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeConvolution3D(const Convolution3DNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "kernel_shape", node->getKernels()); @@ -997,8 +977,8 @@ llvm::Error ONNXModelWriter::writeConvolution3D(const Convolution3DNode *node, return writeAllWithNode("Convolution3D", node, proto); } -llvm::Error ONNXModelWriter::writeSpaceToDepth(const SpaceToDepthNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeSpaceToDepth(const SpaceToDepthNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Find input from Transpose node @@ -1016,11 +996,11 @@ llvm::Error ONNXModelWriter::writeSpaceToDepth(const SpaceToDepthNode *node, // Use the output of transpose node, if any. outputKindToProto(Kinded::Kind::TransposeNodeKind, node, proto); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeChannelShuffle(const ChannelShuffleNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeChannelShuffle(const ChannelShuffleNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "group", node->getGroup()); @@ -1029,9 +1009,8 @@ llvm::Error ONNXModelWriter::writeChannelShuffle(const ChannelShuffleNode *node, return writeAllWithNode("ChannelShuffle", node, proto); } -llvm::Error -ONNXModelWriter::writeQuantizationProfile(const QuantizationProfileNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeQuantizationProfile( + const QuantizationProfileNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "name", node->getProfiledNodeName()); @@ -1040,8 +1019,8 @@ ONNXModelWriter::writeQuantizationProfile(const QuantizationProfileNode *node, return writeAllWithNode("QuantizationProfile", node, proto); } -llvm::Error ONNXModelWriter::writeTraceEvent(const TraceEventNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeTraceEvent(const TraceEventNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "name", node->getEventName()); @@ -1051,8 +1030,8 @@ llvm::Error ONNXModelWriter::writeTraceEvent(const TraceEventNode *node, return writeAllWithNode("TraceEvent", node, proto); } -llvm::Error ONNXModelWriter::writeInsertTensor(const InsertTensorNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeInsertTensor(const InsertTensorNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. 
addValueAttribute(proto, "start", node->getStart()); @@ -1062,7 +1041,7 @@ llvm::Error ONNXModelWriter::writeInsertTensor(const InsertTensorNode *node, return writeAllWithNode("InsertTensor", node, proto); } -llvm::Error ONNXModelWriter::writeChannelwiseQuantizedConvolution( +Error ONNXModelWriter::writeChannelwiseQuantizedConvolution( const ChannelwiseQuantizedConvolutionNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. @@ -1075,8 +1054,7 @@ llvm::Error ONNXModelWriter::writeChannelwiseQuantizedConvolution( return writeAllWithNode("ChannelwiseQuantizedConvolution", node, proto); } -llvm::Error ONNXModelWriter::writeSplat(const SplatNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeSplat(const SplatNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Convert value to tensor with result shape Tensor tensor(ElemKind::FloatTy, node->getResult().dims()); @@ -1092,26 +1070,26 @@ llvm::Error ONNXModelWriter::writeSplat(const SplatNode *node, return writeAllWithNode("Splat", node, proto); } -llvm::Error ONNXModelWriter::writeAdd(const AddNode *node, GraphType &graph) { +Error ONNXModelWriter::writeAdd(const AddNode *node, GraphType &graph) { return writeArithmetic("Add", node, graph, reportedNodes_); } -llvm::Error ONNXModelWriter::writeDiv(const DivNode *node, GraphType &graph) { +Error ONNXModelWriter::writeDiv(const DivNode *node, GraphType &graph) { return writeArithmetic("Div", node, graph, reportedNodes_); } -llvm::Error ONNXModelWriter::writeMul(const MulNode *node, GraphType &graph) { +Error ONNXModelWriter::writeMul(const MulNode *node, GraphType &graph) { return writeArithmetic("Mul", node, graph, reportedNodes_); } -llvm::Error ONNXModelWriter::writeSub(const SubNode *node, GraphType &graph) { +Error ONNXModelWriter::writeSub(const SubNode *node, GraphType &graph) { return writeArithmetic("Sub", node, graph, reportedNodes_); } // Default exporting algorithm. #define DEF_ALL_WRITER_NODE(NAME) \ - llvm::Error ONNXModelWriter::write##NAME(const NAME##Node *node, \ - GraphType &graph) { \ + Error ONNXModelWriter::write##NAME(const NAME##Node *node, \ + GraphType &graph) { \ return writeAll(#NAME, node, graph); \ } @@ -1142,8 +1120,8 @@ DEF_ALL_WRITER_NODE(RowwiseQuantizedSparseLengthsWeightedSum) DEF_ALL_WRITER_NODE(FusedRowwiseQuantizedSparseLengthsSum) DEF_ALL_WRITER_NODE(FusedRowwiseQuantizedSparseLengthsWeightedSum) -llvm::Error ONNXModelWriter::writeConvertTo(const ConvertToNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeConvertTo(const ConvertToNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "shape", node->getResult().dims()); @@ -1151,8 +1129,7 @@ llvm::Error ONNXModelWriter::writeConvertTo(const ConvertToNode *node, return writeAllWithNode("ConvertTo", node, proto); } -llvm::Error ONNXModelWriter::writeSelect(const SelectNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeSelect(const SelectNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. 
addValueAttribute(proto, "shape", node->getResult().dims()); @@ -1160,8 +1137,8 @@ llvm::Error ONNXModelWriter::writeSelect(const SelectNode *node, return writeAllWithNode("Select", node, proto); } -llvm::Error ONNXModelWriter::writeQuantize(const QuantizeNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeQuantize(const QuantizeNode *node, + GraphType &graph) { auto *proto = graph.add_node(); auto outTy = node->getResult().getType(); // Add dictionary entries. @@ -1171,8 +1148,8 @@ llvm::Error ONNXModelWriter::writeQuantize(const QuantizeNode *node, return writeAllWithNode("Quantize", node, proto); } -llvm::Error ONNXModelWriter::writeIntLookupTable(const IntLookupTableNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeIntLookupTable(const IntLookupTableNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "shape", node->getResult().dims()); @@ -1190,9 +1167,8 @@ llvm::Error ONNXModelWriter::writeIntLookupTable(const IntLookupTableNode *node, return writeAllWithNode("IntLookupTable", node, proto); } -llvm::Error -ONNXModelWriter::writeLengthsRangeFill(const LengthsRangeFillNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeLengthsRangeFill(const LengthsRangeFillNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "size", node->getResult().dims()[0]); @@ -1200,9 +1176,8 @@ ONNXModelWriter::writeLengthsRangeFill(const LengthsRangeFillNode *node, return writeAllWithNode("LengthsRangeFill", node, proto); } -llvm::Error -ONNXModelWriter::writeRescaleQuantized(const RescaleQuantizedNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeRescaleQuantized(const RescaleQuantizedNode *node, + GraphType &graph) { auto *proto = graph.add_node(); auto outTy = node->getResult().getType(); // Add dictionary entries. 
@@ -1212,8 +1187,8 @@ ONNXModelWriter::writeRescaleQuantized(const RescaleQuantizedNode *node, return writeAllWithNode("RescaleQuantized", node, proto); } -llvm::Error ONNXModelWriter::writeFullyConnected(const FullyConnectedNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeFullyConnected(const FullyConnectedNode *node, + GraphType &graph) { auto *proto = graph.add_node(); proto->set_name(node->getName()); proto->set_op_type("FCTransposed"); @@ -1230,11 +1205,11 @@ llvm::Error ONNXModelWriter::writeFullyConnected(const FullyConnectedNode *node, proto->add_input(node->getWeights().getNode()->getName()); proto->add_input(node->getBias().getNode()->getName()); outputsToProto(node, proto); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeSparseToDense(const SparseToDenseNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeSparseToDense(const SparseToDenseNode *node, + GraphType &graph) { auto *proto = graph.add_node(); RETURN_IF_ERR(writeAllWithNode("SparseToDense", node, proto)); @@ -1250,10 +1225,10 @@ llvm::Error ONNXModelWriter::writeSparseToDense(const SparseToDenseNode *node, auto *inputProto = graph.add_input(); tensorShapeFromInput("dataToInferDim", outTy, inputProto); proto->add_input("dataToInferDim"); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelWriter::writeTile(const TileNode *node, GraphType &graph) { +Error ONNXModelWriter::writeTile(const TileNode *node, GraphType &graph) { auto *proto = graph.add_node(); // unwind Tile @@ -1289,12 +1264,12 @@ llvm::Error ONNXModelWriter::writeTile(const TileNode *node, GraphType &graph) { // Add indices as input to the Tile node proto->add_input("indices"); - return llvm::Error::success(); + return Error::success(); } // Unsupported for export Glow nodes. #define DEF_UNSUPPORTED_STORAGE(NAME) \ - llvm::Error ONNXModelWriter::write##NAME(const NAME *node, GraphType &) { \ + Error ONNXModelWriter::write##NAME(const NAME *node, GraphType &) { \ return writeUnexpectedKind(node); \ } @@ -1305,8 +1280,7 @@ DEF_UNSUPPORTED_STORAGE(Storage) // Unsupported for export Glow nodes. #define DEF_UNSUPPORTED_NODE(NAME) \ - llvm::Error ONNXModelWriter::write##NAME(const NAME##Node *node, \ - GraphType &) { \ + Error ONNXModelWriter::write##NAME(const NAME##Node *node, GraphType &) { \ return writeUnexpectedKind(node); \ } @@ -1340,8 +1314,8 @@ DEF_UNSUPPORTED_NODE(AdaptiveAvgPoolGrad) #ifdef GLOW_WITH_CPU -llvm::Error ONNXModelWriter::writeCPUMaxSplat(const CPUMaxSplatNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeCPUMaxSplat(const CPUMaxSplatNode *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "value", node->getSplatValue()); @@ -1349,8 +1323,8 @@ llvm::Error ONNXModelWriter::writeCPUMaxSplat(const CPUMaxSplatNode *node, return writeAllWithNode("CPUMaxSplat", node, proto); } -llvm::Error ONNXModelWriter::writeCPUConvDKKC8(const CPUConvDKKC8Node *node, - GraphType &graph) { +Error ONNXModelWriter::writeCPUConvDKKC8(const CPUConvDKKC8Node *node, + GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. 
addValueAttribute(proto, "kernel_shape", node->getKernels()); @@ -1365,9 +1339,8 @@ llvm::Error ONNXModelWriter::writeCPUConvDKKC8(const CPUConvDKKC8Node *node, #ifdef GLOW_WITH_OPENCL -llvm::Error -ONNXModelWriter::writeOCLBatchedReduceAdd(const OCLBatchedReduceAddNode *node, - GraphType &graph) { +Error ONNXModelWriter::writeOCLBatchedReduceAdd( + const OCLBatchedReduceAddNode *node, GraphType &graph) { auto *proto = graph.add_node(); // Add dictionary entries. addValueAttribute(proto, "axis", node->getAxis()); diff --git a/lib/Exporter/ProtobufWriter.cpp b/lib/Exporter/ProtobufWriter.cpp index c850806f83..e3e121e1e1 100644 --- a/lib/Exporter/ProtobufWriter.cpp +++ b/lib/Exporter/ProtobufWriter.cpp @@ -21,7 +21,7 @@ namespace glow { ProtobufWriter::ProtobufWriter(const std::string &modelFilename, Function &F, - llvm::Error *errPtr) + Error *errPtr) : G_(F) { // Verify that the version of the library that we linked against is // compatible with the version of the headers we compiled against. @@ -32,14 +32,14 @@ ProtobufWriter::ProtobufWriter(const std::string &modelFilename, Function &F, return; } - // Lambda to setup the ProtobufWriter and return any llvm::Errors that were + // Lambda to setup the ProtobufWriter and return any Errors that were // raised. - auto setup = [&]() -> llvm::Error { + auto setup = [&]() -> Error { // Try to open file for write ff_.open(modelFilename, std::ios::out | std::ios::trunc | std::ios::binary); RETURN_ERR_IF_NOT(ff_, "Can't find the output file name: " + modelFilename, - GlowErr::ErrorCode::MODEL_WRITER_INVALID_FILENAME); - return llvm::Error::success(); + ErrorValue::ErrorCode::MODEL_WRITER_INVALID_FILENAME); + return Error::success(); }; if (errPtr) { @@ -49,9 +49,8 @@ ProtobufWriter::ProtobufWriter(const std::string &modelFilename, Function &F, } } -llvm::Error -ProtobufWriter::writeModel(const ::google::protobuf::Message &modelProto, - bool textMode) { +Error ProtobufWriter::writeModel(const ::google::protobuf::Message &modelProto, + bool textMode) { { ::google::protobuf::io::OstreamOutputStream zeroCopyOutput(&ff_); // Write the content. @@ -59,21 +58,21 @@ ProtobufWriter::writeModel(const ::google::protobuf::Message &modelProto, RETURN_ERR_IF_NOT( google::protobuf::TextFormat::Print(modelProto, &zeroCopyOutput), "Can't write to the output file name", - GlowErr::ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR); + ErrorValue::ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR); } else { ::google::protobuf::io::CodedOutputStream codedOutput(&zeroCopyOutput); // Write the size. 
 size_t size = modelProto.ByteSize();
       codedOutput.WriteVarint32(size);
       modelProto.SerializeToCodedStream(&codedOutput);
-      RETURN_ERR_IF_NOT(!codedOutput.HadError(),
-                        "Can't write to the output file name",
-                        GlowErr::ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR);
+      RETURN_ERR_IF_NOT(
+          !codedOutput.HadError(), "Can't write to the output file name",
+          ErrorValue::ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR);
     }
   }
   ff_.flush();
   ff_.close();
-  return llvm::Error::success();
+  return Error::success();
 }
 } // namespace glow
diff --git a/lib/IR/IRGen.cpp b/lib/IR/IRGen.cpp
index 303da1c775..7a645a5067 100644
--- a/lib/IR/IRGen.cpp
+++ b/lib/IR/IRGen.cpp
@@ -451,7 +451,7 @@ void IRFunction::generateIR(const Backend &B) {
   if (!B.verify(*this)) {
     EXIT_ON_ERR(
-        MAKE_ERR(GlowErr::ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_GENERATE,
+        MAKE_ERR(ErrorValue::ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_GENERATE,
                  "Unsupported instruction(s) found after generating IR " +
                      getName().str() + " for backend " + B.getBackendName()));
   }
diff --git a/lib/Importer/Caffe2ModelLoader.cpp b/lib/Importer/Caffe2ModelLoader.cpp
index 28f503a6a3..f590df78bc 100644
--- a/lib/Importer/Caffe2ModelLoader.cpp
+++ b/lib/Importer/Caffe2ModelLoader.cpp
@@ -72,7 +72,7 @@ namespace {
 /// Creates tensor \p T from the input \p in. Note, there is no data associated
 /// with the Tensor. This method makes sure that the tensor is created with the
 /// proper shape and element type.
-llvm::Expected<LoadWeightResult>
+Expected<LoadWeightResult>
 createAndSetTensorType(const caffe2::TensorProto &in) {
   std::vector<size_t> dim;
   for (auto d : in.dims()) {
@@ -105,10 +105,10 @@ createAndSetTensorType(const caffe2::TensorProto &in) {
                              in.name().c_str()));
   }
-  return llvm::Expected<LoadWeightResult>(std::move(result));
+  return Expected<LoadWeightResult>(std::move(result));
 }
-llvm::Expected<LoadWeightResult>
+Expected<LoadWeightResult>
 createAndSetTensorType(const caffe2::QTensorProto &in) {
   std::vector<size_t> dim;
   for (auto d : in.dims()) {
@@ -166,7 +166,7 @@ createAndSetTensorType(const caffe2::QTensorProto &in) {
     RETURN_ERR("Only int8, uint8, and int32 qtensors are supported");
   }
-  return llvm::Expected<LoadWeightResult>(std::move(result));
+  return Expected<LoadWeightResult>(std::move(result));
 }
 } // namespace
@@ -179,7 +179,7 @@ static ArgumentDictionaryTy loadArgumentMap(const caffe2::OperatorDef &op) {
   return dict;
 }
-static llvm::Expected<std::vector<unsigned_t>>
+static Expected<std::vector<unsigned_t>>
 getPads(const ArgumentDictionaryTy &dict) {
   if (dict.count("pad")) {
     int pad;
@@ -206,7 +206,7 @@ getPads(const ArgumentDictionaryTy &dict) {
 }
 /// Translates the "order" field of dictionary \p dict into a channel number.
-static llvm::Expected<unsigned_t> getChannel(const ArgumentDictionaryTy &dict) {
+static Expected<unsigned_t> getChannel(const ArgumentDictionaryTy &dict) {
   std::string order = "NCHW"; // default
   auto orderIt = dict.find("order");
   if (orderIt != dict.end()) {
@@ -220,9 +220,9 @@ static llvm::Expected<unsigned_t> getChannel(const ArgumentDictionaryTy &dict) {
   RETURN_ERR("Invalid order field");
 }
-static llvm::Expected<std::vector<unsigned_t>>
-getSizeHW(ArgumentDictionaryTy &dict, const std::string &name,
-          unsigned_t defaultValue) {
+static Expected<std::vector<unsigned_t>> getSizeHW(ArgumentDictionaryTy &dict,
+                                                   const std::string &name,
+                                                   unsigned_t defaultValue) {
   if (dict.count(name)) {
     int value;
     ASSIGN_VALUE_OR_RETURN_ERR(value, loadInt(dict[name]));
@@ -241,7 +241,7 @@ getSizeHW(ArgumentDictionaryTy &dict, const std::string &name,
   return std::vector<unsigned_t>{defaultValue, defaultValue};
 }
-llvm::Expected<caffe2::NetDef>
+Expected<caffe2::NetDef>
 Caffe2ModelLoader::loadProtoFile(const std::string &filename) {
   std::ifstream ff(filename, std::ios::in | std::ios::binary);
   RETURN_ERR_IF_NOT(ff,
@@ -267,8 +267,8 @@ Caffe2ModelLoader::loadProtoFile(const std::string &filename) {
   return net;
 }
-llvm::Expected<caffe2::NetDef>
-Caffe2ModelLoader::loadProto(const void *c2Model, size_t c2ModelSize) {
+Expected<caffe2::NetDef> Caffe2ModelLoader::loadProto(const void *c2Model,
+                                                      size_t c2ModelSize) {
   google::protobuf::io::ArrayInputStream arrayStream(c2Model, c2ModelSize);
   // Construct and configure a Coded Input Stream
   google::protobuf::io::CodedInputStream codedStream(&arrayStream);
@@ -281,7 +281,7 @@ Caffe2ModelLoader::loadProto(const void *c2Model, size_t c2ModelSize) {
   return MP;
 }
-llvm::Expected<bool>
+Expected<bool>
 Caffe2ModelLoader::getBroadcast(const ArgumentDictionaryTy &dict) {
   if (!dict.count("broadcast")) {
     return false;
@@ -297,8 +297,8 @@ bool Caffe2ModelLoader::hasMultidirectionalBroadcast(
   return false;
 }
-llvm::Error Caffe2ModelLoader::loadConv(const caffe2::OperatorDef &op,
-                                        ArgumentDictionaryTy &dict) {
+Error Caffe2ModelLoader::loadConv(const caffe2::OperatorDef &op,
+                                  ArgumentDictionaryTy &dict) {
   const std::string &opName = loadOperatorName(op);
   // Load the inputs:
@@ -381,11 +381,11 @@ llvm::Error Caffe2ModelLoader::loadConv(const caffe2::OperatorDef &op,
     node = G_.createTranspose(opName, node, NHWC2NCHW);
   }
   RETURN_IF_ERR(addNodeAsOutput(op, node));
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Error Caffe2ModelLoader::loadConvQuantized(const caffe2::OperatorDef &op,
-                                                 ArgumentDictionaryTy &dict) {
+Error Caffe2ModelLoader::loadConvQuantized(const caffe2::OperatorDef &op,
+                                           ArgumentDictionaryTy &dict) {
   const std::string &opName = loadOperatorName(op);
   // Load the inputs:
@@ -532,11 +532,10 @@ llvm::Error Caffe2ModelLoader::loadConvQuantized(const caffe2::OperatorDef &op,
     node = G_.createTranspose(opName, node, NHWC2NCHW);
   }
   RETURN_IF_ERR(addNodeAsOutput(op, node));
-  return llvm::Error::success();
+  return Error::success();
 }
-llvm::Expected<bool>
-Caffe2ModelLoader::foldOperator(const caffe2::OperatorDef &op) {
+Expected<bool> Caffe2ModelLoader::foldOperator(const caffe2::OperatorDef &op) {
   const unsigned numInputs = op.input_size();
   const std::string &typeName = op.type();
   llvm::SmallVector<NodeValue, 4> inputs;
@@ -556,13 +555,13 @@ Caffe2ModelLoader::foldOperator(const caffe2::OperatorDef &op) {
   Function *tmpF = G_.getParent()->createFunction("eval_const_fold__");
   Caffe2ModelLoader tmpLoader(*tmpF, nullptr);
   bool foldStatus =
-      !errToBool(constantFoldInLoader<Function, caffe2::OperatorDef>(
+      !ERR_TO_BOOL(constantFoldInLoader<Function, caffe2::OperatorDef>(
           tmpF, tmpLoader, this, op));
   G_.getParent()->eraseFunction(tmpF);
   return foldStatus;
 }
-llvm::Error Caffe2ModelLoader::loadOperator(const
caffe2::OperatorDef &op) { +Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { ArgumentDictionaryTy dict = loadArgumentMap(op); const std::string &typeName = op.type(); @@ -571,7 +570,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { ASSIGN_VALUE_OR_RETURN_ERR(loadCommonOperatorSuccess, tryLoadCommonOperator(typeName, op, dict)); if (loadCommonOperatorSuccess) { - return llvm::Error::success(); + return Error::success(); } const std::string &opName = loadOperatorName(op); @@ -604,7 +603,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { auto *add = G_.createAdd(opName + ".sum", outTy, in0, in1); auto *relu = G_.createRELU(opName + ".relu", add); RETURN_IF_ERR(addNodeAsOutput(op, relu)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Int8Relu") { @@ -620,7 +619,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { yZeroPoint - OFFSETSHIFT); auto *relu = G_.createRELU(opName, in, outTy); RETURN_IF_ERR(addNodeAsOutput(op, relu)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Int8Quantize") { @@ -639,7 +638,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { yZeroPoint - OFFSETSHIFT); Node *N = G_.createQuantize(opName, in, outTy); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Int8Dequantize") { @@ -647,7 +646,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); auto *node = G_.createDequantize(opName, in); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "MaxPool" || typeName == "AveragePool" || @@ -741,7 +740,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { node = G_.createTranspose(opName, node->getNthResult(resIdx), NHWC2NCHW); } RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "SpatialBN") { @@ -767,7 +766,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { channel, epsilon); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Bucketize") { @@ -778,7 +777,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { std::vector boundaries = getFloats(dict["boundaries"]); auto *node = G_.createBucketizeNode(opName, in, boundaries); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "ResizeNearest") { @@ -805,7 +804,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { auto *node = G_.createResizeNearest(opName, finalIn, heightScale, widthScale); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Concat") { @@ -857,7 +856,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { nodeValueByName_[op.output(0)] = finalNode; // Concat may have a second output in Caffe2 (split_info), but we don't // use it for inference - return llvm::Error::success(); + return Error::success(); } if (typeName == "FC" || typeName == "FCTransposed" || typeName == "Int8FC" || @@ -968,7 +967,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { 
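// Every operator handler in loadOperator follows the same Error discipline:
// unwrap fallible lookups with ASSIGN_VALUE_OR_RETURN_ERR, forward helper
// failures with RETURN_IF_ERR, and finish with Error::success(). A minimal
// sketch of one handler's shape, assuming Glow's Error.h macros (loadExample
// is a hypothetical handler, not part of this patch):
//
//   Error Caffe2ModelLoader::loadExample(const caffe2::OperatorDef &op) {
//     NodeValue in;
//     // Returns early with the Error if the input name is unknown.
//     ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0)));
//     Node *N = G_.createRELU(loadOperatorName(op), in);
//     // addNodeAsOutput itself returns Error, so forward any failure.
//     RETURN_IF_ERR(addNodeAsOutput(op, N));
//     return Error::success();
//   }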
// Save the outputs: RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "ChannelShuffle") { @@ -982,7 +981,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { Node *node = G_.createChannelShuffle(opName, in, group, kernel); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Squeeze") { @@ -991,7 +990,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { auto dims = getShape(dict["dims"]); Node *node = G_.createSqueeze(opName, in, dims); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Log") { @@ -1001,7 +1000,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { // Create the log: auto *R = G_.createLog(opName, in); RETURN_IF_ERR(addNodeAsOutput(op, R)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Logit") { @@ -1018,7 +1017,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { auto *node = G_.createLogit(opName, input, eps); // Save the outputs: RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "EQ") { @@ -1028,7 +1027,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { ASSIGN_VALUE_OR_RETURN_ERR(in1, getNodeValueByName(op.input(1))); auto *node = G_.createCmpEQ(opName, in0, in1); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Tile") { @@ -1041,24 +1040,24 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { auto *node = G_.createTile(opName, in, tiles, axis); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Free") { // Glow frees memory automatically. - return llvm::Error::success(); + return Error::success(); } if (typeName == "StopGradient" || typeName == "ScaleGradient") { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); // Currently Caffe2 importer only supports inference. 
RETURN_IF_ERR(addNodeAsOutput(op, in)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Transpose") { RETURN_IF_ERR(loadTranspose(op, dict, "axes")); - return llvm::Error::success(); + return Error::success(); } if (typeName == "NCHW2NHWC") { @@ -1066,7 +1065,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); auto *node = G_.createTranspose(opName, in, NCHW2NHWC); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "CopyCPUToMKL" || typeName == "CopyMKLToCPU" || @@ -1077,7 +1076,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); RETURN_IF_ERR(addNodeAsOutput(op, in)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Slice") { @@ -1108,12 +1107,12 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { Node *SN = G_.createSlice(opName, data, newStarts, newEnds); RETURN_IF_ERR(addNodeAsOutput(op, SN)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "MatMul") { RETURN_IF_ERR(loadBatchMatMul(op, dict, false)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Cast") { @@ -1139,7 +1138,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { } RETURN_IF_ERR(addNodeAsOutput(op, in)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "ScatterAssign") { @@ -1155,13 +1154,13 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { G_.createReshape("indices.2d", indices, {indices.dims()[0], 1}); Node *SAN = G_.createScatterData(opName, data, indices2D, slices); RETURN_IF_ERR(addNodeAsOutput(op, SAN)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "ConstantFill" || typeName == "GivenTensorIntFill" || typeName == "GivenTensorInt64Fill") { RETURN_IF_ERR(loadWeight(op)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "SigmoidCrossEntropyWithLogits") { @@ -1172,7 +1171,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { Node *SCEL = G_.createSigmoidCrossEntropyWithLogits(opName, logits, targets); RETURN_IF_ERR(addNodeAsOutput(op, SCEL)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "ElementwiseLinear") { @@ -1192,7 +1191,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { Node *EL = G_.createElementwiseLinear(opName, X, w, b, axis); RETURN_IF_ERR(addNodeAsOutput(op, EL)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "AveragedLoss") { @@ -1200,7 +1199,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); auto *node = G_.createBatchedReduceMean(opName, in, 0); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Mod") { @@ -1220,7 +1219,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { auto *node = G_.createModulo(opName, in, divisor, signFollowDivisor); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "Sqr") { @@ -1228,7 +1227,7 @@ llvm::Error 
Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); auto *pow = G_.createPow(opName, in, /* exp */ 2); RETURN_IF_ERR(addNodeAsOutput(op, pow)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "SparseLengthsWeightedSum8BitsRowwise" || @@ -1337,7 +1336,7 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { } RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } if (typeName == "LengthsRangeFill") { @@ -1355,20 +1354,19 @@ llvm::Error Caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) { auto *LRF = G_.createLengthsRangeFill(opName, lengths, maxOutputSize); RETURN_IF_ERR(addNodeAsOutput(op, LRF)); - return llvm::Error::success(); + return Error::success(); } RETURN_ERR(unexpectedNodeErrorMessage(op, "Unsupported operator.")); } template -llvm::Error -Caffe2ModelLoader::loadInputsWithTensorProtoType(const caffe2::NetDef &net, - bool loadInputsAsPlaceholders, - const TensorProtoType &in) { +Error Caffe2ModelLoader::loadInputsWithTensorProtoType( + const caffe2::NetDef &net, bool loadInputsAsPlaceholders, + const TensorProtoType &in) { // Skip static weights if (getConstantByNameOrNull(in.name())) { - return llvm::Error::success(); + return Error::success(); } LoadWeightResult loadRes; @@ -1420,11 +1418,11 @@ Caffe2ModelLoader::loadInputsWithTensorProtoType(const caffe2::NetDef &net, createAndRegisterConstant(scalesName, std::move(*loadRes.scales))); } } - return llvm::Error::success(); + return Error::success(); } -llvm::Error Caffe2ModelLoader::loadInputs(const caffe2::NetDef &net, - bool loadInputsAsPlaceholders) { +Error Caffe2ModelLoader::loadInputs(const caffe2::NetDef &net, + bool loadInputsAsPlaceholders) { const caffe2::Argument *arg = nullptr, *qarg = nullptr; for (auto i = 0, e = net.arg_size(); i < e && (!arg || !qarg); ++i) { if (net.arg(i).name() == "input_shape_info") { @@ -1450,10 +1448,10 @@ llvm::Error Caffe2ModelLoader::loadInputs(const caffe2::NetDef &net, } } - return llvm::Error::success(); + return Error::success(); } -llvm::Error Caffe2ModelLoader::loadNetwork(caffe2::NetDef &net) { +Error Caffe2ModelLoader::loadNetwork(caffe2::NetDef &net) { /// Load the network operators: for (int i = 0; i < net.op_size(); i++) { auto &op = net.op(i); @@ -1477,13 +1475,13 @@ llvm::Error Caffe2ModelLoader::loadNetwork(caffe2::NetDef &net) { auto *SN = G_.createSave("save_" + outputName, r); outputVarsByName_[outputName] = SN->getPlaceholder(); } - return llvm::Error::success(); + return Error::success(); } /// Fills \p T with data from \p values. 
template -static llvm::Error fillTensor(Tensor &T, ElemKind kind, - llvm::ArrayRef dim, RangeTy values) { +static Error fillTensor(Tensor &T, ElemKind kind, llvm::ArrayRef dim, + RangeTy values) { T.reset(kind, dim); auto TH = T.getHandle(); RETURN_ERR_IF_NOT((size_t)values.size() == T.size(), @@ -1495,10 +1493,10 @@ static llvm::Error fillTensor(Tensor &T, ElemKind kind, for (auto num : values) { TH.raw(i++) = num; } - return llvm::Error::success(); + return Error::success(); } -llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { +Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { ArgumentDictionaryTy dict = loadArgumentMap(op); const std::string &typeName = op.type(); @@ -1542,7 +1540,7 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { RETURN_ERR(strFormat("Unhandled tensor fill type: %s", typeName.c_str())); } RETURN_IF_ERR(createAndRegisterConstant(op.output().Get(0), std::move(T))); - return llvm::Error::success(); + return Error::success(); } if (typeName == "GivenTensorByteStringToUInt8Fill") { @@ -1586,7 +1584,7 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { pos, T.size())); RETURN_IF_ERR(createAndRegisterConstant(o, std::move(T))); } - return llvm::Error::success(); + return Error::success(); } // Load quantized tensors: @@ -1661,7 +1659,7 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { RETURN_IF_ERR(createAndRegisterConstant(o, std::move(T))); } - return llvm::Error::success(); + return Error::success(); } // Load tensors with constant fill: @@ -1680,7 +1678,7 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { // If the tensor is pre-populated by the user of this class then we don't // need to allocate a new tensor. if (getConstantByNameOrNull(name)) { - return llvm::Error::success(); + return Error::success(); } Tensor T; @@ -1734,7 +1732,7 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { RETURN_IF_ERR(createAndRegisterConstant(name, std::move(T))); - return llvm::Error::success(); + return Error::success(); } if (typeName == "UniformFill") { @@ -1777,20 +1775,20 @@ llvm::Error Caffe2ModelLoader::loadWeight(const caffe2::OperatorDef &op) { RETURN_IF_ERR(createAndRegisterConstant(name, std::move(T))); - return llvm::Error::success(); + return Error::success(); } RETURN_ERR(unexpectedNodeErrorMessage(op, "Unsupported weight kind")); } -llvm::Error Caffe2ModelLoader::loadWeightsFromNet(caffe2::NetDef &net) { +Error Caffe2ModelLoader::loadWeightsFromNet(caffe2::NetDef &net) { for (auto &op : net.op()) { RETURN_IF_ERR(loadWeight(op)); } - return llvm::Error::success(); + return Error::success(); } -Caffe2ModelLoader::Caffe2ModelLoader(Function &F, llvm::Error *errPtr) +Caffe2ModelLoader::Caffe2ModelLoader(Function &F, Error *errPtr) : CommonOperatorLoader({}, {}, F, errPtr) { deleteUnusedConstants(); } @@ -1799,16 +1797,16 @@ Caffe2ModelLoader::Caffe2ModelLoader(const std::string &netDescFilename, const std::string &netWeightFilename, llvm::ArrayRef names, llvm::ArrayRef types, Function &F, - llvm::Error *errPtr) + Error *errPtr) : CommonOperatorLoader(names, types, F, errPtr) { // if errPtr already contains an error then don't continue with constructor if (errPtr && *errPtr) { return; } - // Lambda to setup the Caffe2ModelLoader and return any llvm::Errors that + // Lambda to setup the Caffe2ModelLoader and return any Errors that // were raised. 
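// Callers of these errPtr-based constructors pre-declare an unchecked slot
// with Error::empty(), let the constructor's setup lambda fill it, and test
// it afterwards; the constructors also bail out early when *errPtr already
// holds a failure. A minimal caller-side sketch, assuming Glow's Error.h and
// the Caffe2ModelLoader declaration (tryLoadCaffe2 is hypothetical):
//
//   bool tryLoadCaffe2(Function &F, const std::string &netFile,
//                      const std::string &weightFile) {
//     Error errPtr = Error::empty();
//     Caffe2ModelLoader loader(netFile, weightFile, {}, {}, F, &errPtr);
//     // ERR_TO_BOOL consumes the Error and reports whether it held a
//     // failure.
//     return !ERR_TO_BOOL(std::move(errPtr));
//   }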
- auto setup = [&]() -> llvm::Error { + auto setup = [&]() -> Error { // The caffe2 network descriptor that we are deserializing. caffe2::NetDef networkDef; ASSIGN_VALUE_OR_RETURN_ERR(networkDef, loadProtoFile(netDescFilename)); @@ -1827,7 +1825,7 @@ Caffe2ModelLoader::Caffe2ModelLoader(const std::string &netDescFilename, deleteUnusedConstants(); - return llvm::Error::success(); + return Error::success(); }; if (errPtr) { @@ -1840,16 +1838,16 @@ Caffe2ModelLoader::Caffe2ModelLoader(const std::string &netDescFilename, Caffe2ModelLoader::Caffe2ModelLoader( const void *model, uint32_t modelSize, uint32_t weightsCount, const onnxTensorDescriptorV1 *weightDescriptors, Function &F, - bool loadInputsAsPlaceholders, llvm::Error *errPtr) + bool loadInputsAsPlaceholders, Error *errPtr) : CommonOperatorLoader({}, {}, F, errPtr) { // if errPtr already contains an error then don't continue with constructor if (errPtr && *errPtr) { return; } - // Lambda to setup the Caffe2ModelLoader and return any llvm::Errors that were + // Lambda to setup the Caffe2ModelLoader and return any Errors that were // raised. - auto setup = [&]() -> llvm::Error { + auto setup = [&]() -> Error { caffe2::NetDef networkDef; ASSIGN_VALUE_OR_RETURN_ERR(networkDef, loadProto(model, modelSize)); @@ -1867,7 +1865,7 @@ Caffe2ModelLoader::Caffe2ModelLoader( deleteUnusedConstants(); - return llvm::Error::success(); + return Error::success(); }; if (errPtr) { diff --git a/lib/Importer/ONNXIFIModelLoader.cpp b/lib/Importer/ONNXIFIModelLoader.cpp index f7f92ef75b..cdc3657323 100644 --- a/lib/Importer/ONNXIFIModelLoader.cpp +++ b/lib/Importer/ONNXIFIModelLoader.cpp @@ -22,14 +22,13 @@ namespace glow { -llvm::Expected> ONNXIFIModelLoader::parse( +Expected> ONNXIFIModelLoader::parse( const void *model, uint32_t modelSize, uint32_t weightsCount, const onnxTensorDescriptorV1 *weightDescriptors, Function &F, bool loadInputsAsPlaceholders, bool use_onnx) { std::unique_ptr loader(new ONNXIFIModelLoader()); - llvm::Error loaderConstructionErr = llvm::Error::success(); - MARK_ERR_CHECKED(loaderConstructionErr); + Error loaderConstructionErr = Error::empty(); if (use_onnx) { std::unique_ptr onnxLoader(new ONNXModelLoader( @@ -52,6 +51,6 @@ llvm::Expected> ONNXIFIModelLoader::parse( loader->core_ = std::move(c2Loader); } - return llvm::Expected>(std::move(loader)); + return Expected>(std::move(loader)); } } // namespace glow diff --git a/lib/Importer/ONNXModelLoader.cpp b/lib/Importer/ONNXModelLoader.cpp index a2f2631ea6..6e3f7f0356 100644 --- a/lib/Importer/ONNXModelLoader.cpp +++ b/lib/Importer/ONNXModelLoader.cpp @@ -20,7 +20,6 @@ #include "glow/Graph/Nodes.h" #include "llvm/Support/Casting.h" -#include "llvm/Support/Error.h" #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream_impl.h" @@ -39,7 +38,7 @@ namespace { /// Creates tensor \p T from the input \p in. Note, there is no data associated /// with the Tensor. This method makes sure that the tensor is created with the /// proper shape and element type. 
-llvm::Error setTensorType(const ONNX_NAMESPACE::TypeProto &in, Tensor *T) { +Error setTensorType(const ONNX_NAMESPACE::TypeProto &in, Tensor *T) { std::vector dim; for (auto d : in.tensor_type().shape().dim()) { dim.push_back(d.dim_value()); @@ -47,15 +46,15 @@ llvm::Error setTensorType(const ONNX_NAMESPACE::TypeProto &in, Tensor *T) { if (in.tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto::FLOAT) { T->reset(ElemKind::FloatTy, dim); - return llvm::Error::success(); + return Error::success(); } else if (in.tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto::INT64) { T->reset(ElemKind::Int64ITy, dim); - return llvm::Error::success(); + return Error::success(); } else if (in.tensor_type().elem_type() == ONNX_NAMESPACE::TensorProto::INT32) { T->reset(ElemKind::Int32ITy, dim); - return llvm::Error::success(); + return Error::success(); } else { RETURN_ERR("Only float and index tensors are supported"); } @@ -75,8 +74,8 @@ loadArgumentMap(const ONNX_NAMESPACE::NodeProto &op) { return dict; } -llvm::Error ONNXModelLoader::loadInputs(ONNX_NAMESPACE::GraphProto &net, - bool loadInputsAsPlaceholders) { +Error ONNXModelLoader::loadInputs(ONNX_NAMESPACE::GraphProto &net, + bool loadInputsAsPlaceholders) { for (const auto &in : net.input()) { // Skip static weights. if (getConstantByNameOrNull(in.name())) { @@ -97,11 +96,10 @@ llvm::Error ONNXModelLoader::loadInputs(ONNX_NAMESPACE::GraphProto &net, RETURN_IF_ERR(createAndRegisterConstant(in.name(), std::move(T))); } } - return llvm::Error::success(); + return Error::success(); } -llvm::Expected -ONNXModelLoader::getBroadcast(const ArgumentDictionaryTy &dict) { +Expected ONNXModelLoader::getBroadcast(const ArgumentDictionaryTy &dict) { // Starting with opset 7, broadcasting is implicit and doesn't require any // attribute. 
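// getBroadcast returns Expected<bool> rather than a bare bool so that a
// malformed attribute surfaces as an Error at the call site. A sketch of
// producing and consuming such a value, assuming Glow's Error.h macros and
// the loadInt helper (readFlag and useFlag are hypothetical):
//
//   static Expected<bool> readFlag(const ArgumentDictionaryTy &dict) {
//     RETURN_ERR_IF_NOT(dict.count("flag"), "Missing flag attribute");
//     int value;
//     ASSIGN_VALUE_OR_RETURN_ERR(value, loadInt(dict.at("flag")));
//     return value != 0;
//   }
//
//   static Error useFlag(const ArgumentDictionaryTy &dict) {
//     bool flag;
//     // Unwraps the bool on success, returns the Error otherwise.
//     ASSIGN_VALUE_OR_RETURN_ERR(flag, readFlag(dict));
//     return Error::success();
//   }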
if (opsetVersion_ > 6) { @@ -129,7 +127,7 @@ bool ONNXModelLoader::hasMultidirectionalBroadcast( return false; } -llvm::Expected ONNXModelLoader::convertTensorProtoDataType( +Expected ONNXModelLoader::convertTensorProtoDataType( ONNX_NAMESPACE::TensorProto_DataType t) { switch (t) { case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: @@ -145,13 +143,13 @@ llvm::Expected ONNXModelLoader::convertTensorProtoDataType( RETURN_ERR("Non supported ONNX type"); } -llvm::Error ONNXModelLoader::setVersion(ONNX_NAMESPACE::ModelProto MP) { +Error ONNXModelLoader::setVersion(ONNX_NAMESPACE::ModelProto MP) { irVersion_ = MP.ir_version(); opsetVersion_ = 0; RETURN_ERR_IF_NOT( irVersion_ >= 3, "This ONNX model with ir_version < 3 is too old to be supported.", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_ONNX_VERSION); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_ONNX_VERSION); for (const auto &imp : MP.opset_import()) { if (!imp.has_domain() || imp.domain() == "") { opsetVersion_ = imp.version(); @@ -160,10 +158,10 @@ llvm::Error ONNXModelLoader::setVersion(ONNX_NAMESPACE::ModelProto MP) { } RETURN_ERR_IF_NOT(opsetVersion_ > 0, "The opset of this ONNX model is not supported."); - return llvm::Error::success(); + return Error::success(); } -llvm::Expected +Expected ONNXModelLoader::loadProto(google::protobuf::io::ZeroCopyInputStream &iStream) { // Construct and configure a Coded Input Stream google::protobuf::io::CodedInputStream codedStream(&iStream); @@ -173,23 +171,23 @@ ONNXModelLoader::loadProto(google::protobuf::io::ZeroCopyInputStream &iStream) { ONNX_NAMESPACE::ModelProto MP; bool parseNet = MP.ParseFromCodedStream(&codedStream); RETURN_ERR_IF_NOT(parseNet, "Failed to parse ModelProto", - GlowErr::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); + ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); return MP; } -llvm::Expected +Expected ONNXModelLoader::loadProto(const void *onnxModel, size_t onnxModelSize) { google::protobuf::io::ArrayInputStream arrayStream(onnxModel, onnxModelSize); return loadProto(arrayStream); } -llvm::Expected +Expected ONNXModelLoader::loadProto(const std::string &filename) { std::ifstream ff(filename, std::ios::in | std::ios::binary); RETURN_ERR_IF_NOT(ff, strFormat("Can't find the model or network files for %s.", filename.c_str()), - GlowErr::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); + ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); // TODO: intend to find a way to reuse the following function later // for the text format onnx model: @@ -202,7 +200,7 @@ ONNXModelLoader::loadProto(const std::string &filename) { bool parseNet = google::protobuf::TextFormat::ParseFromString(str, &MP); RETURN_ERR_IF_NOT(parseNet, "Failed to parse ModelProto", - GlowErr::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); + ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); return MP; } @@ -219,10 +217,10 @@ using Pads = std::vector; /// \p kdim : kernel sizes (HW) /// \p sdim: stride sizes (HW) /// \p idim: input sizes (HW) -llvm::Expected getPads(const ArgumentDictionaryTy &dict, - llvm::ArrayRef kdim, - llvm::ArrayRef sdim, - llvm::ArrayRef idim) { +Expected getPads(const ArgumentDictionaryTy &dict, + llvm::ArrayRef kdim, + llvm::ArrayRef sdim, + llvm::ArrayRef idim) { if (dict.count("pads")) { return getShape(dict.at("pads")); } @@ -270,8 +268,7 @@ llvm::Expected getPads(const ArgumentDictionaryTy &dict, } /// Loads tensor \p T from the input \p in. 
-static llvm::Error loadTensor(const ONNX_NAMESPACE::TensorProto &in, - Tensor *T) { +static Error loadTensor(const ONNX_NAMESPACE::TensorProto &in, Tensor *T) { std::vector dim; for (auto d : in.dims()) { dim.push_back(d); @@ -291,7 +288,7 @@ static llvm::Error loadTensor(const ONNX_NAMESPACE::TensorProto &in, inStream.read(T->getUnsafePtr(), T->size() * sizeof(float)); } else { RETURN_ERR("Unsupported Tensor format.", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); } } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::INT64) { T->reset(ElemKind::Int64ITy, dim); @@ -307,7 +304,7 @@ static llvm::Error loadTensor(const ONNX_NAMESPACE::TensorProto &in, inStream.read(T->getUnsafePtr(), T->size() * sizeof(int64_t)); } else { RETURN_ERR("Unsupported Tensor format.", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); } } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::INT32) { // There are few cases when we will have int32 tensors. For example, the @@ -325,7 +322,7 @@ static llvm::Error loadTensor(const ONNX_NAMESPACE::TensorProto &in, inStream.read(T->getUnsafePtr(), T->size() * sizeof(int32_t)); } else { RETURN_ERR("Unsupported Tensor format.", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); } } else if (in.data_type() == ONNX_NAMESPACE::TensorProto::BOOL) { T->reset(ElemKind::BoolTy, dim); @@ -334,17 +331,17 @@ static llvm::Error loadTensor(const ONNX_NAMESPACE::TensorProto &in, inStream.read(T->getUnsafePtr(), T->size() * sizeof(bool)); } else { RETURN_ERR("Unsupported Tensor format.", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); } } else { RETURN_ERR("Only float and index tensors are supported", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); } - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadConstant(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadConstant(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { /* output: "Parameter6" name: "Parameter6" @@ -373,19 +370,19 @@ llvm::Error ONNXModelLoader::loadConstant(const ONNX_NAMESPACE::NodeProto &op, // If the tensor is pre-populated by the user of this class then we don't // need to allocate a new tensor. if (getConstantByNameOrNull(name)) { - return llvm::Error::success(); + return Error::success(); } RETURN_ERR_IF_NOT(dict.at("value")->type() == ONNX_NAMESPACE::AttributeProto::TENSOR, "Only Tensor type constants are supported.", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE); Tensor T; RETURN_IF_ERR(loadTensor(dict.at("value")->t(), &T)); RETURN_IF_ERR(createAndRegisterConstant(name, std::move(T))); - return llvm::Error::success(); + return Error::success(); } /// Retrieves data from a constant Tensor and stores it in a vector. 
@@ -397,8 +394,8 @@ static void helperSetter(Constant *constT, std::vector &vec) { } } -llvm::Error ONNXModelLoader::loadSlice(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadSlice(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue data; ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0))); @@ -535,11 +532,11 @@ llvm::Error ONNXModelLoader::loadSlice(const ONNX_NAMESPACE::NodeProto &op, Node *SN = G_.createSlice(opName, data, newStarts, newEnds); RETURN_IF_ERR(addNodeAsOutput(op, SN)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadConv(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadConv(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Load the attributes std::vector strides(2, 1); @@ -641,12 +638,12 @@ llvm::Error ONNXModelLoader::loadConv(const ONNX_NAMESPACE::NodeProto &op, auto *N = G_.createTranspose(opName, node, NHWC2NCHW); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadPool(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict, - llvm::StringRef typeName) { +Error ONNXModelLoader::loadPool(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict, + llvm::StringRef typeName) { const std::string &opName = loadOperatorName(op); // Load the inputs: @@ -661,7 +658,7 @@ llvm::Error ONNXModelLoader::loadPool(const ONNX_NAMESPACE::NodeProto &op, if (in.dims().size() != 4 || kernels.size() != 2) { // Glow only handles 2D pooling currently. 
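// Handlers attach a machine-readable ErrorValue::ErrorCode to RETURN_ERR and
// RETURN_ERR_IF_NOT alongside the message so that callers such as ONNXIFI
// can classify the failure. The same guard in isolation, as a sketch
// assuming Glow's Error.h (checkPoolRank is hypothetical):
//
//   static Error checkPoolRank(const NodeValue &in) {
//     RETURN_ERR_IF_NOT(in.dims().size() == 4,
//                       "Glow only handles 2D pooling currently.",
//                       ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE);
//     return Error::success();
//   }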
RETURN_ERR("Glow only handles 2D pooling currently.", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE); } auto *tr = G_.createTranspose(opName, in, NCHW2NHWC); @@ -686,7 +683,7 @@ llvm::Error ONNXModelLoader::loadPool(const ONNX_NAMESPACE::NodeProto &op, if (op.output_size() > 1) { if (typeName != "MaxPool") { RETURN_ERR("Argmax output is only supported for MaxPool!", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); } node = G_.createMaxPool(opName, tr, kernels, strides, pads); @@ -705,11 +702,11 @@ llvm::Error ONNXModelLoader::loadPool(const ONNX_NAMESPACE::NodeProto &op, auto *N = G_.createTranspose(opName, NodeValue(node, idx), NHWC2NCHW); RETURN_IF_ERR(addNodeAsOutput(op, N)); } - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadArgMax(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadArgMax(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; @@ -724,12 +721,11 @@ llvm::Error ONNXModelLoader::loadArgMax(const ONNX_NAMESPACE::NodeProto &op, } Node *node = G_.createArgMax(opName, in, axis, keepDims); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadGlobalAveragePool(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadGlobalAveragePool( + const ONNX_NAMESPACE::NodeProto &op, const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Load the inputs: @@ -752,11 +748,11 @@ ONNXModelLoader::loadGlobalAveragePool(const ONNX_NAMESPACE::NodeProto &op, Node *node = G_.createAvgPool(opName, tr, kernels, strides, pads); auto *N = G_.createTranspose(opName, node, NHWC2NCHW); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadSqueeze(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadSqueeze(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; @@ -764,11 +760,11 @@ llvm::Error ONNXModelLoader::loadSqueeze(const ONNX_NAMESPACE::NodeProto &op, auto axes = getShape(dict.at("axes")); Node *node = G_.createSqueeze(opName, in, axes); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadUnsqueeze(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadUnsqueeze(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; @@ -776,12 +772,11 @@ llvm::Error ONNXModelLoader::loadUnsqueeze(const ONNX_NAMESPACE::NodeProto &op, auto axes = getShape(dict.at("axes")); Node *node = G_.createExpandDims(opName, in, axes); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadBatchNormalization(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadBatchNormalization( + const ONNX_NAMESPACE::NodeProto &op, const ArgumentDictionaryTy &dict) { const std::string &opName = 
loadOperatorName(op); NodeValue in; @@ -811,11 +806,11 @@ ONNXModelLoader::loadBatchNormalization(const ONNX_NAMESPACE::NodeProto &op, // the non supported features are actually requested by the ONNX model. RETURN_IF_ERR(addNodeAsOutput(op, node, 1)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadConcat(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadConcat(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); const unsigned numInputs = op.input_size(); @@ -832,12 +827,11 @@ llvm::Error ONNXModelLoader::loadConcat(const ONNX_NAMESPACE::NodeProto &op, Node *node = G_.createConcat(opName, inputs, axis); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadFCTransposed(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadFCTransposed(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in; @@ -873,11 +867,11 @@ ONNXModelLoader::loadFCTransposed(const ONNX_NAMESPACE::NodeProto &op, auto *node = G_.createFullyConnected(opName, in, W, B); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadGemm(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadGemm(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue A; @@ -912,11 +906,11 @@ llvm::Error ONNXModelLoader::loadGemm(const ONNX_NAMESPACE::NodeProto &op, Node *node = G_.createAdd(opName, mul, C); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadMatMul(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadMatMul(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue LHS; @@ -926,11 +920,11 @@ llvm::Error ONNXModelLoader::loadMatMul(const ONNX_NAMESPACE::NodeProto &op, Node *node = G_.createMatMul(opName, LHS, RHS); RETURN_IF_ERR(addNodeAsOutput(op, node)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadLeakyRelu(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadLeakyRelu(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { // Input Type. 
NodeValue input; ASSIGN_VALUE_OR_RETURN_ERR(input, getNodeValueByName(op.input(0))); @@ -956,11 +950,11 @@ llvm::Error ONNXModelLoader::loadLeakyRelu(const ONNX_NAMESPACE::NodeProto &op, RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadPad(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadPad(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Input @@ -982,7 +976,7 @@ llvm::Error ONNXModelLoader::loadPad(const ONNX_NAMESPACE::NodeProto &op, mode = PaddingMode::EDGE; } else { RETURN_ERR("Pad: Invalid mode", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_ATTRIBUTE); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_ATTRIBUTE); } } float value = 0.f; // Default @@ -1012,11 +1006,11 @@ llvm::Error ONNXModelLoader::loadPad(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createPad(opName, input, outTy, mode, pads, value); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadCast(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadCast(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); // Input type @@ -1032,7 +1026,7 @@ llvm::Error ONNXModelLoader::loadCast(const ONNX_NAMESPACE::NodeProto &op, RETURN_ERR_IF_NOT( ONNX_NAMESPACE::TensorProto_DataType_IsValid(toONNXTypeValue), "Cast: invalid target type", - GlowErr::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); + ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); ASSIGN_VALUE_OR_RETURN_ERR( targetKind, convertTensorProtoDataType( ONNX_NAMESPACE::TensorProto_DataType(toONNXTypeValue))); @@ -1046,12 +1040,11 @@ llvm::Error ONNXModelLoader::loadCast(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createConvertTo(opName, input, targetKind); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadSpaceToDepth(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadSpaceToDepth(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { // Input Type NodeValue input; @@ -1072,13 +1065,12 @@ ONNXModelLoader::loadSpaceToDepth(const ONNX_NAMESPACE::NodeProto &op, RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadConstantOfShape(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict, - bool isSplat) { +Error ONNXModelLoader::loadConstantOfShape(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict, + bool isSplat) { Tensor T(ElemKind::FloatTy, {1}); T.getHandle().raw(0) = 0.0; @@ -1139,11 +1131,11 @@ ONNXModelLoader::loadConstantOfShape(const ONNX_NAMESPACE::NodeProto &op, SN = G_.createSplat(loadOperatorName(op), ty, T.getHandle().raw(0)); } RETURN_IF_ERR(addNodeAsOutput(op, SN)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadTile(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadTile(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { const std::string &opName = loadOperatorName(op); NodeValue in, repeats; ASSIGN_VALUE_OR_RETURN_ERR(in, 
getNodeValueByName(op.input(0))); @@ -1170,10 +1162,10 @@ llvm::Error ONNXModelLoader::loadTile(const ONNX_NAMESPACE::NodeProto &op, } RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Expected +Expected ONNXModelLoader::foldOperator(const ONNX_NAMESPACE::NodeProto &op) { const unsigned numInputs = op.input_size(); const std::string &typeName = op.op_type(); @@ -1194,15 +1186,15 @@ ONNXModelLoader::foldOperator(const ONNX_NAMESPACE::NodeProto &op) { Function *tmpF = G_.getParent()->createFunction("eval_const_fold__"); ONNXModelLoader tmpLoader(*tmpF); tmpLoader.opsetVersion_ = opsetVersion_; - bool foldStatus = !errToBool( + bool foldStatus = !ERR_TO_BOOL( constantFoldInLoader( tmpF, tmpLoader, this, op)); G_.getParent()->eraseFunction(tmpF); return foldStatus; } -llvm::Error ONNXModelLoader::loadWhere(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadWhere(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue cNV; ASSIGN_VALUE_OR_RETURN_ERR(cNV, getNodeValueByName(op.input(0))); NodeValue xNV; @@ -1217,11 +1209,11 @@ llvm::Error ONNXModelLoader::loadWhere(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createNodeWithBroadcast(opName, -1, cNV, xNV, yNV); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadCmpEQ(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadCmpEQ(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue LHS; ASSIGN_VALUE_OR_RETURN_ERR(LHS, getNodeValueByName(op.input(0))); NodeValue RHS; @@ -1230,11 +1222,11 @@ llvm::Error ONNXModelLoader::loadCmpEQ(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createCmpEQ(loadOperatorName(op), LHS, RHS); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadCmpLTE(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadCmpLTE(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue LHS; ASSIGN_VALUE_OR_RETURN_ERR(LHS, getNodeValueByName(op.input(0))); NodeValue RHS; @@ -1243,11 +1235,11 @@ llvm::Error ONNXModelLoader::loadCmpLTE(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createCmpLTE(loadOperatorName(op), LHS, RHS); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadSelect(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadSelect(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue Cond; ASSIGN_VALUE_OR_RETURN_ERR(Cond, getNodeValueByName(op.input(0))); NodeValue LHS; @@ -1261,11 +1253,11 @@ llvm::Error ONNXModelLoader::loadSelect(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createSelect(loadOperatorName(op), outTy, Cond, LHS, RHS); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadQuantize(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadQuantize(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); float scale; @@ -1279,11 +1271,11 @@ llvm::Error 
ONNXModelLoader::loadQuantize(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createQuantize(loadOperatorName(op), in, outTy); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadConvertTo(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadConvertTo(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); @@ -1293,22 +1285,22 @@ llvm::Error ONNXModelLoader::loadConvertTo(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createConvertTo(loadOperatorName(op), in, outTy); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadDequantize(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadDequantize(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); Node *N = G_.createDequantize(loadOperatorName(op), in); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadRegression(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadRegression(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); NodeValue expected; @@ -1317,11 +1309,11 @@ llvm::Error ONNXModelLoader::loadRegression(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createRegression(loadOperatorName(op), in, expected); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadBatchedAdd(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadBatchedAdd(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue batch; ASSIGN_VALUE_OR_RETURN_ERR(batch, getNodeValueByName(op.input(0))); NodeValue sample; @@ -1330,12 +1322,11 @@ llvm::Error ONNXModelLoader::loadBatchedAdd(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createBatchedAdd(loadOperatorName(op), batch, sample); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadScatterAssign(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadScatterAssign(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue data; ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0))); NodeValue indices; @@ -1346,12 +1337,11 @@ ONNXModelLoader::loadScatterAssign(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createScatterData(loadOperatorName(op), data, indices, slices); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadIntLookupTable(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadIntLookupTable(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); @@ -1362,12 +1352,11 @@ ONNXModelLoader::loadIntLookupTable(const ONNX_NAMESPACE::NodeProto &op, Node *N = 
G_.createIntLookupTable(loadOperatorName(op), in, values, outTy); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadLengthsRangeFill(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadLengthsRangeFill(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue lengths; ASSIGN_VALUE_OR_RETURN_ERR(lengths, getNodeValueByName(op.input(0))); unsigned_t size; @@ -1376,12 +1365,11 @@ ONNXModelLoader::loadLengthsRangeFill(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createLengthsRangeFill(loadOperatorName(op), lengths, size); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadRescaleQuantized(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadRescaleQuantized(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); float scale; @@ -1396,10 +1384,10 @@ ONNXModelLoader::loadRescaleQuantized(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createRescaleQuantized(loadOperatorName(op), in, outTy); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadRowwiseQuantizedSparseLengthsWeightedSum( +Error ONNXModelLoader::loadRowwiseQuantizedSparseLengthsWeightedSum( const ONNX_NAMESPACE::NodeProto &op, const ArgumentDictionaryTy &dict) { Constant *data; ASSIGN_VALUE_OR_RETURN_ERR(data, getConstantByName(op.input(0))); @@ -1418,10 +1406,10 @@ llvm::Error ONNXModelLoader::loadRowwiseQuantizedSparseLengthsWeightedSum( loadOperatorName(op), data, scales, offsets, weights, indices, lengths); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadFusedRowwiseQuantizedSparseLengthsWeightedSum( +Error ONNXModelLoader::loadFusedRowwiseQuantizedSparseLengthsWeightedSum( const ONNX_NAMESPACE::NodeProto &op, const ArgumentDictionaryTy &dict) { NodeValue data; ASSIGN_VALUE_OR_RETURN_ERR(data, getNodeValueByName(op.input(0))); @@ -1436,12 +1424,11 @@ llvm::Error ONNXModelLoader::loadFusedRowwiseQuantizedSparseLengthsWeightedSum( loadOperatorName(op), data, weights, indices, lengths); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error -ONNXModelLoader::loadFullyConnected(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadFullyConnected(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { NodeValue in; ASSIGN_VALUE_OR_RETURN_ERR(in, getNodeValueByName(op.input(0))); Constant *W; @@ -1457,21 +1444,21 @@ ONNXModelLoader::loadFullyConnected(const ONNX_NAMESPACE::NodeProto &op, Node *N = G_.createFullyConnected(loadOperatorName(op), in, W, B, axis); RETURN_IF_ERR(addNodeAsOutput(op, N)); - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadSplat(const ONNX_NAMESPACE::NodeProto &op, - const ArgumentDictionaryTy &dict) { +Error ONNXModelLoader::loadSplat(const ONNX_NAMESPACE::NodeProto &op, + const ArgumentDictionaryTy &dict) { return loadConstantOfShape(op, dict, true /* isSplat */); } -llvm::Error ONNXModelLoader::loadRowwiseQuantizedFullyConnected( +Error 
ONNXModelLoader::loadRowwiseQuantizedFullyConnected( const ONNX_NAMESPACE::NodeProto &op, const ArgumentDictionaryTy &dict) { // TODO RETURN_ERR("Not implemented."); } -llvm::Error ONNXModelLoader::loadOperator(const ONNX_NAMESPACE::NodeProto &op) { +Error ONNXModelLoader::loadOperator(const ONNX_NAMESPACE::NodeProto &op) { ArgumentDictionaryTy dict = loadArgumentMap(op); const std::string &typeName = op.op_type(); @@ -1480,7 +1467,7 @@ llvm::Error ONNXModelLoader::loadOperator(const ONNX_NAMESPACE::NodeProto &op) { ASSIGN_VALUE_OR_RETURN_ERR(tryLoadCommonOperatorResult, tryLoadCommonOperator(typeName, op, dict)); if (tryLoadCommonOperatorResult) { - return llvm::Error::success(); + return Error::success(); } if (typeName == "Constant") { @@ -1600,20 +1587,20 @@ llvm::Error ONNXModelLoader::loadOperator(const ONNX_NAMESPACE::NodeProto &op) { } RETURN_ERR("Failed to load operator " + typeName + " .", - GlowErr::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); + ErrorValue::ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR); } -llvm::Error ONNXModelLoader::loadInitializers(ONNX_NAMESPACE::GraphProto &net) { +Error ONNXModelLoader::loadInitializers(ONNX_NAMESPACE::GraphProto &net) { // Load the network initializers: for (const auto &in : net.initializer()) { Tensor T; RETURN_IF_ERR(loadTensor(in, &T)); RETURN_IF_ERR(createAndRegisterConstant(in.name(), std::move(T))); } - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::setOutputNodes(ONNX_NAMESPACE::GraphProto &net) { +Error ONNXModelLoader::setOutputNodes(ONNX_NAMESPACE::GraphProto &net) { if (net.output_size() == 0) { RETURN_ERR("Net output size must be greater than 0"); } @@ -1626,10 +1613,10 @@ llvm::Error ONNXModelLoader::setOutputNodes(ONNX_NAMESPACE::GraphProto &net) { outputVarsByName_[outputName] = SN->getPlaceholder(); } - return llvm::Error::success(); + return Error::success(); } -llvm::Error ONNXModelLoader::loadNetwork(ONNX_NAMESPACE::GraphProto &net) { +Error ONNXModelLoader::loadNetwork(ONNX_NAMESPACE::GraphProto &net) { /// Load the network operators: for (int i = 0; i < net.node_size(); i++) { auto &op = net.node(i); @@ -1643,18 +1630,17 @@ llvm::Error ONNXModelLoader::loadNetwork(ONNX_NAMESPACE::GraphProto &net) { RETURN_IF_ERR(loadOperator(op)); } - return llvm::Error::success(); + return Error::success(); } -ONNXModelLoader::ONNXModelLoader(Function &F, llvm::Error *errPtr) +ONNXModelLoader::ONNXModelLoader(Function &F, Error *errPtr) : CommonOperatorLoader({}, {}, F, errPtr) { deleteUnusedConstants(); } -llvm::Error -ONNXModelLoader::checkInputs(ONNX_NAMESPACE::GraphProto &net, - llvm::ArrayRef tensorNames, - llvm::ArrayRef types) { +Error ONNXModelLoader::checkInputs(ONNX_NAMESPACE::GraphProto &net, + llvm::ArrayRef tensorNames, + llvm::ArrayRef types) { for (size_t i = 0; i < tensorNames.size(); i++) { // Look if a corresponding input exists.
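// checkInputs validates the caller-supplied name/type lists up front so a
// mismatch surfaces as one Error instead of a partially loaded graph. The
// guard in isolation, as a sketch assuming Glow's Error.h and the ArrayRef
// element types from the loader interface (validateNames is hypothetical):
//
//   static Error validateNames(llvm::ArrayRef<const char *> names,
//                              llvm::ArrayRef<TypeRef> types) {
//     RETURN_ERR_IF_NOT(names.size() == types.size(),
//                       "Invalid initialization list");
//     return Error::success();
//   }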
for (int j = 0; j < net.input_size(); j++) { @@ -1680,22 +1666,22 @@ ONNXModelLoader::checkInputs(ONNX_NAMESPACE::GraphProto &net, } } } - return llvm::Error::success(); + return Error::success(); } ONNXModelLoader::ONNXModelLoader(const std::string &modelDescFilename, llvm::ArrayRef tensorNames, llvm::ArrayRef types, Function &F, - llvm::Error *errPtr) + Error *errPtr) : CommonOperatorLoader(tensorNames, types, F, errPtr) { // if errPtr already contains an error then don't continue with constructor if (errPtr && *errPtr) { return; } - // Lambda to setup the ONNXModelLoader and return any llvm::Errors that were + // Lambda to setup the ONNXModelLoader and return any Errors that were // raised. - auto setup = [&]() -> llvm::Error { + auto setup = [&]() -> Error { // The ONNX model that we are deserializing. ONNX_NAMESPACE::ModelProto modelDef; ASSIGN_VALUE_OR_RETURN_ERR(modelDef, loadProto(modelDescFilename)); @@ -1720,7 +1706,7 @@ ONNXModelLoader::ONNXModelLoader(const std::string &modelDescFilename, deleteUnusedConstants(); - return llvm::Error::success(); + return Error::success(); }; if (errPtr) { @@ -1733,16 +1719,16 @@ ONNXModelLoader::ONNXModelLoader(const std::string &modelDescFilename, ONNXModelLoader::ONNXModelLoader( const void *model, uint32_t modelSize, uint32_t weightsCount, const onnxTensorDescriptorV1 *weightDescriptors, Function &F, - bool loadInputsAsPlaceholders, llvm::Error *errPtr) + bool loadInputsAsPlaceholders, Error *errPtr) : CommonOperatorLoader({}, {}, F, errPtr) { // if errPtr already contains an error then don't continue with constructor if (errPtr && *errPtr) { return; } - // Lambda to setup the ONNXModelLoader and return any llvm::Errors that were + // Lambda to setup the ONNXModelLoader and return any Errors that were // raised. - auto setup = [&]() -> llvm::Error { + auto setup = [&]() -> Error { ONNX_NAMESPACE::ModelProto modelDef; ASSIGN_VALUE_OR_RETURN_ERR(modelDef, loadProto(model, modelSize)); @@ -1762,7 +1748,7 @@ ONNXModelLoader::ONNXModelLoader( deleteUnusedConstants(); - return llvm::Error::success(); + return Error::success(); }; if (errPtr) { diff --git a/lib/Importer/ProtobufLoader.cpp b/lib/Importer/ProtobufLoader.cpp index 8e4a22b323..2633f66cb3 100644 --- a/lib/Importer/ProtobufLoader.cpp +++ b/lib/Importer/ProtobufLoader.cpp @@ -72,7 +72,7 @@ Constant *ProtobufLoader::getConstantByNameOrNull(llvm::StringRef name) const { return res ? 
res : nullptr; } -llvm::Expected +Expected ProtobufLoader::getConstantByName(llvm::StringRef name) const { auto *ptr = getConstantByNameOrNull(name); RETURN_ERR_IF_NOT( @@ -84,7 +84,7 @@ bool ProtobufLoader::hasConstantByName(llvm::StringRef name) const { return getConstantByNameOrNull(name) != nullptr; } -llvm::Expected +Expected ProtobufLoader::getOutputByName(llvm::StringRef name) const { auto it = outputVarsByName_.find(name); RETURN_ERR_IF_NOT( @@ -104,7 +104,7 @@ ProtobufLoader::getNodeValueByNameOrNullNodeValue(llvm::StringRef name) const { return NodeValue(nullptr); } -llvm::Expected +Expected ProtobufLoader::getNodeValueByName(llvm::StringRef name) const { RETURN_ERR_IF_NOT(hasNodeByName(name), llvm::Twine("No node under name ", name).str()); @@ -113,20 +113,20 @@ ProtobufLoader::getNodeValueByName(llvm::StringRef name) const { return node; } -llvm::Error ProtobufLoader::createAndRegisterConstant(llvm::StringRef name, - Tensor &&tensor) { +Error ProtobufLoader::createAndRegisterConstant(llvm::StringRef name, + Tensor &&tensor) { auto it = nodeValueByName_.find(name); if (it != nodeValueByName_.end()) { if (llvm::dyn_cast(it->second.getNode())) { // Placeholders take precedence over Constants. - return llvm::Error::success(); + return Error::success(); } } // Note: We do not support training from models loaded from protos, so // trainable is always set to false here. Constant *node = G_.getParent()->createConstant(name, std::move(tensor)); nodeValueByName_[name] = node->getOutput(); - return llvm::Error::success(); + return Error::success(); } void ProtobufLoader::deleteUnusedConstants() { @@ -150,7 +150,7 @@ void ProtobufLoader::deleteUnusedConstants() { } } -llvm::Expected +Expected ProtobufLoader::createAndRegisterPlaceholder(llvm::StringRef name, TypeRef T) { RETURN_ERR_IF_NOT( !hasNodeByName(name), @@ -166,7 +166,7 @@ bool ProtobufLoader::hasNodeByName(llvm::StringRef name) const { ProtobufLoader::ProtobufLoader(llvm::ArrayRef tensorNames, llvm::ArrayRef types, Function &F, - llvm::Error *errPtr) + Error *errPtr) : G_(F) { // Verify that the version of the library that we linked against is // compatible with the version of the headers we compiled against. @@ -177,9 +177,9 @@ ProtobufLoader::ProtobufLoader(llvm::ArrayRef tensorNames, return; } - // Lambda to setup the ProtobufLoader and return any llvm::Errors that were + // Lambda to setup the ProtobufLoader and return any Errors that were // raised. 
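// The setup lambda below unpacks Expected<Placeholder *> by hand, forwarding
// a failure with takeError() instead of going through
// ASSIGN_VALUE_OR_RETURN_ERR. The same shape in isolation (a sketch,
// assuming Glow's Error.h):
//
//   auto placeholderOrErr = createAndRegisterPlaceholder(name, type);
//   if (!placeholderOrErr) {
//     return placeholderOrErr.takeError();  // Error flows to the caller.
//   }
//   // On success the placeholder is already registered with the loader, so
//   // the returned pointer is not needed here.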
- auto setup = [&]() -> llvm::Error { + auto setup = [&]() -> Error { RETURN_ERR_IF_NOT(tensorNames.size() == types.size(), "Invalid initialization list"); for (size_t i = 0, e = tensorNames.size(); i < e; i++) { @@ -191,7 +191,7 @@ ProtobufLoader::ProtobufLoader(llvm::ArrayRef tensorNames, return placeholderOrErr.takeError(); } } - return llvm::Error::success(); + return Error::success(); }; if (errPtr) { diff --git a/lib/LLVMIRCodeGen/LLVMBackend.cpp b/lib/LLVMIRCodeGen/LLVMBackend.cpp index ee412d36b8..d244921d3c 100644 --- a/lib/LLVMIRCodeGen/LLVMBackend.cpp +++ b/lib/LLVMIRCodeGen/LLVMBackend.cpp @@ -131,7 +131,7 @@ LLVMBackend::compileIRWithoutConstants(IRFunction *IR) const { return createCompiledFunction(std::move(JIT), std::move(runtimeInfo)); } -llvm::Expected> +Expected> LLVMBackend::compile(Function *F, const BackendOptions &opts) const { TraceInfo traceInfo = buildManualTraceInfo(F); auto IR = generateAndOptimizeIR(F, *this, shouldShareBuffers()); @@ -148,8 +148,7 @@ LLVMBackend::compile(Function *F, const BackendOptions &opts) const { } compiledFunc->setTraceInfo(std::move(traceInfo)); - return llvm::Expected>( - std::move(compiledFunc)); + return Expected>(std::move(compiledFunc)); } void LLVMBackend::save(Function *F, llvm::StringRef outputDir, diff --git a/lib/LLVMIRCodeGen/LLVMCompiledFunction.cpp b/lib/LLVMIRCodeGen/LLVMCompiledFunction.cpp index b3684397d4..4897fef7a9 100644 --- a/lib/LLVMIRCodeGen/LLVMCompiledFunction.cpp +++ b/lib/LLVMIRCodeGen/LLVMCompiledFunction.cpp @@ -66,7 +66,7 @@ void LLVMCompiledFunction::updatePlaceholders( } } -llvm::Error LLVMCompiledFunction::execute(ExecutionContext *context) { +Error LLVMCompiledFunction::execute(ExecutionContext *context) { uint8_t *baseActivationsAddress{nullptr}; /// Base address for Mutable weights memory block, Inputs and Outputs. @@ -94,7 +94,7 @@ llvm::Error LLVMCompiledFunction::execute(ExecutionContext *context) { auto *traceContext = context->getTraceContext(); TRACE_EVENT_SCOPE_NAMED(traceContext, TraceLevel::RUNTIME, "findJitmainSymbol", fjEvent); - llvm::Expected address = NULL; + Expected address = NULL; { std::lock_guard lock(JITLock_); auto sym = JIT_->findSymbol("jitmain"); @@ -102,7 +102,14 @@ llvm::Error LLVMCompiledFunction::execute(ExecutionContext *context) { DCHECK(sym) << "Unable to JIT the code!"; // We know address is success since we just made it. Mark it as checked. if (address) { - address = sym.getAddress(); + auto addrOrLLVMError = sym.getAddress(); + if (addrOrLLVMError) { + address = addrOrLLVMError.get(); + } else { + address = MAKE_ERR( + strFormat("Failed to get address: %s", + llvm::toString(addrOrLLVMError.takeError()).data())); + } } } using JitFuncType = @@ -135,7 +142,7 @@ llvm::Error LLVMCompiledFunction::execute(ExecutionContext *context) { translateTraceEvents(context); } - return llvm::Error::success(); + return Error::success(); } void LLVMCompiledFunction::translateTraceEvents( diff --git a/lib/Onnxifi/Base.cpp b/lib/Onnxifi/Base.cpp index fc8028b64e..cc023058f7 100644 --- a/lib/Onnxifi/Base.cpp +++ b/lib/Onnxifi/Base.cpp @@ -47,7 +47,7 @@ onnxStatus Backend::checkGraphCompatibility(const void *onnxModel, // TODO: Use a more specific ONNXIFI error code here to denote what about // this operator is not supported (shape, type, etc). 
LOG(ERROR) << "Error when loading protobuf: " - << llvm::toString(loaderOrErr.takeError()); + << ERR_TO_STRING(loaderOrErr.takeError()); return ONNXIFI_STATUS_UNSUPPORTED_OPERATOR; } diff --git a/lib/Onnxifi/HostManagerOnnxifi.cpp b/lib/Onnxifi/HostManagerOnnxifi.cpp index 5c9f82b04b..32288126d2 100644 --- a/lib/Onnxifi/HostManagerOnnxifi.cpp +++ b/lib/Onnxifi/HostManagerOnnxifi.cpp @@ -85,7 +85,7 @@ onnxStatus HostManagerBackend::addNetwork(std::unique_ptr module) { auto err = hostManager_->addNetwork(std::move(module), cctx, GlowSaturateHost); - if (errToBool(std::move(err))) { + if (ERR_TO_BOOL(std::move(err))) { return ONNXIFI_STATUS_INTERNAL_ERROR; } @@ -96,7 +96,7 @@ onnxStatus HostManagerBackend::removeNetwork(const Graph *graph) { auto hostManagerGraph = static_cast(graph); auto error = hostManager_->removeNetwork(hostManagerGraph->getName()); - if (errorToBool(std::move(error))) { + if (ERR_TO_BOOL(std::move(error))) { return ONNXIFI_STATUS_INTERNAL_ERROR; } @@ -115,7 +115,7 @@ HostManagerGraph::initGraph(const void *onnxModel, size_t onnxModelSize, // TODO: make better error reporting. std::unique_ptr loader = - TEMP_EXIT_ON_ERR(ONNXIFIModelLoader::parse( + EXIT_ON_ERR(ONNXIFIModelLoader::parse( onnxModel, onnxModelSize, weightCount, weightDescriptors, *function, true /*loadInputsAsPlaceholders*/, backendPtr_->getUseOnnx())); @@ -136,14 +136,13 @@ onnxStatus HostManagerGraph::run(std::unique_ptr ctx, onnxTraceEventList *traceEvents) { backendPtr_->runNetwork( this, std::move(ctx), - [outputEvent, traceEvents](runtime::RunIdentifierTy runId, - llvm::Error err, + [outputEvent, traceEvents](runtime::RunIdentifierTy runId, Error err, std::unique_ptr ctx) { TRACE_EVENT_SCOPE(ctx->getTraceContext(), TraceLevel::RUNTIME, "Onnxifi::callback"); - // If an Error occurred then log it in errToBool and signal the output + // If an Error occurred then log it in ERR_TO_BOOL and signal the output // event. - if (errToBool(std::move(err))) { + if (ERR_TO_BOOL(std::move(err))) { outputEvent->signal(ONNXIFI_STATUS_INTERNAL_ERROR); return; } diff --git a/lib/Onnxifi/InlineOnnxifi.cpp b/lib/Onnxifi/InlineOnnxifi.cpp index e2ca3edd5a..2133f0a33c 100644 --- a/lib/Onnxifi/InlineOnnxifi.cpp +++ b/lib/Onnxifi/InlineOnnxifi.cpp @@ -48,7 +48,7 @@ InlineGraph::initGraph(const void *onnxModel, size_t onnxModelSize, function_ = executionEngine_.getModule().createFunction("function"); std::unique_ptr loader = - TEMP_EXIT_ON_ERR(ONNXIFIModelLoader::parse( + EXIT_ON_ERR(ONNXIFIModelLoader::parse( onnxModel, onnxModelSize, weightCount, weightDescriptors, *function_, true /*loadInputsAsPlaceholders*/, backendPtr_->getUseOnnx())); diff --git a/lib/Optimizer/GraphOptimizer/ConstantFolding.cpp b/lib/Optimizer/GraphOptimizer/ConstantFolding.cpp index 908c93633f..e775ab281b 100644 --- a/lib/Optimizer/GraphOptimizer/ConstantFolding.cpp +++ b/lib/Optimizer/GraphOptimizer/ConstantFolding.cpp @@ -165,7 +165,7 @@ evaluateConstantOperation(Backend &backend, CompilationContext &cctx, Node *C) { /// Check if function \p F consists of constant operations only. LLVM_ATTRIBUTE_USED -llvm::Error verifyConstantFunction(Backend &backend, Function &F) { +Error verifyConstantFunction(Backend &backend, Function &F) { // Perform the checks in DEBUG builds only. for (auto &N : F.getNodes()) { // Saving results is fine. 
@@ -187,7 +187,7 @@ llvm::Error verifyConstantFunction(Backend &backend, Function &F) { } RETURN_ERR("Expected constant operation"); } - return llvm::Error::success(); + return Error::success(); } /// Perform a compile-time constant folding of the node \p N using the provided @@ -205,16 +205,16 @@ std::vector constantFoldNodeImpl(Backend &backend, Node *N) { } // namespace -llvm::Error glow::executeConstantFunction(Backend &backend, Function &F, - PlaceholderBindings &bindings, - CompilationContext &cctx) { +Error glow::executeConstantFunction(Backend &backend, Function &F, + PlaceholderBindings &bindings, + CompilationContext &cctx) { // Perform the checks in DEBUG builds only. #ifndef NDEBUG RETURN_IF_ERR(verifyConstantFunction(backend, F)); #endif auto compiledF = compile(backend, F, cctx); run(backend, *compiledF, bindings); - return llvm::Error::success(); + return Error::success(); } /// Perform constant folding in the function \p F . Any non-trivial node (i.e. diff --git a/lib/Optimizer/GraphOptimizer/GraphOptimizer.cpp b/lib/Optimizer/GraphOptimizer/GraphOptimizer.cpp index 70b5ec9307..ee438af132 100644 --- a/lib/Optimizer/GraphOptimizer/GraphOptimizer.cpp +++ b/lib/Optimizer/GraphOptimizer/GraphOptimizer.cpp @@ -2939,8 +2939,8 @@ static void transformForPrecisionMode(const Backend &B, Function *F, } } -llvm::Error glow::optimizeFunctionBeforeLowering(Function *F, - CompilationContext &cctx) { +Error glow::optimizeFunctionBeforeLowering(Function *F, + CompilationContext &cctx) { LOG_SCOPE(F->getLogContext(), "glow::optimizeFunctionBeforeLowering") // Verify the function pre-optimization/lowering. @@ -2959,13 +2959,13 @@ llvm::Error glow::optimizeFunctionBeforeLowering(Function *F, // Optimize the graph. Only runs optimizations that are target-independent. ::glow::optimize(F, cctx); - return llvm::Error::success(); + return Error::success(); } // NOTE: When updating this function, please also update the documentation in // docs/GraphOptimizationPipeline.md -llvm::Error glow::optimizeFunction(Function *F, const Backend &B, - CompilationContext &cctx) { +Error glow::optimizeFunction(Function *F, const Backend &B, + CompilationContext &cctx) { LOG_SCOPE(F->getLogContext(), "glow::optimizeFunction") RETURN_IF_ERR(optimizeFunctionBeforeLowering(F, cctx)); @@ -3001,12 +3001,12 @@ llvm::Error glow::optimizeFunction(Function *F, const Backend &B, // state became lowered. Do one more verification pass to make sure everything // is in order and to bail if it is not. 
if (!B.verify(*F)) { - return MAKE_ERR(GlowErr::ErrorCode::COMPILE_UNSUPPORTED_NODE_AFTER_OPTIMIZE, - "Unsupported node(s) found after optimizing Function " + - F->getName().str() + " for backend " + - B.getBackendName()); + return MAKE_ERR( + ErrorValue::ErrorCode::COMPILE_UNSUPPORTED_NODE_AFTER_OPTIMIZE, + "Unsupported node(s) found after optimizing Function " + + F->getName().str() + " for backend " + B.getBackendName()); } - return llvm::Error::success(); + return Error::success(); } bool glow::executeVerticalFCWeightsSplit(Function *F, unsigned numOfChunks, diff --git a/lib/Optimizer/GraphOptimizer/TrainingPreparation.cpp b/lib/Optimizer/GraphOptimizer/TrainingPreparation.cpp index bfed2c3225..81dc733eb3 100644 --- a/lib/Optimizer/GraphOptimizer/TrainingPreparation.cpp +++ b/lib/Optimizer/GraphOptimizer/TrainingPreparation.cpp @@ -75,10 +75,9 @@ TensorInitializer getDefaultTensorInitializer() { return defaultTensorInitializer; } -llvm::Error prepareFunctionForTraining(Function *F, - PlaceholderBindings &bindings, - Placeholder *&selected, - TensorInitializer &&initializer) { +Error prepareFunctionForTraining(Function *F, PlaceholderBindings &bindings, + Placeholder *&selected, + TensorInitializer &&initializer) { auto &nodes = F->getNodes(); @@ -129,6 +128,6 @@ llvm::Error prepareFunctionForTraining(Function *F, } } - return llvm::Error::success(); + return Error::success(); } } // namespace glow diff --git a/lib/Optimizer/IROptimizer/IROptimizer.cpp b/lib/Optimizer/IROptimizer/IROptimizer.cpp index 41b1f394ea..80272d94b0 100644 --- a/lib/Optimizer/IROptimizer/IROptimizer.cpp +++ b/lib/Optimizer/IROptimizer/IROptimizer.cpp @@ -1655,7 +1655,7 @@ glow::generateAndOptimizeIR(Function *F, const Backend &B, ::glow::optimize(*IR, shouldShareBuffers); if (!B.verify(*IR)) { EXIT_ON_ERR(MAKE_ERR( - GlowErr::ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_OPTIMIZE, + ErrorValue::ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_OPTIMIZE, "Unsupported instruction(s) found after optimizing IR " + IR->getName().str() + " for backend " + B.getBackendName())); } diff --git a/lib/Partitioner/Partitioner.cpp b/lib/Partitioner/Partitioner.cpp index c33c4401e0..f070bab0f9 100644 --- a/lib/Partitioner/Partitioner.cpp +++ b/lib/Partitioner/Partitioner.cpp @@ -72,13 +72,13 @@ void Partitioner::init() { } } -llvm::Error Partitioner::finalize(const DAGListTy &partitions, - const NodeToFunctionMap &mapping) { +Error Partitioner::finalize(const DAGListTy &partitions, + const NodeToFunctionMap &mapping) { // Validate the functions after partitioning. 
for (Function *subF : module_->getFunctions()) { if (!subF->verify()) { - return MAKE_ERR(GlowErr::ErrorCode::PARTITIONER_ERROR, + return MAKE_ERR(ErrorValue::ErrorCode::PARTITIONER_ERROR, "Conversion led to invalid function " + subF->getName().str()); } @@ -97,7 +97,7 @@ llvm::Error Partitioner::finalize(const DAGListTy &partitions, for (const auto &node : partitions[0].nodes) { Function *subF = module_->getFunction(node->name); if (!subF) { - return MAKE_ERR(GlowErr::ErrorCode::PARTITIONER_ERROR, + return MAKE_ERR(ErrorValue::ErrorCode::PARTITIONER_ERROR, "Invalid function name " + node->name); } subF->dumpDAG("partitionLogicalID" + @@ -105,7 +105,7 @@ llvm::Error Partitioner::finalize(const DAGListTy &partitions, subF->getName().str() + "__" + node->backendName + ".dot"); } } - return llvm::Error::success(); + return Error::success(); } Partitioner::Partitioner(Module *parent, const std::vector &devices, @@ -248,7 +248,7 @@ void Partitioner::saturateHost(unsigned logicalDeviceCount, } } -llvm::Expected Partitioner::backendBasedPartition( +Expected Partitioner::backendBasedPartition( FunctionToBackendNameMap &funcToBackend, Function *F, std::vector &backends, CompilationContext &cctx) { NodeToFunctionMap mapping; @@ -289,7 +289,7 @@ llvm::Expected Partitioner::backendBasedPartition( } } if (nodeToBackendName.find(&N) == nodeToBackendName.end()) { - return MAKE_ERR(GlowErr::ErrorCode::PARTITIONER_ERROR, + return MAKE_ERR(ErrorValue::ErrorCode::PARTITIONER_ERROR, "Node is not supported by any of the provided backends"); } } @@ -396,7 +396,7 @@ void Partitioner::genBackendMap( } } -llvm::Expected Partitioner::createDAGWithoutPartition( +Expected Partitioner::createDAGWithoutPartition( llvm::StringRef backendName, std::map &backendMap, CompilationContext &cctx) { DAGListTy partitions; @@ -430,12 +430,11 @@ llvm::Expected Partitioner::createDAGWithoutPartition( return std::move(partitions); } -llvm::Expected -Partitioner::loadBalancedPartition(CompilationContext &cctx, - size_t numDevices) { +Expected Partitioner::loadBalancedPartition(CompilationContext &cctx, + size_t numDevices) { if (module_->getFunctions().size() != 1) { return MAKE_ERR( - GlowErr::ErrorCode::PARTITIONER_ERROR, + ErrorValue::ErrorCode::PARTITIONER_ERROR, strFormat("Invalid : %lu functions in a module. Now in load-balanced " "partition flow, the module can only contain 1 function", module_->getFunctions().size())); @@ -579,7 +578,7 @@ Partitioner::loadBalancedPartition(CompilationContext &cctx, // Throw error if we were not able to put this node into any partition if (curPartition >= numDevices) { - return MAKE_ERR(GlowErr::ErrorCode::PARTITIONER_ERROR, + return MAKE_ERR(ErrorValue::ErrorCode::PARTITIONER_ERROR, "Load balance partition error"); } } @@ -607,13 +606,13 @@ Partitioner::loadBalancedPartition(CompilationContext &cctx, return std::move(partitions); } -llvm::Expected +Expected Partitioner::quantizationProfilingPartition(CompilationContext &cctx) { // For quantization profiling flow, currently we assume there is only 1 // function in a module. if (module_->getFunctions().size() != 1) { return MAKE_ERR( - GlowErr::ErrorCode::PARTITIONER_ERROR, + ErrorValue::ErrorCode::PARTITIONER_ERROR, strFormat( "Invalid : %lu functions in a module. 
In quantization profiling " "partition flow, the module can only contain 1 function", @@ -647,7 +646,7 @@ Partitioner::quantizationProfilingPartition(CompilationContext &cctx) { return std::move(partitions); } -llvm::Expected +Expected Partitioner::heterogeneousPartition(CompilationContext &cctx) { DAGListTy partitions; // Prepare the mapping between BackendName and BackendInfo. @@ -682,7 +681,7 @@ Partitioner::heterogeneousPartition(CompilationContext &cctx) { // in a module is supported. if (module_->getFunctions().size() != 1) { return MAKE_ERR( - GlowErr::ErrorCode::PARTITIONER_ERROR, + ErrorValue::ErrorCode::PARTITIONER_ERROR, strFormat("Invalid : %lu functions in a module. Now in heterogeneous " "partition flow, the module can only contain 1 function", module_->getFunctions().size())); @@ -692,7 +691,7 @@ Partitioner::heterogeneousPartition(CompilationContext &cctx) { // in a module is supported. if (module_->getFunctions().size() != 1) { return MAKE_ERR( - GlowErr::ErrorCode::PARTITIONER_ERROR, + ErrorValue::ErrorCode::PARTITIONER_ERROR, strFormat( "Invalid : %lu functions in a module. Now in heterogeneous partition\ flow, the module can only contain 1 function", @@ -760,7 +759,7 @@ Partitioner::heterogeneousPartition(CompilationContext &cctx) { return std::move(partitions); } -llvm::Expected +Expected Partitioner::partitionFromConfig(const PartitionConfig &partitionConfig) { DAGListTy partitions; // Prepare the mapping between BackendName and BackendInfo. @@ -768,7 +767,7 @@ Partitioner::partitionFromConfig(const PartitionConfig &partitionConfig) { genBackendMap(backendMap_, backendHolder, backends); Function *F = module_->getFunction(partitionConfig.funcName); if (!F) { - return MAKE_ERR(GlowErr::ErrorCode::PARTITIONER_ERROR, + return MAKE_ERR(ErrorValue::ErrorCode::PARTITIONER_ERROR, strFormat("Can't find function %s in current module.", F->getName().str().data())); } @@ -855,7 +854,7 @@ Partitioner::partitionFromConfig(const PartitionConfig &partitionConfig) { return std::move(partitions); } -llvm::Expected Partitioner::partition(CompilationContext &cctx) { +Expected Partitioner::partition(CompilationContext &cctx) { if (partitionConfig_.enabled()) { // Call user-defined partition flow. 
return partitionFromConfig(partitionConfig_); diff --git a/lib/Partitioner/PartitionerValidation.cpp b/lib/Partitioner/PartitionerValidation.cpp index a600091681..36645673b6 100644 --- a/lib/Partitioner/PartitionerValidation.cpp +++ b/lib/Partitioner/PartitionerValidation.cpp @@ -15,10 +15,12 @@ */ #include "glow/Partitioner/PartitionerValidation.h" +#include "llvm/Support/FormatVariadic.h" + namespace glow { -llvm::Error -logicalDevicesValidation(const NodeToFunctionMap &partitions, - const std::map &backendMap) { +Error logicalDevicesValidation( + const NodeToFunctionMap &partitions, + const std::map &backendMap) { std::map> partitionsNum; for (auto &func : partitions.getPartitions()) { auto backendName = partitions.getPartitionBackendName(func); @@ -38,12 +40,12 @@ logicalDevicesValidation(const NodeToFunctionMap &partitions, partitionsNum[backendName].size()) .str()); } - return llvm::Error::success(); + return Error::success(); } -llvm::Error -memoryUsageValidation(const NodeToFunctionMap &partitions, - const std::map &backendMap) { +Error memoryUsageValidation( + const NodeToFunctionMap &partitions, + const std::map &backendMap) { for (auto &func : partitions.getPartitions()) { auto backendName = partitions.getPartitionBackendName(func); auto usedMemSize = partitions.getGraphMemInfo(func).getTotalMemSize(); @@ -56,7 +58,7 @@ memoryUsageValidation(const NodeToFunctionMap &partitions, usedMemSize, availableMemSize, backendName) .str()); } - return llvm::Error::success(); + return Error::success(); } /// \returns true if \p node contains no cycles. \p path contains the nodes in a @@ -83,7 +85,7 @@ static bool isDAG(DAGNode *node, llvm::SmallSet &path, return true; } -llvm::Error dagValidation(const DAG &dag) { +Error dagValidation(const DAG &dag) { auto *root = dag.root.get(); llvm::SmallSet path; llvm::SmallSet visited; @@ -98,6 +100,6 @@ llvm::Error dagValidation(const DAG &dag) { // There should not be isolated nodes in partitions. RETURN_ERR_IF_NOT((visited.size() == dag.nodes.size() + 1), "Invalid partition: isolated node is detected."); - return llvm::Error::success(); + return Error::success(); } } // namespace glow diff --git a/lib/Runtime/Executor/ThreadPoolExecutor.cpp b/lib/Runtime/Executor/ThreadPoolExecutor.cpp index ba29209b1c..e8c2860aaa 100644 --- a/lib/Runtime/Executor/ThreadPoolExecutor.cpp +++ b/lib/Runtime/Executor/ThreadPoolExecutor.cpp @@ -79,7 +79,7 @@ void ThreadPoolExecutor::run(const DAGNode *root, // Don't process new requests if the executor is shutting down. if (shuttingDown_) { cb(runId, - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_REQUEST_REFUSED, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_REQUEST_REFUSED, "ThreadPoolExecutor is shutting down"), std::move(context)); return; @@ -88,7 +88,7 @@ void ThreadPoolExecutor::run(const DAGNode *root, // If list of roots is empty, there is nothing to do. Give back the // bindings so the caller can reuse it. if (!root) { - cb(runId, llvm::Error::success(), std::move(context)); + cb(runId, Error::success(), std::move(context)); return; } @@ -134,7 +134,7 @@ void ThreadPoolExecutor::executeDAGNode( if (deviceManagerIt == deviceManagers_.end()) { // Mark the node as no longer executing. 
executionState->getErrorContainer().set( - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_DEVICE_NOT_FOUND, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_DEVICE_NOT_FOUND, "Cannot find the DeviceManager specified.")); executionState->decrementInflightNodes(); inflightBarrier_.decrement(); @@ -151,7 +151,7 @@ void ThreadPoolExecutor::executeDAGNode( deviceManager->runFunction( node->name, std::move(nodeCtx), [this, executionState, - node](RunIdentifierTy id, llvm::Error err, + node](RunIdentifierTy id, Error err, std::unique_ptr resultCtx) { // Immediately move the handling of the result onto this run's executor // to avoid doing work on the DeviceManager thread. @@ -165,7 +165,7 @@ void ThreadPoolExecutor::executeDAGNode( } void ThreadPoolExecutor::handleDeviceManagerResult( - std::shared_ptr executionState, llvm::Error err, + std::shared_ptr executionState, Error err, std::unique_ptr ctx, const DAGNode *node) { // If executionState is null, that means that the object was deleted diff --git a/lib/Runtime/HostManager/HostManager.cpp b/lib/Runtime/HostManager/HostManager.cpp index 20e8fd44bf..fe1ac3fbf6 100644 --- a/lib/Runtime/HostManager/HostManager.cpp +++ b/lib/Runtime/HostManager/HostManager.cpp @@ -26,6 +26,7 @@ #include "glow/Support/Support.h" #include "llvm/Support/CommandLine.h" +#include "llvm/Support/FormatVariadic.h" #include @@ -50,7 +51,7 @@ HostManager::HostManager(const HostConfig &hostConfig) : config_(hostConfig) {} HostManager::HostManager( std::vector> deviceConfigs) { // TODO: move all initialization out of constructor. - TEMP_EXIT_ON_ERR(init(std::move(deviceConfigs))); + EXIT_ON_ERR(init(std::move(deviceConfigs))); } HostManager::HostManager( @@ -58,19 +59,18 @@ HostManager::HostManager( const HostConfig &hostConfig) : config_(hostConfig) { // TODO: move all initialization out of constructor. 
- TEMP_EXIT_ON_ERR(init(std::move(deviceConfigs))); + EXIT_ON_ERR(init(std::move(deviceConfigs))); } -llvm::Expected HostManager::getNetworkDAG(llvm::StringRef network) { +Expected HostManager::getNetworkDAG(llvm::StringRef network) { auto it = networks_.find(network); if (it == networks_.end()) { - return MAKE_ERR(GlowErr::ErrorCode::RUNTIME_ERROR, "Network not found."); + return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_ERROR, "Network not found."); } - return it->second.dag; + return &it->second.dag; } -llvm::Error -HostManager::init(std::vector> configs) { +Error HostManager::init(std::vector> configs) { DeviceIDTy deviceCount = 0; for (auto &config : configs) { @@ -89,7 +89,7 @@ HostManager::init(std::vector> configs) { provisioner_.reset(new Provisioner(devices_)); executor_.reset(new ThreadPoolExecutor(devices_, config_.executorThreads)); exportMemoryCounters(); - return llvm::Error::success(); + return Error::success(); } void HostManager::exportMemoryCounters() { @@ -105,7 +105,7 @@ void HostManager::exportMemoryCounters() { } HostManager::~HostManager() { - llvm::toString(clearHost()); + ERR_TO_VOID(clearHost()); exportMemoryCounters(); } @@ -116,9 +116,8 @@ void HostManager::cleanupAddNetwork(llvm::ArrayRef names) { exportMemoryCounters(); } -llvm::Error HostManager::addNetwork(std::unique_ptr module, - CompilationContext &cctx, - bool saturateHost) { +Error HostManager::addNetwork(std::unique_ptr module, + CompilationContext &cctx, bool saturateHost) { std::vector names; { std::lock_guard networkLock(networkLock_); @@ -130,7 +129,7 @@ llvm::Error HostManager::addNetwork(std::unique_ptr module, processingNetworks_.find(name) != processingNetworks_.end()) { cleanupAddNetwork(names); return MAKE_ERR( - GlowErr::ErrorCode::RUNTIME_ERROR, + ErrorValue::ErrorCode::RUNTIME_ERROR, "Failed to add network: already have a function called " + name); } // Add the network to processingNetworks_ so we know it's being worked on. @@ -188,7 +187,7 @@ llvm::Error HostManager::addNetwork(std::unique_ptr module, // Since for profiling the provisioner will be reset, we only allow one // network in one HM. if (networks_.size() > 0) { - return MAKE_ERR(GlowErr::ErrorCode::RUNTIME_ERROR, + return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_ERROR, "For quantization profiling flow, there can't be other " "registered networks before this one"); } @@ -229,20 +228,20 @@ llvm::Error HostManager::addNetwork(std::unique_ptr module, } cleanupAddNetwork(names); } - return llvm::Error::success(); + return Error::success(); } -llvm::Error HostManager::removeNetwork(llvm::StringRef networkName) { +Error HostManager::removeNetwork(llvm::StringRef networkName) { std::lock_guard networkLock(networkLock_); auto networkIterator = networks_.find(networkName); if (networkIterator == networks_.end()) { - return llvm::Error::success(); + return Error::success(); } if (processingNetworks_.find(networkName) != processingNetworks_.end()) { // Return an error, the network is in an incomplete state likely because // it is still being added by a different call. 
- return MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_BUSY, + return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_BUSY, llvm::formatv("Cannot remove the network {0}, as it is " "currently being modified.", networkName) @@ -251,7 +250,7 @@ llvm::Error HostManager::removeNetwork(llvm::StringRef networkName) { // Issue an error as there are outstanding runs for the network if (networkIterator->second.refcount != 0) { - return MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_BUSY, + return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_BUSY, llvm::formatv("Cannot remove the network {0}, as there are " "still outstanding runs", networkName) @@ -264,11 +263,11 @@ llvm::Error HostManager::removeNetwork(llvm::StringRef networkName) { for (auto device : node->deviceIDs) { std::promise removeNetwork; auto done = removeNetwork.get_future(); - std::unique_ptr removeErr; + std::unique_ptr removeErr; devices_[device]->evictNetwork( node->name, - [&removeNetwork, &removeErr](std::string name, llvm::Error err) { - removeErr = llvm::make_unique(std::move(err)); + [&removeNetwork, &removeErr](std::string name, Error err) { + removeErr = llvm::make_unique(std::move(err)); removeNetwork.set_value(); }); done.get(); @@ -287,7 +286,7 @@ bool HostManager::networkAdded(llvm::StringRef networkName) { return networks_.find(networkName) != networks_.end(); } -llvm::Error HostManager::clearHost() { +Error HostManager::clearHost() { // shutdown the executor, blocking on any current inflight and prevent new // requests from being serviced. executor_->shutdown(); @@ -314,17 +313,17 @@ llvm::Error HostManager::clearHost() { return errContainer.get(); } -llvm::Error HostManager::runNetworkBlocking(llvm::StringRef networkName, - PlaceholderBindings &bindings) { +Error HostManager::runNetworkBlocking(llvm::StringRef networkName, + PlaceholderBindings &bindings) { std::unique_ptr phBindings(&bindings); std::unique_ptr context = llvm::make_unique(std::move(phBindings)); std::promise runPromise; auto fut = runPromise.get_future(); - std::unique_ptr runErr; + std::unique_ptr runErr; runNetwork( networkName, std::move(context), - [&runPromise, &runErr](runtime::RunIdentifierTy, llvm::Error err, + [&runPromise, &runErr](runtime::RunIdentifierTy, Error err, std::unique_ptr contextPtr) { // Don't delete ph bindings since they were created from a passed in // reference. 
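Each blocking wrapper in these HostManager hunks drains a callback-based API through a `std::promise`/`std::future` pair, parking the callback's `Error` in a `std::unique_ptr<Error>` until the caller can consume it. A condensed sketch of the pattern (`runAsync` and `runBlocking` are hypothetical stand-ins for the runNetwork/addNetwork/evictNetwork shapes above):

```
#include "glow/Support/Error.h"
#include "llvm/ADT/STLExtras.h" // llvm::make_unique

#include <functional>
#include <future>
#include <memory>

using namespace glow;

// Hypothetical callback-based API, standing in for the ones above.
void runAsync(std::function<void(Error)> cb);

Error runBlocking() {
  std::promise<void> done;
  auto fut = done.get_future();
  // Park the callback's Error on the heap until the future is fulfilled,
  // mirroring the std::unique_ptr<Error> locals above.
  std::unique_ptr<Error> asyncErr;
  runAsync([&asyncErr, &done](Error err) {
    asyncErr = llvm::make_unique<Error>(std::move(err));
    done.set_value();
  });
  fut.wait();
  // DCHECK_NOTNULL (from glog, as in runNetworkBlocking above) documents
  // that the pointer is always set once the promise has been fulfilled.
  return std::move(*DCHECK_NOTNULL(asyncErr.get()));
}
```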
@@ -332,7 +331,7 @@ llvm::Error HostManager::runNetworkBlocking(llvm::StringRef networkName, contextPtr->movePlaceholderBindings(); phBind.release(); - runErr = llvm::make_unique(std::move(err)); + runErr = llvm::make_unique(std::move(err)); runPromise.set_value(); }); @@ -340,17 +339,16 @@ llvm::Error HostManager::runNetworkBlocking(llvm::StringRef networkName, return std::move(*DCHECK_NOTNULL(runErr.get())); } -llvm::Error -HostManager::runNetworkBlocking(llvm::StringRef networkName, - std::unique_ptr context) { +Error HostManager::runNetworkBlocking( + llvm::StringRef networkName, std::unique_ptr context) { std::promise runPromise; auto fut = runPromise.get_future(); - std::unique_ptr runErr; + std::unique_ptr runErr; runNetwork( networkName, std::move(context), - [&runPromise, &runErr](runtime::RunIdentifierTy, llvm::Error err, + [&runPromise, &runErr](runtime::RunIdentifierTy, Error err, std::unique_ptr contextPtr) { - runErr = llvm::make_unique(std::move(err)); + runErr = llvm::make_unique(std::move(err)); runPromise.set_value(); }); @@ -371,7 +369,7 @@ void HostManager::dispatchNextRun() { networks_[request.networkName].dag.root.get(), std::move(request.context), request.requestID, [this, callback = request.callback, name = request.networkName]( - RunIdentifierTy runID, llvm::Error err, + RunIdentifierTy runID, Error err, std::unique_ptr context) { { std::lock_guard networkLock(networkLock_); @@ -414,7 +412,7 @@ HostManager::runNetwork(llvm::StringRef networkName, if (network == nullptr) { callback( currentRun, - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND, llvm::formatv("Function {0} not found", networkName).str()), std::move(context)); return currentRun; @@ -430,7 +428,7 @@ HostManager::runNetwork(llvm::StringRef networkName, callback( currentRun, MAKE_ERR( - GlowErr::ErrorCode::RUNTIME_REQUEST_REFUSED, + ErrorValue::ErrorCode::RUNTIME_REQUEST_REFUSED, strFormat( "The number of allowed queued requests has been exceeded. " "queued requests: %lu allowed requests: %zu", diff --git a/lib/Runtime/Provisioner/Provisioner.cpp b/lib/Runtime/Provisioner/Provisioner.cpp index b20765232c..164f92c7be 100644 --- a/lib/Runtime/Provisioner/Provisioner.cpp +++ b/lib/Runtime/Provisioner/Provisioner.cpp @@ -20,6 +20,8 @@ #include "glow/Graph/Graph.h" #include "glow/Support/Debug.h" +#include "llvm/Support/FormatVariadic.h" + #include #include #include @@ -57,8 +59,8 @@ Provisioner::Provisioner(DeviceManagerMapTy &devices) { } } -llvm::Error Provisioner::provision(DAGListTy &networks, Module &module, - CompilationContext &cctx) { +Error Provisioner::provision(DAGListTy &networks, Module &module, + CompilationContext &cctx) { // Walk the networks and group by logicalDeviceId. std::map> logicalDevices; // List of functions being added. 
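Everywhere in the Provisioner and HostManager hunks, `GlowErr::ErrorCode` becomes `ErrorValue::ErrorCode`; `MAKE_ERR` itself keeps both shapes, the message-only form (as in the ErrorTest cases later in this patch) and the code-plus-message form. A minimal sketch with an illustrative function name:

```
#include "glow/Support/Error.h"

using namespace glow;

// Illustrative only; reserveSlot is not a Glow API.
Error reserveSlot(bool busy) {
  if (busy) {
    // Coded form: the resulting ErrorValue carries RUNTIME_NET_BUSY in
    // addition to the message.
    return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_BUSY,
                    "cannot reserve: the slot is busy");
  }
  // MAKE_ERR("message") is the code-free form.
  return Error::success();
}
```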
@@ -75,7 +77,7 @@ llvm::Error Provisioner::provision(DAGListTy &networks, Module &module, for (auto &name : localActiveNames) { activeFunctions_.erase(name); } - return MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_BUSY, + return MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_BUSY, llvm::formatv("Cannot add the network {0}, as it is " "currently being provisioned.", node->name) @@ -189,7 +191,7 @@ llvm::Error Provisioner::provision(DAGListTy &networks, Module &module, if (logicalDeviceSize[i].second > deviceMemory[j].second) { cleanupProvision(localActiveNames); return MAKE_ERR( - GlowErr::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY, + ErrorValue::ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY, llvm::formatv("Not enough memory to provision functions " "onto devices. Need {0} bytes, have {1}.", logicalDeviceSize[i].second, deviceMemory[j].second) @@ -199,11 +201,11 @@ llvm::Error Provisioner::provision(DAGListTy &networks, Module &module, DeviceIDTy logicalID = logicalDeviceSize[i].first; std::promise addPromise; auto ready = addPromise.get_future(); - std::unique_ptr addErr; + std::unique_ptr addErr; devices_[deviceID]->addNetwork( &module, functionMaps[logicalID], - [&addErr, &addPromise](const Module *, llvm::Error err) { - addErr = llvm::make_unique(std::move(err)); + [&addErr, &addPromise](const Module *, Error err) { + addErr = llvm::make_unique(std::move(err)); addPromise.set_value(); }); ready.wait(); @@ -221,22 +223,22 @@ llvm::Error Provisioner::provision(DAGListTy &networks, Module &module, } } cleanupProvision(localActiveNames); - return llvm::Error::success(); + return Error::success(); }; -llvm::Error Provisioner::removeFunction(llvm::StringRef name) { +Error Provisioner::removeFunction(llvm::StringRef name) { std::lock_guard functionsLock(functionsLock_); auto it = activeFunctions_.find(name); if (it != activeFunctions_.end()) { return MAKE_ERR( - GlowErr::ErrorCode::RUNTIME_NET_BUSY, + ErrorValue::ErrorCode::RUNTIME_NET_BUSY, llvm::formatv("Could not remove network: {0} as it is currently " "being provisioned.", name) .str()); } functions_.erase(name); - return llvm::Error::success(); + return Error::success(); } void Provisioner::cleanupProvision(llvm::ArrayRef names) { diff --git a/lib/Support/Error.cpp b/lib/Support/Error.cpp index a51852d3f1..d64ba99938 100644 --- a/lib/Support/Error.cpp +++ b/lib/Support/Error.cpp @@ -16,13 +16,10 @@ #include "glow/Support/Error.h" -namespace glow { -llvm::ExitOnError exitOnErr("Encountered an error, exiting.\n"); - -/// ID used by llvm::ErrorInfo::isA's dynamic typing. -uint8_t const GlowErr::ID = 0; +#include -bool OneErrOnly::set(llvm::Error err) { +namespace glow { +bool OneErrOnly::set(Error err) { // Don't do anything in the case of empty Error. if (!err) { return false; @@ -37,12 +34,12 @@ bool OneErrOnly::set(llvm::Error err) { // No update happening so don't need the lock any more. 
     lock.unlock();
     LOG(ERROR) << "OneErrOnly already has an Error, discarding new Error: "
-               << llvm::toString(std::move(err));
+               << errorToString(std::move(err));
     return false;
   }
 }
 
-llvm::Error OneErrOnly::get() {
+Error OneErrOnly::get() {
   std::unique_lock<std::mutex> lock(m_);
   auto err = std::move(err_);
   return err;
@@ -52,4 +49,110 @@ bool OneErrOnly::containsErr() {
   std::unique_lock<std::mutex> lock(m_);
   return static_cast<bool>(err_);
 }
+
+namespace detail {
+std::string GlowErrorValue::logToString() const {
+  std::stringstream ss;
+  log(ss);
+  return ss.str();
+}
+
+std::string GlowErrorValue::errorCodeToString(const ErrorCode &ec) {
+  switch (ec) {
+  case ErrorCode::UNKNOWN:
+    return "UNKNOWN";
+  case ErrorCode::MODEL_LOADER_UNSUPPORTED_SHAPE:
+    return "MODEL_LOADER_UNSUPPORTED_SHAPE";
+  case ErrorCode::MODEL_LOADER_UNSUPPORTED_OPERATOR:
+    return "MODEL_LOADER_UNSUPPORTED_OPERATOR";
+  case ErrorCode::MODEL_LOADER_UNSUPPORTED_ATTRIBUTE:
+    return "MODEL_LOADER_UNSUPPORTED_ATTRIBUTE";
+  case ErrorCode::MODEL_LOADER_UNSUPPORTED_DATATYPE:
+    return "MODEL_LOADER_UNSUPPORTED_DATATYPE";
+  case ErrorCode::MODEL_LOADER_UNSUPPORTED_ONNX_VERSION:
+    return "MODEL_LOADER_UNSUPPORTED_ONNX_VERSION";
+  case ErrorCode::MODEL_LOADER_INVALID_PROTOBUF:
+    return "MODEL_LOADER_INVALID_PROTOBUF";
+  case ErrorCode::PARTITIONER_ERROR:
+    return "PARTITIONER_ERROR";
+  case ErrorCode::RUNTIME_ERROR:
+    return "RUNTIME_ERROR";
+  case ErrorCode::RUNTIME_OUT_OF_DEVICE_MEMORY:
+    return "RUNTIME_OUT_OF_DEVICE_MEMORY";
+  case ErrorCode::RUNTIME_NET_NOT_FOUND:
+    return "RUNTIME_NET_NOT_FOUND";
+  case ErrorCode::RUNTIME_REQUEST_REFUSED:
+    return "RUNTIME_REQUEST_REFUSED";
+  case ErrorCode::RUNTIME_DEVICE_NOT_FOUND:
+    return "RUNTIME_DEVICE_NOT_FOUND";
+  case ErrorCode::RUNTIME_NET_BUSY:
+    return "RUNTIME_NET_BUSY";
+  case ErrorCode::COMPILE_UNSUPPORTED_NODE_AFTER_OPTIMIZE:
+    return "COMPILE_UNSUPPORTED_NODE_AFTER_OPTIMIZE";
+  case ErrorCode::COMPILE_CONTEXT_MALFORMED:
+    return "COMPILE_CONTEXT_MALFORMED";
+  case ErrorCode::MODEL_WRITER_INVALID_FILENAME:
+    return "MODEL_WRITER_INVALID_FILENAME";
+  case ErrorCode::MODEL_WRITER_SERIALIZATION_ERROR:
+    return "MODEL_WRITER_SERIALIZATION_ERROR";
+  case ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_GENERATE:
+    return "COMPILE_UNSUPPORTED_IR_AFTER_GENERATE";
+  case ErrorCode::COMPILE_UNSUPPORTED_IR_AFTER_OPTIMIZE:
+    return "COMPILE_UNSUPPORTED_IR_AFTER_OPTIMIZE";
+  };
+  LOG(FATAL) << "Unsupported ErrorCode";
+}
+
+std::unique_ptr<GlowErrorValue> takeErrorValue(GlowError error) {
+  return error.takeErrorValue();
+}
+
+void exitOnError(const char *fileName, size_t lineNumber, GlowError error) {
+  if (error) {
+    std::unique_ptr<GlowErrorValue> errorValue =
+        detail::takeErrorValue(std::move(error));
+    assert(errorValue != nullptr &&
+           "Error should have a non-null ErrorValue if bool(error) is true");
+    LOG(FATAL) << "exitOnError(Error) at " << fileName << ":" << lineNumber
+               << " got an unexpected ErrorValue: " << (*errorValue);
+  }
+}
+
+bool errorToBool(const char *fileName, size_t lineNumber, GlowError error,
+                 bool log) {
+  std::unique_ptr<GlowErrorValue> errorValue =
+      detail::takeErrorValue(std::move(error));
+  if (errorValue) {
+    if (log) {
+      LOG(ERROR) << "Converting Error to bool at " << fileName << ":"
+                 << lineNumber << ": " << (*errorValue);
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+std::string errorToString(GlowError error) {
+  std::unique_ptr<GlowErrorValue> errorValue =
+      detail::takeErrorValue(std::move(error));
+  if (errorValue) {
+    return errorValue->logToString();
+  } else {
+    return "success";
+  }
+}
+
+void errorToVoid(const char *fileName, size_t lineNumber, GlowError error,
+                 bool log) {
+  errorToBool(fileName, lineNumber, std::move(error), log);
+}
+
+GlowError::GlowError(GlowErrorEmpty &&other) {
+  setErrorValue(std::move(other.errorValue_), /*skipCheck*/ true);
+  setChecked(true);
+  other.setChecked(true);
+}
+
+} // namespace detail
 } // namespace glow
diff --git a/tests/benchmark/RuntimeBench.cpp b/tests/benchmark/RuntimeBench.cpp
index 86b4ab1903..9941cbb013 100644
--- a/tests/benchmark/RuntimeBench.cpp
+++ b/tests/benchmark/RuntimeBench.cpp
@@ -111,7 +111,7 @@ void setUpDeviceManagerCommon(
   deviceManager = std::unique_ptr<DeviceManager>(DeviceManager::createDeviceManager(
       DeviceConfig(backend->getBackendName())));
-  bool error = errToBool(deviceManager->init());
+  bool error = ERR_TO_BOOL(deviceManager->init());
   if (error) {
     state.SkipWithError("Unable to set up DeviceManager - failed to "
@@ -135,10 +135,10 @@ void setUpDeviceManagerCommon(
   // Add all compiled functions to the DeviceManager instance.
   std::promise<bool> promise;
   std::future<bool> future = promise.get_future();
-  deviceManager->addNetwork(
-      mod.get(), funcs, [&promise](const Module * /*mod*/, llvm::Error err) {
-        promise.set_value(errToBool(std::move(err)));
-      });
+  deviceManager->addNetwork(mod.get(), funcs,
+                            [&promise](const Module * /*mod*/, Error err) {
+                              promise.set_value(ERR_TO_BOOL(std::move(err)));
+                            });
   future.wait();
   error = future.get();
@@ -166,8 +166,8 @@ void tearDownDeviceManagerCommon(
   std::promise<bool> promise;
   std::future<bool> future = promise.get_future();
   deviceManager->evictNetwork(
-      func.first, [&promise](std::string /*name*/, llvm::Error err) {
-        promise.set_value(errToBool(std::move(err)));
+      func.first, [&promise](std::string /*name*/, Error err) {
+        promise.set_value(ERR_TO_BOOL(std::move(err)));
       });
   future.wait();
   bool error = future.get();
@@ -181,7 +181,7 @@ void tearDownDeviceManagerCommon(
   deviceManagerFunctions.clear();
   // Stop the device.
-  bool error = errToBool(deviceManager->stop());
+  bool error = ERR_TO_BOOL(deviceManager->stop());
   if (error) {
     state.SkipWithError("Unable to tear down DeviceManager - failed to stop "
                         "DeviceManager!");
@@ -306,7 +306,7 @@ class HostManagerBenchmark : public RuntimeBenchmark {
     // Add the module to the HostManager instance.
     CompilationContext cctx;
-    bool error = errToBool(hostManager_->addNetwork(std::move(mod), cctx));
+    bool error = ERR_TO_BOOL(hostManager_->addNetwork(std::move(mod), cctx));
     if (error) {
       state.SkipWithError("Unable to set up host manager - failed to add "
                           "module!");
@@ -322,7 +322,7 @@ class HostManagerBenchmark : public RuntimeBenchmark {
     }
     // Clear all networks and stop all devices.
- bool error = errToBool(hostManager_->clearHost()); + bool error = ERR_TO_BOOL(hostManager_->clearHost()); if (error) { state.SkipWithError( "Unable to tear down host manager - failed to clear host!"); @@ -344,8 +344,7 @@ class HostManagerBenchmark : public RuntimeBenchmark { std::future future = promise.get_future(); hostManager_->runNetwork( function, std::move(ctx), - [&promise, &ctx](runtime::RunIdentifierTy /*runId*/, - llvm::Error /*err*/, + [&promise, &ctx](runtime::RunIdentifierTy /*runId*/, Error /*err*/, std::unique_ptr result) { ctx = std::move(result); promise.set_value(); @@ -446,8 +445,7 @@ class ExecutorBenchmark : public RuntimeBenchmark { std::future future = promise.get_future(); executor_->run( (dag_->root).get(), std::move(ctx), /*runId=*/0, - [&promise, &ctx](runtime::RunIdentifierTy /*runId*/, - llvm::Error /*err*/, + [&promise, &ctx](runtime::RunIdentifierTy /*runId*/, Error /*err*/, std::unique_ptr result) { ctx = std::move(result); promise.set_value(); @@ -510,8 +508,7 @@ class DeviceManagerBenchmark : public RuntimeBenchmark { std::future future = promise.get_future(); deviceManager_->runFunction( func.first, std::move(ctx), - [&promise, &ctx](runtime::RunIdentifierTy /*runId*/, - llvm::Error /*err*/, + [&promise, &ctx](runtime::RunIdentifierTy /*runId*/, Error /*err*/, std::unique_ptr result) { ctx = std::move(result); promise.set_value(); diff --git a/tests/unittests/BackendCorrectnessTest.cpp b/tests/unittests/BackendCorrectnessTest.cpp index 505cbdd88b..2c9f33a4ca 100644 --- a/tests/unittests/BackendCorrectnessTest.cpp +++ b/tests/unittests/BackendCorrectnessTest.cpp @@ -179,7 +179,7 @@ class MockCPUBackend : public BackendUsingGlowIR { std::string getBackendName() const override { return "CPU"; } - llvm::Expected> + Expected> compile(Function *F, const BackendOptions &opts) const override { return backend_->compile(F, opts); } @@ -258,7 +258,7 @@ TEST_P(BackendCorrectnessTest, dataParallelStackingTest) { MockCPUBackend backend; auto function = backend.compileIR(std::move(M)); - ASSERT_FALSE(errToBool(function->execute(ctx.get()))); + ASSERT_FALSE(ERR_TO_BOOL(function->execute(ctx.get()))); auto H = outputTensor->getHandle(); EXPECT_EQ(H.at(0), 3); EXPECT_EQ(H.at(1), 4); diff --git a/tests/unittests/BackendTest.cpp b/tests/unittests/BackendTest.cpp index f6c39a99d4..3f3f6bb4f4 100644 --- a/tests/unittests/BackendTest.cpp +++ b/tests/unittests/BackendTest.cpp @@ -132,8 +132,8 @@ TEST(RuntimeBundle, BundleSymbolInfo) { auto *qp = F->createQuantizationProfile(bindings, "qp", input); EE.compile(CompilationMode::Infer); - auto dag = EE.getDAG("main"); - ASSERT_TRUE((bool)dag); + runtime::DAG *dag; + ASSIGN_VALUE_OR_FAIL_TEST(dag, EE.getDAG("main")); assert(dag->nodes.size() > 0 && "Empty DAG list"); auto table = dag->nodes[0]->runtimeBundle->getSymbolTable(); // Check that placeholders and constants are correctly labelled. 
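`getDAG` now hands back an `Expected` wrapping the DAG pointer (`getNetworkDAG` above returns `&it->second.dag`), so the tests unwrap it with `ASSIGN_VALUE_OR_FAIL_TEST` instead of bool-testing the old return value. Outside a test the same unwrap would look roughly as follows (`inspectDAG` is an illustrative name, and the include paths are assumptions):

```
#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Support/Error.h"

using namespace glow;

Error inspectDAG(ExecutionEngine &EE) {
  runtime::DAG *dag;
  // Early-returns the Error if getDAG failed; otherwise assigns the pointer.
  ASSIGN_VALUE_OR_RETURN_ERR(dag, EE.getDAG("main"));
  RETURN_ERR_IF_NOT(!dag->nodes.empty(), "Empty DAG list");
  return Error::success();
}
```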
@@ -192,8 +192,8 @@ TEST(RuntimeBundle, ContiguousPlaceholder) { bindings.allocate(A); bindings.allocate(Ex); EE.compile(cctx); - auto dag = EE.getDAG("main"); - ASSERT_TRUE((bool)dag); + runtime::DAG *dag; + ASSIGN_VALUE_OR_FAIL_TEST(dag, EE.getDAG("main")); auto &table = dag->nodes[0]->runtimeBundle->getSymbolTable(); std::vector tableContainer; @@ -323,10 +323,9 @@ TEST_P(BackendExecTest, debugPrint) { std::promise addPromise; auto fut = addPromise.get_future(); - llvm::Error addErr = llvm::Error::success(); - MARK_ERR_CHECKED(addErr); + Error addErr = Error::empty(); device->addNetwork(&EE_.getModule(), std::move(functionMap), - [&addPromise, &addErr](const Module *, llvm::Error err) { + [&addPromise, &addErr](const Module *, Error err) { addErr = std::move(err); addPromise.set_value(); }); @@ -335,11 +334,10 @@ TEST_P(BackendExecTest, debugPrint) { // Run the function. std::promise runPromise; fut = runPromise.get_future(); - llvm::Error runErr = llvm::Error::success(); - MARK_ERR_CHECKED(runErr); + Error runErr = Error::empty(); device->runFunction(name, std::move(ctx), [&runPromise, &runErr, - &ctx](runtime::RunIdentifierTy, llvm::Error err, + &ctx](runtime::RunIdentifierTy, Error err, std::unique_ptr contextPtr) { ctx = std::move(contextPtr); runErr = std::move(err); diff --git a/tests/unittests/BackendTestUtils.cpp b/tests/unittests/BackendTestUtils.cpp index 5c14a93994..cb878f58e7 100644 --- a/tests/unittests/BackendTestUtils.cpp +++ b/tests/unittests/BackendTestUtils.cpp @@ -1131,10 +1131,9 @@ void insertCompiledFunction(llvm::StringRef name, CompiledFunction *func, std::promise addPromise; auto fut = addPromise.get_future(); - llvm::Error addErr = llvm::Error::success(); - MARK_ERR_CHECKED(addErr); + Error addErr = Error::empty(); device->addNetwork(mod, std::move(functionMap), - [&addPromise, &addErr](const Module *, llvm::Error err) { + [&addPromise, &addErr](const Module *, Error err) { addErr = std::move(err); addPromise.set_value(); }); @@ -1147,11 +1146,10 @@ void runOnDevice(ExecutionContext &context, llvm::StringRef name, std::unique_ptr contextPtr(&context); std::promise runPromise; auto fut = runPromise.get_future(); - llvm::Error runErr = llvm::Error::success(); - MARK_ERR_CHECKED(runErr); + Error runErr = Error::empty(); device->runFunction( name, std::move(contextPtr), - [&runPromise, &runErr](runtime::RunIdentifierTy, llvm::Error err, + [&runPromise, &runErr](runtime::RunIdentifierTy, Error err, std::unique_ptr contextPtr) { // Don't delete context. 
contextPtr.release(); diff --git a/tests/unittests/BackendTestUtils.h b/tests/unittests/BackendTestUtils.h index 54e2b019d2..9a693d996b 100644 --- a/tests/unittests/BackendTestUtils.h +++ b/tests/unittests/BackendTestUtils.h @@ -142,16 +142,14 @@ class MockBackend : public Backend { MockFunction(runtime::RuntimeBundle &&bundle) : CompiledFunction(std::move(bundle)) {} - llvm::Error execute(ExecutionContext *) override { - return llvm::Error::success(); - } + Error execute(ExecutionContext *) override { return Error::success(); } std::string getCompileBackendName() const override { return "Interpreter"; } }; std::string getBackendName() const override { return "Interpreter"; } - llvm::Expected> + Expected> compile(Function *F, const BackendOptions &) const override { return llvm::make_unique(runtime::RuntimeBundle::create(*F)); } @@ -176,16 +174,14 @@ class MockBackendCustomIRGen : public Backend { MockFunction(runtime::RuntimeBundle &&bundle) : CompiledFunction(std::move(bundle)) {} - llvm::Error execute(ExecutionContext *) override { - return llvm::Error::success(); - } + Error execute(ExecutionContext *) override { return Error::success(); } std::string getCompileBackendName() const override { return "Interpreter"; } }; std::string getBackendName() const override { return "Interpreter"; } - llvm::Expected> + Expected> compile(Function *F, const BackendOptions &) const override { return llvm::make_unique(runtime::RuntimeBundle::create(*F)); } diff --git a/tests/unittests/CMakeLists.txt b/tests/unittests/CMakeLists.txt index 38f964e060..b217fd4013 100755 --- a/tests/unittests/CMakeLists.txt +++ b/tests/unittests/CMakeLists.txt @@ -419,6 +419,15 @@ add_glow_test(NAME UtilsTest COMMAND ${GLOW_BINARY_DIR}/tests/UtilsTest --gtest_output=xml:UtilsTest.xml WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +add_executable(ErrorTest + ErrorTest.cpp) +target_link_libraries(ErrorTest + PRIVATE + Support + gtest + TestMain) +add_glow_test(ErrorTest ${GLOW_BINARY_DIR}/tests/ErrorTest --gtest_output=xml:ErrorTest.xml) + LIST(APPEND UNOPT_TESTS true) add_custom_target(test_unopt ${UNOPT_TESTS} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/unittests/Caffe2ImporterTest.cpp b/tests/unittests/Caffe2ImporterTest.cpp index c145a25450..b243d70ee0 100644 --- a/tests/unittests/Caffe2ImporterTest.cpp +++ b/tests/unittests/Caffe2ImporterTest.cpp @@ -344,12 +344,12 @@ TEST(caffe2, maxPoolLegacyPadding) { Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1}); - llvm::Error err(llvm::Error::success()); + Error err(Error::success()); Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"}, {&inputs.getType()}, *F, &err); // Test that the error is the expected one. 
- auto msg = llvm::toString(std::move(err)); + auto msg = ERR_TO_STRING(std::move(err)); ASSERT_NE(msg.find("MaxPool nodes with legacy caffe padding are " "deprecated and not supported."), std::string::npos); diff --git a/tests/unittests/DeviceManagerTest.cpp b/tests/unittests/DeviceManagerTest.cpp index b1a7ef21f8..d7c4e96c40 100644 --- a/tests/unittests/DeviceManagerTest.cpp +++ b/tests/unittests/DeviceManagerTest.cpp @@ -38,10 +38,10 @@ class DeviceManagerTest : public ::testing::TestWithParam { backendName = GetParam(); device.reset(DeviceManager::createDeviceManager(DeviceConfig(backendName))); ASSERT_TRUE(device.get()); - ASSERT_FALSE(errToBool(device->init())); + ASSERT_FALSE(ERR_TO_BOOL(device->init())); } - void TearDown() override { EXPECT_FALSE(errToBool(device->stop())); } + void TearDown() override { EXPECT_FALSE(ERR_TO_BOOL(device->stop())); } std::string backendName; std::unique_ptr device{nullptr}; @@ -92,8 +92,9 @@ std::pair, std::future> getFutureHelper() { template void callbackHelper(std::promise &promise, ResultType res, - llvm::Error err) { - promise.set_value(!errToBool(std::move(err)) ? std::move(res) : ResultType()); + Error err) { + promise.set_value(!ERR_TO_BOOL(std::move(err)) ? std::move(res) + : ResultType()); } TEST_P(DeviceManagerTest, Basic) { @@ -107,7 +108,7 @@ TEST_P(DeviceManagerTest, Basic) { std::tie(promise, future) = getFutureHelper(); device->addNetwork(module.get(), std::move(functions), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); @@ -133,7 +134,7 @@ TEST_P(DeviceManagerTest, Basic) { std::tie(runPromise, runFuture) = getFutureHelper>(); device->runFunction("main", std::move(context), - [&runPromise](RunIdentifierTy, llvm::Error err, + [&runPromise](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runPromise, std::move(context), std::move(err)); @@ -174,7 +175,7 @@ TEST_P(DeviceManagerTest, PartialTensorCopy) { std::tie(promise, future) = getFutureHelper(); device->addNetwork(module.get(), std::move(functions), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); @@ -201,7 +202,7 @@ TEST_P(DeviceManagerTest, PartialTensorCopy) { std::tie(runPromise, runFuture) = getFutureHelper>(); device->runFunction("main", std::move(context), - [&runPromise](RunIdentifierTy, llvm::Error err, + [&runPromise](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runPromise, std::move(context), std::move(err)); @@ -226,7 +227,7 @@ TEST_P(DeviceManagerTest, MultiRun) { std::future future; std::tie(promise, future) = getFutureHelper(); device->addNetwork(module.get(), std::move(functions), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); future.wait_for(std::chrono::seconds(2)); @@ -262,14 +263,14 @@ TEST_P(DeviceManagerTest, MultiRun) { std::tie(runP2, runF2) = getFutureHelper>(); device->runFunction("main", std::move(context1), - [&runP1](RunIdentifierTy, llvm::Error err, + [&runP1](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP1, std::move(context), std::move(err)); }); device->runFunction("main", std::move(context2), - [&runP2](RunIdentifierTy, llvm::Error err, + [&runP2](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP2, std::move(context), std::move(err)); 
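These DeviceManager tests route every callback through the `getFutureHelper`/`callbackHelper` pair shown in the hunk above. Written out with the template parameters that this listing elides, the pair is roughly the following (`callbackHelper` matches the hunk; the `getFutureHelper` body is inferred):

```
#include "glow/Support/Error.h"

#include <future>
#include <utility>

using namespace glow;

template <typename ResultType>
std::pair<std::promise<ResultType>, std::future<ResultType>>
getFutureHelper() {
  std::promise<ResultType> promise;
  auto future = promise.get_future();
  return std::make_pair(std::move(promise), std::move(future));
}

template <typename ResultType>
void callbackHelper(std::promise<ResultType> &promise, ResultType res,
                    Error err) {
  // On error, publish a default-constructed ResultType instead of the result.
  promise.set_value(!ERR_TO_BOOL(std::move(err)) ? std::move(res)
                                                 : ResultType());
}
```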
@@ -331,7 +332,7 @@ TEST_P(DeviceManagerTest, MultiFunction) { std::future future; std::tie(promise, future) = getFutureHelper(); device->addNetwork(module.get(), std::move(functions), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); future.wait_for(std::chrono::seconds(2)); @@ -357,14 +358,14 @@ TEST_P(DeviceManagerTest, MultiFunction) { std::tie(runP2, runF2) = getFutureHelper>(); device->runFunction("func1", std::move(context1), - [&runP1](RunIdentifierTy, llvm::Error err, + [&runP1](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP1, std::move(context), std::move(err)); }); device->runFunction("func2", std::move(context2), - [&runP2](RunIdentifierTy, llvm::Error err, + [&runP2](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP2, std::move(context), std::move(err)); @@ -400,7 +401,7 @@ TEST_P(DeviceManagerTest, MultiModule) { std::future future; std::tie(promise, future) = getFutureHelper(); device->addNetwork(module1.get(), std::move(functions1), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); future.wait_for(std::chrono::seconds(2)); @@ -408,7 +409,7 @@ TEST_P(DeviceManagerTest, MultiModule) { std::tie(promise, future) = getFutureHelper(); device->addNetwork(module2.get(), std::move(functions2), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); future.wait_for(std::chrono::seconds(2)); @@ -439,14 +440,14 @@ TEST_P(DeviceManagerTest, MultiModule) { std::tie(runP2, runF2) = getFutureHelper>(); device->runFunction("func1", std::move(context1), - [&runP1](RunIdentifierTy, llvm::Error err, + [&runP1](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP1, std::move(context), std::move(err)); }); device->runFunction("func2", std::move(context2), - [&runP2](RunIdentifierTy, llvm::Error err, + [&runP2](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP2, std::move(context), std::move(err)); @@ -504,7 +505,7 @@ TEST_P(DeviceManagerTest, ReuseModule) { std::future future; std::tie(promise, future) = getFutureHelper(); device->addNetwork(module.get(), std::move(functions), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); future.wait_for(std::chrono::seconds(2)); @@ -512,7 +513,7 @@ TEST_P(DeviceManagerTest, ReuseModule) { std::tie(promise, future) = getFutureHelper(); device->addNetwork(module.get(), std::move(functions2), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); future.wait_for(std::chrono::seconds(2)); @@ -538,14 +539,14 @@ TEST_P(DeviceManagerTest, ReuseModule) { std::tie(runP2, runF2) = getFutureHelper>(); device->runFunction("func1", std::move(context1), - [&runP1](RunIdentifierTy, llvm::Error err, + [&runP1](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP1, std::move(context), std::move(err)); }); device->runFunction("func2", std::move(context2), - [&runP2](RunIdentifierTy, llvm::Error err, + [&runP2](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runP2, std::move(context), std::move(err)); @@ -600,7 +601,7 @@ 
TEST(DeviceManagerTest, AvailableMemory) { auto config = DeviceConfig("CPU"); config.setDeviceMemory(expectedBytes); CPUDeviceManager cpuCoreDevice(config); - ASSERT_FALSE(errToBool(cpuCoreDevice.init())); + ASSERT_FALSE(ERR_TO_BOOL(cpuCoreDevice.init())); EXPECT_EQ(cpuCoreDevice.getMaximumMemory(), expectedBytes); EXPECT_EQ(cpuCoreDevice.getAvailableMemory(), expectedBytes); @@ -609,7 +610,7 @@ TEST(DeviceManagerTest, AvailableMemory) { std::tie(promise, future) = getFutureHelper(); cpuCoreDevice.addNetwork(module.get(), compiledFunctions, - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); @@ -626,7 +627,7 @@ TEST(DeviceManagerTest, AvailableMemory) { std::tie(promise, future) = getFutureHelper(); cpuCoreDevice.addNetwork(module2.get(), compileFunctions("CPU", module2.get(), backing), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); @@ -644,7 +645,7 @@ TEST(DeviceManagerTest, AvailableMemory) { std::future evictFuture; std::tie(evictPromise, evictFuture) = getFutureHelper(); cpuCoreDevice.evictNetwork( - "main", [&evictPromise](std::string functionName, llvm::Error err) { + "main", [&evictPromise](std::string functionName, Error err) { callbackHelper(evictPromise, functionName, std::move(err)); }); evictFuture.wait_for(std::chrono::seconds(2)); @@ -654,7 +655,7 @@ TEST(DeviceManagerTest, AvailableMemory) { std::tie(promise, future) = getFutureHelper(); cpuCoreDevice.addNetwork(module2.get(), compileFunctions("CPU", module2.get(), backing), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); @@ -664,7 +665,7 @@ TEST(DeviceManagerTest, AvailableMemory) { EXPECT_EQ(cpuCoreDevice.getMaximumMemory(), expectedBytes); EXPECT_EQ(cpuCoreDevice.getAvailableMemory(), 0); - EXPECT_FALSE(errToBool(cpuCoreDevice.stop())); + EXPECT_FALSE(ERR_TO_BOOL(cpuCoreDevice.stop())); // Test CPU DeviceConfig. auto cpuConfigEmpty = DeviceConfig("CPU"); @@ -680,7 +681,7 @@ TEST(DeviceManagerTest, AvailableMemory) { TEST(DeviceManagerTest, DummyDeviceManager) { DummyDeviceManager deviceManager{DeviceConfig("Interpreter")}; - ASSERT_FALSE(errToBool(deviceManager.init())); + ASSERT_FALSE(ERR_TO_BOOL(deviceManager.init())); auto module = makeBasicModule(); std::vector> backing; @@ -691,7 +692,7 @@ TEST(DeviceManagerTest, DummyDeviceManager) { std::future future; std::tie(promise, future) = getFutureHelper(); deviceManager.addNetwork(module.get(), std::move(functions), - [&promise](const Module *module, llvm::Error err) { + [&promise](const Module *module, Error err) { callbackHelper(promise, module, std::move(err)); }); // no need to wait. 
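The `ASSERT_FALSE`/`EXPECT_FALSE` assertions around `ERR_TO_BOOL` rely on its semantics as defined by `errorToBool` earlier in this patch: the macro consumes its `Error` and returns true exactly when an `ErrorValue` was present, logging it with the call site's file and line. A short sketch in the style of these tests (the test name is illustrative):

```
TEST(DeviceManagerTest, InitAndStopSketch) {
  auto device = std::unique_ptr<DeviceManager>(
      DeviceManager::createDeviceManager(DeviceConfig("Interpreter")));
  // ERR_TO_BOOL consumes its argument, so the Error cannot be examined
  // again afterwards; false means success.
  ASSERT_FALSE(ERR_TO_BOOL(device->init()));
  EXPECT_FALSE(ERR_TO_BOOL(device->stop()));
}
```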
@@ -717,7 +718,7 @@ TEST(DeviceManagerTest, DummyDeviceManager) { getFutureHelper>(); deviceManager.runFunction( "main", std::move(context1), - [&runPromise](RunIdentifierTy, llvm::Error err, + [&runPromise](RunIdentifierTy, Error err, std::unique_ptr context) { callbackHelper(runPromise, std::move(context), std::move(err)); }); @@ -732,7 +733,7 @@ TEST(DeviceManagerTest, DummyDeviceManager) { ASSERT_TRUE(result); EXPECT_TRUE(result->isEqual(output1)); - EXPECT_FALSE(errToBool(deviceManager.stop())); + EXPECT_FALSE(ERR_TO_BOOL(deviceManager.stop())); } #endif // GLOW_WITH_CPU diff --git a/tests/unittests/ErrorTest.cpp b/tests/unittests/ErrorTest.cpp new file mode 100644 index 0000000000..edb37d7275 --- /dev/null +++ b/tests/unittests/ErrorTest.cpp @@ -0,0 +1,144 @@ +/** + * Copyright (c) 2017-present, Facebook, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "glow/Support/Error.h" + +#include "gtest/gtest.h" + +using namespace glow; + +TEST(Error, BasicError) { + auto err = MAKE_ERR("some error"); + EXPECT_TRUE(ERR_TO_BOOL(std::move(err))); +} + +TEST(Error, ErrorSuccess) { + auto err = Error::success(); + EXPECT_FALSE(ERR_TO_BOOL(std::move(err))); +} + +TEST(Error, ErrorSuccessReturn) { + auto f = []() -> Error { return Error::success(); }; + auto err = f(); + EXPECT_FALSE(ERR_TO_BOOL(std::move(err))); +} + +TEST(Error, ErrorString) { + const char *msg = "some error"; + auto err = MAKE_ERR(msg); + auto str = ERR_TO_STRING(std::move(err)); + EXPECT_NE(str.find(msg), std::string::npos) + << "Error should preserve the given message"; + +TEST(Error, BasicOpaque) { + using glow::detail::Opaque; + + Opaque opaqueInt; + opaqueInt.set(42); + EXPECT_EQ(opaqueInt.get(), 42); +} + +TEST(Error, OpaqueDestructorCall) { + using glow::detail::Opaque; + + /// Struct that takes a pointer to a boolean in its constructor and sets the + /// boolean to true when it is destructed.
+ struct SetFlagOnDestruct { + bool *b_ = nullptr; + + SetFlagOnDestruct(bool *b) : b_(b) {} + + ~SetFlagOnDestruct() { + if (b_ != nullptr) { + *b_ = true; + } + } + + SetFlagOnDestruct(SetFlagOnDestruct &&other) { std::swap(b_, other.b_); } + + SetFlagOnDestruct &operator=(SetFlagOnDestruct &&other) { + std::swap(b_, other.b_); + return *this; + } + }; + + bool b1 = false; + SetFlagOnDestruct flagSetter1(&b1); + + bool b2 = false; + SetFlagOnDestruct flagSetter2(&b2); + + { + Opaque opaque1; + opaque1.set(std::move(flagSetter1)); + } + + { + Opaque opaque2; + opaque2.set(std::move(flagSetter2)); + opaque2.destroy(); + } + + ASSERT_FALSE(b1) << "The destructor of the contents of Opaque shouldn't be " + "called when Opaque is destroyed"; + + EXPECT_TRUE(b2) << "The destructor of the contents of Opaque should be " + "called when Opaque.destroy() is called"; + + // Check size and alignment of Opaque + EXPECT_EQ(sizeof(Opaque), sizeof(SetFlagOnDestruct)); + EXPECT_EQ(alignof(Opaque), alignof(SetFlagOnDestruct)); +} + +TEST(Error, ExpectedValue) { + Expected stringOrErr("hello world"); + if (stringOrErr) { + EXPECT_EQ(stringOrErr.get(), "hello world"); + } else { + FAIL() << "This expected should have a value"; + } +} + +TEST(Error, ExpectedError) { + const char *msg = "some error"; + auto err = MAKE_ERR(msg); + Expected intOrErr = std::move(err); + if (intOrErr) { + FAIL() << "This expected should not have a value"; + } else { + auto err2 = intOrErr.takeError(); + auto str = ERR_TO_STRING(std::move(err2)); + EXPECT_NE(str.find(msg), std::string::npos) + << "Expected should preserve the given message"; + } +} + +TEST(Error, ExpectedTakeErrorWithoutError) { + Expected intOrErr(42); + auto err = intOrErr.takeError(); + EXPECT_FALSE(err); +} + +TEST(Error, EmptyErrors) { + Error err = Error::empty(); + + auto f = [&]() { err = MAKE_ERR("something"); }; + + f(); + + EXPECT_TRUE(ERR_TO_BOOL(std::move(err))); +} diff --git a/tests/unittests/HabanaGlowTest.cpp b/tests/unittests/HabanaGlowTest.cpp index 1d28ded815..df6683a1a0 100644 --- a/tests/unittests/HabanaGlowTest.cpp +++ b/tests/unittests/HabanaGlowTest.cpp @@ -381,11 +381,11 @@ TEST_F(HabanaBackendTest, SetDeviceMemory) { configFull.setDeviceMemory(32768); // With no commandline or deviceConfig, the memory should be default 7 <<20. glow::runtime::HabanaDeviceManager device1(configEmpty, 1, 1); - llvm::Error err1 = device1.init(); + Error err1 = device1.init(); EXPECT_EQ(defaultMemory * 1024, device1.getMaximumMemory()); // With only deviceConfig, the memory should be set by deviceConfig. 
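The new ErrorTest.cpp cases above pin down the conventions the rest of this patch relies on: a fallible function returns `Expected<T>`, the caller either takes the value or extracts the failure with `takeError()`, and every `Error` ends its life in a consuming macro. A minimal sketch under those conventions follows; `parseCount` and its message are invented for illustration, while `Expected`, `MAKE_ERR`, `get()`, `takeError()`, and `ERR_TO_STRING` are the APIs exercised by the tests.

```
#include "glow/Support/Error.h"

#include <string>

// Hypothetical fallible function in the style exercised by ErrorTest.
glow::Expected<int> parseCount(const std::string &s) {
  if (s.empty()) {
    return MAKE_ERR("cannot parse an empty string");
  }
  return static_cast<int>(s.size()); // stand-in for real parsing
}

void demo() {
  auto countOrErr = parseCount("abc");
  if (countOrErr) {
    int count = countOrErr.get(); // success path: take the value
    (void)count;
  } else {
    // Failure path: takeError() moves the Error out to be consumed once.
    std::string msg = ERR_TO_STRING(countOrErr.takeError());
    (void)msg;
  }
}
```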
glow::runtime::HabanaDeviceManager device2(configFull, 1, 1); - llvm::Error err2 = device2.init(); + Error err2 = device2.init(); EXPECT_EQ(32768, device2.getMaximumMemory()); } @@ -1535,8 +1535,8 @@ TEST_F(HabanaBackendTest, SingleFunctionMultiThreadMultiDevice) { deviceManager->addNetwork( &mod_, functions, [promise = addNetworkPromise](const Module * /*module*/, - llvm::Error err) mutable { - promise->set_value(errToBool(std::move(err))); + Error err) mutable { + promise->set_value(ERR_TO_BOOL(std::move(err))); }); } @@ -1586,9 +1586,9 @@ TEST_F(HabanaBackendTest, SingleFunctionMultiThreadMultiDevice) { functionName, std::move(inputExecutionContexts[j]), [&threadDonePromise, &completeIterations, expectedResultBindings = outputBindings[j]]( - RunIdentifierTy runId, llvm::Error err, + RunIdentifierTy runId, Error err, std::unique_ptr resultContext) { - EXPECT_FALSE(errToBool(std::move(err))); + EXPECT_FALSE(ERR_TO_BOOL(std::move(err))); EXPECT_TRUE(PlaceholderBindings::compare( resultContext->getPlaceholderBindings(), expectedResultBindings.get())); @@ -1614,7 +1614,7 @@ TEST_F(HabanaBackendTest, SingleFunctionMultiThreadMultiDevice) { // Stop all devices. for (auto &deviceManager : deviceManagers) { - EXPECT_FALSE(errToBool(deviceManager->stop())); + EXPECT_FALSE(ERR_TO_BOOL(deviceManager->stop())); } } diff --git a/tests/unittests/HostManagerTest.cpp b/tests/unittests/HostManagerTest.cpp index d8bc8337b5..e52a564b91 100644 --- a/tests/unittests/HostManagerTest.cpp +++ b/tests/unittests/HostManagerTest.cpp @@ -51,7 +51,7 @@ createHostManager(llvm::StringRef backendName, return hostManager; } -llvm::Error addNetwork(HostManager *manager, std::string name) { +Error addNetwork(HostManager *manager, std::string name) { std::unique_ptr module = llvm::make_unique(); Function *F = module->createFunction(name); auto *X = @@ -67,10 +67,10 @@ llvm::Error addNetwork(HostManager *manager, std::string name) { void addAndRemoveNetwork(HostManager *manager, unsigned int functionNumber) { std::string name = "function" + std::to_string(functionNumber); - errToBool(addNetwork(manager, name)); + ERR_TO_BOOL(addNetwork(manager, name)); // Removal can return an error if the network is in the process of being // added. That is fine; we expect it in this test.
- errToBool(manager->removeNetwork(name)); + ERR_TO_BOOL(manager->removeNetwork(name)); } TEST_F(HostManagerTest, newHostManager) { createHostManager("CPU"); } @@ -79,7 +79,7 @@ TEST_F(HostManagerTest, addNetwork) { auto module = setupModule(6); auto hostManager = createHostManager("CPU"); CompilationContext cctx; - ASSERT_FALSE(errToBool(hostManager->addNetwork(std::move(module), cctx))); + ASSERT_FALSE(ERR_TO_BOOL(hostManager->addNetwork(std::move(module), cctx))); } TEST_F(HostManagerTest, runNetwork) { @@ -98,28 +98,27 @@ TEST_F(HostManagerTest, runNetwork) { auto hostManager = createHostManager("CPU"); CompilationContext cctx; - ASSERT_FALSE(errToBool(hostManager->addNetwork(std::move(module), cctx))); + ASSERT_FALSE(ERR_TO_BOOL(hostManager->addNetwork(std::move(module), cctx))); std::promise runNetwork; auto ready = runNetwork.get_future(); - std::unique_ptr runErr; + std::unique_ptr runErr; hostManager->runNetwork("main", std::move(context), [&runNetwork, &saveTensor, &context, &runErr]( - RunIdentifierTy runID, llvm::Error err, + RunIdentifierTy runID, Error err, std::unique_ptr context_) { auto HX = saveTensor->getHandle(); EXPECT_NEAR(HX.at({0}), 1, 1E-5); EXPECT_NEAR(HX.at({1}), 4, 1E-5); EXPECT_NEAR(HX.at({2}), 9, 1E-5); context = std::move(context_); - runErr = - llvm::make_unique(std::move(err)); + runErr = llvm::make_unique(std::move(err)); runNetwork.set_value(); }); ready.wait(); - EXPECT_FALSE(errToBool(std::move(*DCHECK_NOTNULL(runErr.get())))); + EXPECT_FALSE(ERR_TO_BOOL(std::move(*DCHECK_NOTNULL(runErr.get())))); // reset runErr runErr = nullptr; @@ -128,19 +127,18 @@ TEST_F(HostManagerTest, runNetwork) { ready = newRun.get_future(); hostManager->runNetwork("main", std::move(context), [&newRun, &saveTensor, &runErr]( - RunIdentifierTy runID, llvm::Error err, + RunIdentifierTy runID, Error err, std::unique_ptr context_) { auto HX = saveTensor->getHandle(); EXPECT_NEAR(HX.at({0}), 1, 1E-5); EXPECT_NEAR(HX.at({1}), 4, 1E-5); EXPECT_NEAR(HX.at({2}), 9, 1E-5); - runErr = - llvm::make_unique(std::move(err)); + runErr = llvm::make_unique(std::move(err)); newRun.set_value(); }); ready.wait(); - EXPECT_FALSE(errToBool(std::move(*DCHECK_NOTNULL(runErr.get())))); + EXPECT_FALSE(ERR_TO_BOOL(std::move(*DCHECK_NOTNULL(runErr.get())))); } /// Test that HostManager properly handles concurrent add/remove requests with @@ -191,32 +189,32 @@ TEST_F(HostManagerTest, ConfigureHostManager) { config.maxQueueSize = 0; auto hostManager = createHostManager("Interpreter", std::move(config)); - EXPECT_FALSE(errToBool(addNetwork(hostManager.get(), "main"))); + EXPECT_FALSE(ERR_TO_BOOL(addNetwork(hostManager.get(), "main"))); auto context = llvm::make_unique(); auto context2 = llvm::make_unique(); - std::unique_ptr runErr; + std::unique_ptr runErr; std::shared_ptr lock = std::make_shared(); std::unique_lock guard(*lock); /// Don't care about the first one. hostManager->runNetwork("main", std::move(context), - [lock](RunIdentifierTy runID, llvm::Error err, + [lock](RunIdentifierTy runID, Error err, std::unique_ptr context_) { - errToBool(std::move(err)); + ERR_TO_BOOL(std::move(err)); }); hostManager->runNetwork( "main", std::move(context2), - [&runErr](RunIdentifierTy runID, llvm::Error err, + [&runErr](RunIdentifierTy runID, Error err, std::unique_ptr context_) { - runErr = llvm::make_unique(std::move(err)); + runErr = llvm::make_unique(std::move(err)); }); guard.unlock(); // Don't need a future, error CB called inline.
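Two idioms recur in the runNetwork tests above and are worth isolating. First, when a failure is an acceptable outcome, a bare `ERR_TO_BOOL(std::move(err))` serves as an explicit consume-and-discard. Second, because `Error` is move-only and must be consumed exactly once, the callback wraps it in a `unique_ptr` that the waiting thread later drains. A condensed sketch of the second pattern, with illustrative names and assuming the same headers these tests use (including `llvm::make_unique`):

```
#include "glow/Support/Error.h"

#include <future>
#include <memory>

// Sketch: run a network and surface the callback's Error on this thread.
void runAndCheck(glow::runtime::HostManager *manager,
                 std::unique_ptr<glow::ExecutionContext> context) {
  std::promise<void> done;
  auto ready = done.get_future();
  std::unique_ptr<glow::Error> runErr;
  manager->runNetwork("main", std::move(context),
                      [&](glow::runtime::RunIdentifierTy, glow::Error err,
                          std::unique_ptr<glow::ExecutionContext>) {
                        // Stash the move-only Error for the outer thread.
                        runErr = llvm::make_unique<glow::Error>(std::move(err));
                        done.set_value();
                      });
  ready.wait();
  // Consume the Error exactly once, on the thread that checks it.
  EXIT_ON_ERR(std::move(*runErr));
}
```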
- EXPECT_TRUE(errToBool(std::move(*DCHECK_NOTNULL(runErr.get())))); + EXPECT_TRUE(ERR_TO_BOOL(std::move(*DCHECK_NOTNULL(runErr.get())))); } /// Test that the HostManager properly enqueues requests. @@ -227,7 +225,7 @@ TEST_F(HostManagerTest, QueueTest) { config.maxActiveRequests = 1; auto hostManager = createHostManager("Interpreter", std::move(config)); - EXPECT_FALSE(errToBool(addNetwork(hostManager.get(), "main"))); + EXPECT_FALSE(ERR_TO_BOOL(addNetwork(hostManager.get(), "main"))); auto context = llvm::make_unique(); auto context2 = llvm::make_unique(); @@ -244,7 +242,7 @@ TEST_F(HostManagerTest, QueueTest) { // requests. hostManager->runNetwork("main", std::move(context), [&run1p, &counter, &dispatchDone]( - RunIdentifierTy runID, llvm::Error err, + RunIdentifierTy runID, Error err, std::unique_ptr context) { EXIT_ON_ERR(std::move(err)); run1p.set_value(counter++); @@ -253,7 +251,7 @@ TEST_F(HostManagerTest, QueueTest) { // Set the priority of the second to 1. hostManager->runNetwork( "main", std::move(context2), - [&run2p, &counter](RunIdentifierTy runID, llvm::Error err, + [&run2p, &counter](RunIdentifierTy runID, Error err, std::unique_ptr context) { EXIT_ON_ERR(std::move(err)); run2p.set_value(counter++); @@ -264,7 +262,7 @@ TEST_F(HostManagerTest, QueueTest) { // after run1. hostManager->runNetwork( "main", std::move(context3), - [&run3p, &counter](RunIdentifierTy runID, llvm::Error err, + [&run3p, &counter](RunIdentifierTy runID, Error err, std::unique_ptr context) { EXIT_ON_ERR(std::move(err)); run3p.set_value(counter++); diff --git a/tests/unittests/OCLTest.cpp b/tests/unittests/OCLTest.cpp index a2f007acdc..4cc594eadd 100644 --- a/tests/unittests/OCLTest.cpp +++ b/tests/unittests/OCLTest.cpp @@ -31,7 +31,7 @@ #include "glow/IR/Instrs.h" #include "gtest/gtest.h" -/// Takes an llvm::Expected \p rhsOrErrV, asserts that it is not an error, +/// Takes an Expected \p rhsOrErrV, asserts that it is not an error, /// and takes the value from rhsOrErrV and assigns it to \p lhs. #define ASSERT_AND_ASSIGN_VALUE(lhs, rhsOrErrV) \ do { \ @@ -132,21 +132,21 @@ TEST(OpenCLCorrectnessTest, SetDeviceMemory) { // This memory size can be limited by deviceConfig. // No setting at all, default memory size from OpenCL device info. OpenCLDeviceManager openCLDeviceDefault(openCLConfigEmpty); - llvm::Error err1 = openCLDeviceDefault.init(); - ASSERT_FALSE(errToBool(std::move(err1))); + Error err1 = openCLDeviceDefault.init(); + ASSERT_FALSE(ERR_TO_BOOL(std::move(err1))); uint64_t memSize = openCLDeviceDefault.getMaximumMemory(); // If limited by deviceConfig. OpenCLDeviceManager openCLDeviceSetByDeviceConfig(openCLConfigFull); - llvm::Error err2 = openCLDeviceSetByDeviceConfig.init(); - ASSERT_FALSE(errToBool(std::move(err2))); + Error err2 = openCLDeviceSetByDeviceConfig.init(); + ASSERT_FALSE(ERR_TO_BOOL(std::move(err2))); EXPECT_EQ(openCLDeviceSetByDeviceConfig.getMaximumMemory(), 32768); // If deviceConfig defines larger memory size than the OpenCL device info, // then fall back to default.
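All three OpenCL memory cases follow one shape: build a `DeviceConfig`, optionally cap its memory, then see what `init()` reconciles against the hardware. A compact sketch of that flow; the helper and its template parameter are invented, the include path is assumed, and only `DeviceConfig`, `setDeviceMemory`, `init`, and `getMaximumMemory` come from the patch.

```
#include "glow/Backends/DeviceManager.h"
#include "glow/Support/Error.h"

#include <cstdint>

// Hypothetical helper: apply a memory cap and report what the device
// manager actually ends up with after init() reconciles the request.
template <typename DeviceT>
uint64_t capAndQuery(const char *backendName, uint64_t cap) {
  auto config = glow::runtime::DeviceConfig(backendName);
  config.setDeviceMemory(cap); // a request; init() may fall back to default
  DeviceT device(config);
  if (ERR_TO_BOOL(device.init())) {
    return 0; // init failed; Error already consumed
  }
  return device.getMaximumMemory();
}
```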
auto openCLConfigLarger = DeviceConfig("OpenCL"); openCLConfigLarger.setDeviceMemory(memSize + 10000); OpenCLDeviceManager openCLDeviceLarger(openCLConfigLarger); - llvm::Error err3 = openCLDeviceLarger.init(); - ASSERT_FALSE(errToBool(std::move(err3))); + Error err3 = openCLDeviceLarger.init(); + ASSERT_FALSE(ERR_TO_BOOL(std::move(err3))); EXPECT_EQ(openCLDeviceLarger.getMaximumMemory(), memSize); } @@ -193,8 +193,9 @@ TEST_F(OpenCLCommandQueuePoolTest, ErrorWhenNotInitialized) { pool_.setContext(nullptr); pool_.setDevice(0); - // A request for a command queue should return an llvm::Error. - ASSERT_FALSE(pool_.requestCommandQueue()); + // A request for a command queue should return an Error. + auto err = pool_.requestCommandQueue().takeError(); + ASSERT_TRUE(ERR_TO_BOOL(std::move(err))); } /// Tests that the pool reuses queues. @@ -203,7 +204,7 @@ TEST_F(OpenCLCommandQueuePoolTest, QueueReuse) { runtime::OpenCLCommandQueue queue; // Request a queue. - llvm::Expected queueOrError = + Expected queueOrError = pool_.requestCommandQueue(0); ASSERT_AND_ASSIGN_VALUE(queue, queueOrError); backingQueue1 = queue.backingQueue; @@ -230,7 +231,7 @@ TEST_F(OpenCLCommandQueuePoolTest, NoQueueReuseWithDifferentProps) { runtime::OpenCLCommandQueue queue; // Request a queue. - llvm::Expected queueOrError = + Expected queueOrError = pool_.requestCommandQueue(0); ASSERT_AND_ASSIGN_VALUE(queue, queueOrError); backingQueue1 = queue.backingQueue; diff --git a/tests/unittests/OnnxExporterTest.cpp b/tests/unittests/OnnxExporterTest.cpp index 7bc2faff9d..49ce312153 100644 --- a/tests/unittests/OnnxExporterTest.cpp +++ b/tests/unittests/OnnxExporterTest.cpp @@ -39,18 +39,18 @@ void testLoadAndSaveONNXModel(const std::string &name) { Function *F = mod.createFunction("main"); size_t irVer = 0, opsetVer = 0; - llvm::Error err = llvm::Error::success(); + Error err = Error::success(); { ONNXModelLoader onnxLD(name, {}, {}, *F, &err); irVer = onnxLD.getIrVersion(); opsetVer = onnxLD.getOpSetVersion(); } - ASSERT_FALSE(handleErrors(std::move(err), [&name](const GlowErr &GE) { + if (err) { llvm::errs() << "ONNXModelLoader failed to load model: " << name << ": "; - GE.log(llvm::errs()); - llvm::errs() << "\n"; - })); + llvm::errs() << ERR_TO_STRING(std::move(err)) << "\n"; + FAIL(); + } llvm::SmallString<64> path; auto tempFileRes = diff --git a/tests/unittests/OnnxImporterTest.cpp b/tests/unittests/OnnxImporterTest.cpp index c69d3327be..928996970f 100644 --- a/tests/unittests/OnnxImporterTest.cpp +++ b/tests/unittests/OnnxImporterTest.cpp @@ -32,20 +32,19 @@ using namespace glow; using namespace std; /// Loads onnxtxt model file \p filename and \returns ModelProto object. 
-llvm::Expected -loadProto(const std::string &filename) { +Expected loadProto(const std::string &filename) { std::ifstream ff(filename, std::ios::in | std::ios::binary); RETURN_ERR_IF_NOT(ff, strFormat("Can't find the model or network files for %s.", filename.c_str()), - GlowErr::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); + ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); if (filename.find(".onnxtxt") != std::string::npos) { std::string str((std::istreambuf_iterator(ff)), std::istreambuf_iterator()); ONNX_NAMESPACE::ModelProto MP; bool parseNet = google::protobuf::TextFormat::ParseFromString(str, &MP); RETURN_ERR_IF_NOT(parseNet, "Failed to parse ModelProto", - GlowErr::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); + ErrorValue::ErrorCode::MODEL_LOADER_INVALID_PROTOBUF); return MP; } RETURN_ERR("Can't load proto file"); @@ -53,11 +52,11 @@ loadProto(const std::string &filename) { /// Saves ModelProto object \p model as onnxtxt model file \p filename /// and \returns true if successful. -llvm::Expected saveProto(const std::string &filename, - ONNX_NAMESPACE::ModelProto &model) { +Expected saveProto(const std::string &filename, + ONNX_NAMESPACE::ModelProto &model) { std::ofstream ff(filename, std::ios::out); RETURN_ERR_IF_NOT(ff, "Can't write the proto file.", - GlowErr::ErrorCode::RUNTIME_ERROR); + ErrorValue::ErrorCode::RUNTIME_ERROR); if (filename.find(".onnxtxt") != std::string::npos) { std::string onnx_message = model.DebugString(); ff << onnx_message; @@ -71,14 +70,14 @@ llvm::Expected saveProto(const std::string &filename, /// Replaces placeholders with names \p tensorNames in model proto object \p /// model with initializers of same name and values specified in input tensor /// array \p tensors and \returns true if successful. -llvm::Expected +Expected replacePlaceholderWithConstant(ONNX_NAMESPACE::ModelProto &model, llvm::ArrayRef tensorNames, llvm::ArrayRef tensors) { ONNX_NAMESPACE::NodeProto np; ONNX_NAMESPACE::GraphProto *gp = model.mutable_graph(); RETURN_ERR_IF_NOT(gp, "Can't get mutable graph.", - GlowErr::ErrorCode::RUNTIME_ERROR); + ErrorValue::ErrorCode::RUNTIME_ERROR); for (size_t i = 0; i < tensorNames.size(); i++) { for (int j = 0; j < gp->input_size(); j++) { ONNX_NAMESPACE::ValueInfoProto *valueInfo = gp->mutable_input(j); @@ -127,21 +126,20 @@ replacePlaceholderWithConstant(ONNX_NAMESPACE::ModelProto &model, /// by replacing input tensors with name \p tensorNames, and values \p tensors /// and then checking against expected output expectedTensors. \returns true /// if the test completes without error. -bool checkConstFoldedOutput(std::string NetFilename, - llvm::ArrayRef tensorNames, - llvm::ArrayRef tensors, - llvm::ArrayRef expectedTensors) { +Error checkConstFoldedOutput(std::string NetFilename, + llvm::ArrayRef tensorNames, + llvm::ArrayRef tensors, + llvm::ArrayRef expectedTensors) { ONNX_NAMESPACE::ModelProto modelDef; llvm::SmallVector resultPath; llvm::sys::fs::createTemporaryFile("dummy", "onnxtxt", resultPath); std::string netFilename(resultPath.begin(), resultPath.end()); - ASSIGN_VALUE_OR_RETURN_FALSE(modelDef, loadProto(NetFilename)); + ASSIGN_VALUE_OR_RETURN_ERR(modelDef, loadProto(NetFilename)); // Replace placeholders in the original onnx model with constants. 
- if (!replacePlaceholderWithConstant(modelDef, tensorNames, tensors)) - return false; - if (!saveProto(netFilename, modelDef)) - return false; + RETURN_IF_ERR(replacePlaceholderWithConstant(modelDef, tensorNames, tensors) + .takeError()); + RETURN_IF_ERR(saveProto(netFilename, modelDef).takeError()); setConstantFoldLoaderOpsFlag(true); // It is expected that loading will fold the whole graph and output @@ -156,15 +154,13 @@ bool checkConstFoldedOutput(std::string NetFilename, // match the expectedTensors passed in. for (int i = 0; i < modelDef.graph().output_size(); i++) { NodeValue NV; - ASSIGN_VALUE_OR_RETURN_FALSE( + ASSIGN_VALUE_OR_RETURN_ERR( NV, onnxLD.getNodeValueByName(modelDef.graph().output(i).name())); auto *constOut = llvm::dyn_cast(NV.getNode()); - if (!constOut) { - return false; - } + RETURN_ERR_IF_NOT(constOut, "Failed cast to Constant"); EXPECT_TRUE(expectedTensors[i]->isEqual(constOut->getPayload())); } - return true; + return Error::success(); } template @@ -252,8 +248,8 @@ importArithMultiBroadcastTest(std::string fileName, EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]); } // Constant Folding Test. - EXPECT_TRUE(checkConstFoldedOutput(NetFilename, {"data"}, {&data}, - {bindings.get(graphOutputVar)})); + FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFilename, {"data"}, {&data}, + {bindings.get(graphOutputVar)})); } /// Test loading LeakyRelu op from an ONNX model. @@ -471,8 +467,8 @@ static void testImportPRelu(std::string filename, } // Constant Folding Test. - EXPECT_TRUE(checkConstFoldedOutput(NetFileName, {"data"}, {&data}, - {bindings.get(graphOutputVar)})); + FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFileName, {"data"}, {&data}, + {bindings.get(graphOutputVar)})); } TEST(onnx, importPreluSlopeHasSameShape) { @@ -696,8 +692,8 @@ static void averagePoolTestHelper(std::string &filename, } // Constant Folding Test. - EXPECT_TRUE(checkConstFoldedOutput(NetFilename, {"x"}, {&data}, - {bindings.get(graphOutputVar)})); + FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFilename, {"x"}, {&data}, + {bindings.get(graphOutputVar)})); } /// Test loading AveragePool op from an ONNX model. @@ -785,7 +781,7 @@ TEST(onnx, reduceMean4Dto3D) { } // Constant Folding Test. - EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)})); } @@ -829,7 +825,7 @@ TEST(onnx, reduceMean4Dto4D) { } // Constant Folding Test. - EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)})); } @@ -869,7 +865,7 @@ TEST(onnx, reduceSum4D) { EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]); } // Constant Folding Test. - EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)})); } @@ -916,7 +912,7 @@ TEST(onnx, reduceMean2AvgPoolKeepDims) { EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]); } // Constant Folding Test. - EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)})); } @@ -965,7 +961,7 @@ TEST(onnx, reduceMean2AvgPoolNoKeepDims) { } // Constant Folding Test. - EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)})); } @@ -1156,7 +1152,7 @@ TEST(onnx, importClip) { } // Constant Folding Test. - EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)})); } @@ -1224,9 +1220,9 @@ TEST(onnx, importBatchMatMul) { } } // Constant Folding Test.
- EXPECT_TRUE(checkConstFoldedOutput(netFilename, {"inputs_0", "inputs_1"}, - {&inputs_0, &inputs_1}, - {bindings.get(output)})); + FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"inputs_0", "inputs_1"}, + {&inputs_0, &inputs_1}, + {bindings.get(output)})); } /// Test loading BatchBoxCox op from an ONNX model. @@ -1305,7 +1301,7 @@ TEST(onnx, importBatchBoxCox) { } // Constant Folding Test. - EXPECT_TRUE(checkConstFoldedOutput( + FAIL_TEST_IF_ERR(checkConstFoldedOutput( netFilename, {"data", "lambda1", "lambda2"}, {&data, &lambda1, &lambda2}, {bindings.get(output)})); } @@ -1397,8 +1393,9 @@ TEST(onnx, importSumN) { } // Constant Folding Test. - EXPECT_TRUE(checkConstFoldedOutput(netFilename, {"i0", "i1", "i2"}, - {&i0, &i1, &i2}, {bindings.get(output)})); + FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"i0", "i1", "i2"}, + {&i0, &i1, &i2}, + {bindings.get(output)})); } /// Test loading Sum with one input and one output @@ -1442,7 +1439,7 @@ TEST(onnx, importSum1) { ASSERT_TRUE(llvm::isa(save->getInput().getNode())); // Constant Folding Test. - EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"x"}, {&x}, {bindings.get(output)})); } @@ -1871,8 +1868,8 @@ static void importSliceTest(std::string fileName, const char *inputName, } // Constant Folding Test. - EXPECT_TRUE(checkConstFoldedOutput(NetFilename, {inputName}, {&data}, - {bindings.get(graphOutputVar)})); + FAIL_TEST_IF_ERR(checkConstFoldedOutput(NetFilename, {inputName}, {&data}, + {bindings.get(graphOutputVar)})); } TEST(onnx, importSliceDynamicNoAxes) { @@ -2022,10 +2019,9 @@ static void importPad(std::string fileName, const char *inputName, getNCHWData(&data, inputShape[0], inputShape[1], inputShape[2], inputShape[3]); if (expectLoadError) { - llvm::Error err = llvm::Error::success(); - MARK_ERR_CHECKED(err); + Error err = Error::empty(); ONNXModelLoader(NetFilename, {inputName}, {&data.getType()}, *F, &err); - EXPECT_TRUE(errToBool(std::move(err))); + EXPECT_TRUE(ERR_TO_BOOL(std::move(err))); return; } ONNXModelLoader onnxLD(NetFilename, {inputName}, {&data.getType()}, *F); @@ -2202,8 +2198,8 @@ TEST(onnx, shape) { } // Constant Folding Test. - EXPECT_TRUE(checkConstFoldedOutput(netFilename, {"input"}, {&x}, - {bindings.get(output)})); + FAIL_TEST_IF_ERR(checkConstFoldedOutput(netFilename, {"input"}, {&x}, + {bindings.get(output)})); } TEST(onnx, tile) { @@ -2292,7 +2288,7 @@ TEST(onnx, topK) { } // Constant Folding Test. 
- EXPECT_TRUE( + FAIL_TEST_IF_ERR( checkConstFoldedOutput(netFilename, {"scores"}, {&x}, {outputT, indexT})); } diff --git a/tests/unittests/PartitionerTest.cpp b/tests/unittests/PartitionerTest.cpp index a4b6a1f07e..8077db41e3 100644 --- a/tests/unittests/PartitionerTest.cpp +++ b/tests/unittests/PartitionerTest.cpp @@ -163,7 +163,7 @@ TEST_F(PartitionerTest, Basic1) { Partitioner myPartitioner(&EEP.getModule(), devices, false, true); CompilationContext cctx; auto dagList = myPartitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(EEP.getModule().getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); EXPECT_TRUE(checkSaveNode(EEP.getModule())); @@ -249,13 +249,13 @@ TEST_F(PartitionerTest, Basic2) { {2048, "Interpreter"}}; Partitioner myPartitioner(&EEP.getModule(), devices, /* saturateHost */ true); CompilationContext cctx; - auto dagList = myPartitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + runtime::DAGListTy dagList; + ASSIGN_VALUE_OR_FAIL_TEST(dagList, myPartitioner.partition(cctx)); EXPECT_EQ(EEP.getModule().getFunctions().size(), 2); - EXPECT_EQ(dagList->size(), 1); + EXPECT_EQ(dagList.size(), 1); ASSERT_TRUE(checkSaveNode(EEP.getModule())); - for (auto &dag : dagList.get()) { + for (auto &dag : dagList) { for (auto &node : dag.nodes) { // Since saturateHost is set true, in this case, there should be 2 copies // of the partitions. @@ -267,7 +267,7 @@ TEST_F(PartitionerTest, Basic2) { bindings_.clear(); bindings_.allocate(EEP.getModule().getPlaceholders()); EEP.compile(cctx); - for (auto it = dagList->begin(); it != dagList->end(); ++it) { + for (auto it = dagList.begin(); it != dagList.end(); ++it) { updateInputPlaceholders(bindings_, {bindings_.getPlaceholderByName("input"), bindings_.getPlaceholderByName("input1")}, @@ -344,7 +344,7 @@ TEST_F(PartitionerTest, Error1) { Partitioner myPartitioner(&EEP.getModule(), devices); CompilationContext cctx; auto dagList = myPartitioner.partition(cctx); - EXPECT_TRUE(glow::errToBool(dagList.takeError())); + EXPECT_TRUE(ERR_TO_BOOL(dagList.takeError())); } /// This one tests the roofline computed with compute, memory and @@ -462,7 +462,7 @@ TEST_F(PartitionerTest, SelectRepFunc) { CompilationContext cctx; auto dagList = myPartitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); } /// Create a mock backend and rewrite the isOpSupported function @@ -477,9 +477,7 @@ class MockBackend : public Backend { MockFunction(llvm::StringRef backendName, runtime::RuntimeBundle &&bundle) : CompiledFunction(std::move(bundle)), backendName(backendName) {} - llvm::Error execute(ExecutionContext *) override { - return llvm::Error::success(); - } + Error execute(ExecutionContext *) override { return Error::success(); } std::string getCompileBackendName() const override { return backendName; } @@ -488,7 +486,7 @@ class MockBackend : public Backend { std::string getBackendName() const override { return backendName; } - llvm::Expected> + Expected> compile(Function *F, const BackendOptions &opts) const override { return llvm::make_unique(backendName, runtime::RuntimeBundle::create(*F)); @@ -604,7 +602,7 @@ TEST_F(PartitionerTest, SimpleHeterogeneousPartitioning) { Partitioner partitioner(&mod_, devices, backends, /* saturateHost */ true); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(mod_.getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); ASSERT_TRUE(checkSaveNode(mod_)); @@ -624,7
+622,7 @@ TEST_F(PartitionerTest, heterogeneousPartitioningWithNonSupportedNodes) { Partitioner partitioner(&mod_, devices); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(mod_.getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); ASSERT_TRUE(checkSaveNode(mod_)); @@ -647,7 +645,7 @@ TEST_F(PartitionerTest, heterogeneousPartitioningWithSupportedNodes) { Partitioner partitioner(&mod_, devices); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(mod_.getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); ASSERT_TRUE(checkSaveNode(mod_)); @@ -682,7 +680,7 @@ TEST_F(PartitionerTest, logicalIDTest0) { Partitioner partitioner(&mod_, devices, /* saturateHost */ true); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); // Check there are 3 partitions. EXPECT_EQ(mod_.getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); @@ -716,7 +714,7 @@ TEST_F(PartitionerTest, logicalIDTest1) { Partitioner partitioner(&mod_, devices, backends, /* saturateHost */ true); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(mod_.getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); ASSERT_TRUE(checkSaveNode(mod_)); @@ -891,7 +889,7 @@ TEST_F(PartitionerTest, memoryUsageValidation1) { Partitioner myPartitioner(&mod_, devices); CompilationContext cctx; auto dagList = myPartitioner.partition(cctx); - EXPECT_TRUE(glow::errToBool(dagList.takeError())); + EXPECT_TRUE(ERR_TO_BOOL(dagList.takeError())); } /// This one tests dagValidation in the partitioner: p1->p2, p2->p1. @@ -920,7 +918,7 @@ TEST_F(PartitionerTest, dagValidation1) { auto partitioner = Partitioner(&mod_, devices, false, false, partitionConfig); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE(glow::errToBool(dagList.takeError())); + EXPECT_TRUE(ERR_TO_BOOL(dagList.takeError())); } /// This one tests dagValidation in the partitioner: p0->p1, p1->p2, p2->p1. @@ -952,7 +950,7 @@ TEST_F(PartitionerTest, dagValidation2) { auto partitioner = Partitioner(&mod_, devices, false, false, partitionConfig); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE(glow::errToBool(dagList.takeError())); + EXPECT_TRUE(ERR_TO_BOOL(dagList.takeError())); } /// This one tests partition from a user-defined config.
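As the dagValidation cases show, `partition()` now returns an `Expected` DAG list, so a test consumes it one of two ways: take the value, or drain the failure via `takeError()`. A sketch of both paths side by side, using the gtest macros of the surrounding tests; the helper itself is invented, while the calls mirror the ones above.

```
#include "glow/Support/Error.h"

// Hypothetical helper exercising both outcomes of Partitioner::partition().
void expectPartition(glow::Partitioner &partitioner,
                     glow::CompilationContext &cctx, bool expectFailure) {
  auto dagList = partitioner.partition(cctx);
  if (expectFailure) {
    // Failure path: takeError() extracts the Error; ERR_TO_BOOL consumes it.
    EXPECT_TRUE(ERR_TO_BOOL(dagList.takeError()));
    return;
  }
  // Success path: assert before dereferencing the Expected.
  ASSERT_TRUE((bool)dagList);
  EXPECT_EQ(dagList->size(), 1);
}
```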
@@ -972,7 +970,7 @@ TEST_F(PartitionerTest, partitionFromConfig) { Partitioner partitioner(&mod_, devices, false, false, partitionConfig); CompilationContext cctx; auto dagList = partitioner.partition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(mod_.getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); ASSERT_TRUE(checkSaveNode(mod_)); @@ -996,7 +994,7 @@ TEST_F(PartitionerTest, partitionFromConfigDirectCall) { Partitioner partitioner(&mod_, devices); CompilationContext cctx; auto dagList = partitioner.partitionFromConfig(partitionConfig); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(mod_.getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); ASSERT_TRUE(checkSaveNode(mod_)); @@ -1075,7 +1073,7 @@ TEST_F(PartitionerTest, loadBalancedPartition) { Partitioner myPartitioner(&EEP.getModule(), devices, false, true); CompilationContext cctx; auto dagList = myPartitioner.loadBalancedPartition(cctx); - EXPECT_TRUE((bool)dagList); + ASSERT_TRUE((bool)dagList); EXPECT_EQ(EEP.getModule().getFunctions().size(), 3); EXPECT_EQ(dagList->size(), 1); EXPECT_TRUE(checkSaveNode(EEP.getModule())); diff --git a/tests/unittests/ProvisionerTest.cpp b/tests/unittests/ProvisionerTest.cpp index 9ffdeb69c2..34ac7f71b2 100644 --- a/tests/unittests/ProvisionerTest.cpp +++ b/tests/unittests/ProvisionerTest.cpp @@ -82,7 +82,7 @@ TEST_F(ProvisionerTest, provisionDag) { Provisioner provisioner(devices); auto err = provisioner.provision(networks, *mod.get(), cctx); // Expect that there was no Error when provisioning - EXPECT_FALSE(errToBool(std::move(err))); + EXPECT_FALSE(ERR_TO_BOOL(std::move(err))); } TEST_F(ProvisionerTest, provisionDagFail) { @@ -101,5 +101,5 @@ TEST_F(ProvisionerTest, provisionDagFail) { Provisioner provisioner(devices); auto err = provisioner.provision(networks, *mod.get(), cctx); // Expect that there was an Error when provisioning - EXPECT_TRUE(errToBool(std::move(err))); + EXPECT_TRUE(ERR_TO_BOOL(std::move(err))); } diff --git a/tests/unittests/QuantizationTest.cpp b/tests/unittests/QuantizationTest.cpp index 645146c169..0f4255212a 100644 --- a/tests/unittests/QuantizationTest.cpp +++ b/tests/unittests/QuantizationTest.cpp @@ -27,7 +27,6 @@ #include "gtest/gtest.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/FileSystem.h" namespace glow { @@ -69,7 +68,7 @@ class MockQuantBackend : public Backend { std::string getBackendName() const override { return "Interpreter"; } - llvm::Expected> + Expected> compile(Function *F, const BackendOptions &opts) const override { return backend_->compile(F, opts); } diff --git a/tests/unittests/ThreadPoolExecutorTest.cpp b/tests/unittests/ThreadPoolExecutorTest.cpp index 576abd67d1..962899449b 100644 --- a/tests/unittests/ThreadPoolExecutorTest.cpp +++ b/tests/unittests/ThreadPoolExecutorTest.cpp @@ -55,12 +55,12 @@ class TestDeviceManager final : public runtime::DeviceManager { if (!resultMap_.erase(functionName)) { evictCB( functionName, - MAKE_ERR(GlowErr::ErrorCode::RUNTIME_NET_NOT_FOUND, + MAKE_ERR(ErrorValue::ErrorCode::RUNTIME_NET_NOT_FOUND, strFormat("Could not find function with name %s to evict", functionName.c_str()))); return; } - evictCB(functionName, llvm::Error::success()); + evictCB(functionName, Error::success()); } /// Look up the previously registered response for \p functionName and @@ -107,7 +107,7 @@ class TestDeviceManager final : public runtime::DeviceManager { } if (successResult) { - resultCB(runId, llvm::Error::success(), 
std::move(context)); + resultCB(runId, Error::success(), std::move(context)); } else { resultCB(runId, MAKE_ERR("An error occurred"), std::move(context)); } @@ -239,11 +239,11 @@ class ExecutorTest final { std::future future = promise.get_future(); executor_->run(root_.get(), std::move(inputContext_), runId_, [&promise, &executorRunId, &executorOutputContext]( - RunIdentifierTy runId, llvm::Error err, + RunIdentifierTy runId, Error err, std::unique_ptr context) { executorRunId = runId; executorOutputContext = std::move(context); - promise.set_value(errToBool(std::move(err))); + promise.set_value(ERR_TO_BOOL(std::move(err))); }); bool runSuccess = !future.get(); @@ -642,18 +642,18 @@ TEST_F(ThreadPoolExecutorTest, EmptyDAG) { // Call Executor::run(). std::promise promise; std::future future = promise.get_future(); - std::unique_ptr runErr; + std::unique_ptr runErr; executor_->run(nullptr, std::move(testContext), testRunId, [&runErr, &promise, &executorRunId, &executorOutputContext]( - RunIdentifierTy runId, llvm::Error err, + RunIdentifierTy runId, Error err, std::unique_ptr context) { executorRunId = runId; executorOutputContext = std::move(context); - runErr = llvm::make_unique(std::move(err)); + runErr = llvm::make_unique(std::move(err)); promise.set_value(); }); - EXPECT_FALSE(errToBool(std::move(*DCHECK_NOTNULL(runErr.get())))); + EXPECT_FALSE(ERR_TO_BOOL(std::move(*DCHECK_NOTNULL(runErr.get())))); EXPECT_EQ(executorRunId, testRunId); diff --git a/tools/loader/ImageClassifier.cpp b/tools/loader/ImageClassifier.cpp index 513e48233f..5d294c329d 100644 --- a/tools/loader/ImageClassifier.cpp +++ b/tools/loader/ImageClassifier.cpp @@ -419,7 +419,7 @@ static void runInference(runtime::HostManager *hostManager, std::string name, std::atomic &dispatched) { hostManager->runNetwork(name, std::move(batch), [&runPromise, &inflight, &dispatched, hostManager, - name](runtime::RunIdentifierTy, llvm::Error err, + name](runtime::RunIdentifierTy, Error err, std::unique_ptr contextPtr) { EXIT_ON_ERR(std::move(err)); // Kick off another run. diff --git a/torch_glow/src/CachingGraphRunner.cpp b/torch_glow/src/CachingGraphRunner.cpp index a8d62c0b28..5bdd0db424 100644 --- a/torch_glow/src/CachingGraphRunner.cpp +++ b/torch_glow/src/CachingGraphRunner.cpp @@ -49,7 +49,7 @@ namespace { static std::mutex graphCacheMutex; } -llvm::Expected +Expected CachingGraphRunner::loadImpl(torch::jit::Stack &stack) { const auto inputs = torch::jit::last(stack, graph_->inputs().size()); @@ -82,8 +82,8 @@ CachingGraphRunner::loadImpl(torch::jit::Stack &stack) { return perGlowGraphInfoMap_[hash].get(); } -llvm::Error CachingGraphRunner::runImpl(const PerGlowGraphInfo &info, - torch::jit::Stack &stack) const { +Error CachingGraphRunner::runImpl(const PerGlowGraphInfo &info, + torch::jit::Stack &stack) const { size_t numInputs = info.inputPlaceholders.size(); const auto inputs = torch::jit::last(stack, numInputs); @@ -126,7 +126,7 @@ llvm::Error CachingGraphRunner::runImpl(const PerGlowGraphInfo &info, return err; } -llvm::Error CachingGraphRunner::run(torch::jit::Stack &stack) { +Error CachingGraphRunner::run(torch::jit::Stack &stack) { PerGlowGraphInfo *info; ASSIGN_VALUE_OR_RETURN_ERR(info, loadImpl(stack)); return runImpl(*DCHECK_NOTNULL(info), stack); @@ -139,7 +139,7 @@ CachingGraphRunner::CachingGraphRunner(torch::jit::Graph *graph, CachingGraphRunner::~CachingGraphRunner() { // Remove Glow functions saved in HostManager when being destroyed. 
for (auto &kv : perGlowGraphInfoMap_) { - glow::errToBool(hostManager_->removeNetwork(kv.second->functionName)); + ERR_TO_BOOL(hostManager_->removeNetwork(kv.second->functionName)); } } diff --git a/torch_glow/src/CachingGraphRunner.h b/torch_glow/src/CachingGraphRunner.h index 7eaeb1f253..e46fd230c1 100644 --- a/torch_glow/src/CachingGraphRunner.h +++ b/torch_glow/src/CachingGraphRunner.h @@ -57,13 +57,12 @@ class CachingGraphRunner { /// info is returned immediately. Otherwise this loads the /// subgraph into the owned HostManager, creates a PerGlowGraphInfo which is /// cached for the given inputs, and then \returns this PerGlowGraphInfo. - llvm::Expected loadImpl(torch::jit::Stack &stack); + Expected loadImpl(torch::jit::Stack &stack); /// Given a PerGlowGraphInfo \p info for a subgraph that was previously /// loaded, this runs the Glow function that corresponds to that /// PerGlowGraphInfo in the shape of the inputs with the given \p stack. - llvm::Error runImpl(const PerGlowGraphInfo &info, - torch::jit::Stack &stack) const; + Error runImpl(const PerGlowGraphInfo &info, torch::jit::Stack &stack) const; /// Given a \p stack of inputs, computes the hash for the inputs on the stack. size_t computeGraphHash(const c10::ArrayRef inputs) const; @@ -78,7 +77,7 @@ class CachingGraphRunner { /// those inputs. If this is the first time this PyTorch graph has been run /// with inputs matching the hash of those on the stack then this first loads /// it as a Glow Function and compiles. \returns error of failure. - llvm::Error run(torch::jit::Stack &stack); + Error run(torch::jit::Stack &stack); }; } // namespace glow diff --git a/torch_glow/src/GlowIValue.cpp b/torch_glow/src/GlowIValue.cpp index 2606bfcd53..5b539427a9 100644 --- a/torch_glow/src/GlowIValue.cpp +++ b/torch_glow/src/GlowIValue.cpp @@ -110,67 +110,67 @@ bool GlowIValue::isTuple() const { return Tag::Tuple == tag_; } strFormat("Expected GlowIValue with tag %s but found %s", \ tagToStr((EXPECTED_TAG)), tagToStr(tag_))) -llvm::Expected GlowIValue::toTensor() { +Expected GlowIValue::toTensor() { ExpectTag(Tag::Tensor); return payload_.asTensor; } -llvm::Expected GlowIValue::toTensor() const { +Expected GlowIValue::toTensor() const { ExpectTag(Tag::Tensor); return payload_.asTensor; } -llvm::Expected GlowIValue::toDouble() const { +Expected GlowIValue::toDouble() const { ExpectTag(Tag::Double); return payload_.asDouble; } -llvm::Expected GlowIValue::toInt() const { +Expected GlowIValue::toInt() const { ExpectTag(Tag::Int); return payload_.asInt; } -llvm::Expected GlowIValue::toBool() const { +Expected GlowIValue::toBool() const { ExpectTag(Tag::Bool); return payload_.asBool; } -llvm::Expected *> GlowIValue::toIntList() { +Expected *> GlowIValue::toIntList() { ExpectTag(Tag::IntList); return payload_.asIntList; } -llvm::Expected *> GlowIValue::toIntList() const { +Expected *> GlowIValue::toIntList() const { ExpectTag(Tag::IntList); return payload_.asIntList; } -llvm::Expected *> GlowIValue::toDoubleList() { +Expected *> GlowIValue::toDoubleList() { ExpectTag(Tag::DoubleList); return payload_.asDoubleList; } -llvm::Expected *> GlowIValue::toDoubleList() const { +Expected *> GlowIValue::toDoubleList() const { ExpectTag(Tag::DoubleList); return payload_.asDoubleList; } -llvm::Expected *> GlowIValue::toBoolList() { +Expected *> GlowIValue::toBoolList() { ExpectTag(Tag::BoolList); return payload_.asBoolList; } -llvm::Expected *> GlowIValue::toBoolList() const { +Expected *> GlowIValue::toBoolList() const { ExpectTag(Tag::BoolList); return 
payload_.asBoolList; } -llvm::Expected *> GlowIValue::toTuple() { +Expected *> GlowIValue::toTuple() { ExpectTag(Tag::Tuple); return payload_.asTuple; } -llvm::Expected *> GlowIValue::toTuple() const { +Expected *> GlowIValue::toTuple() const { ExpectTag(Tag::Tuple); return payload_.asTuple; } @@ -234,7 +234,7 @@ void GlowIValue::fromTuple(std::vector glowIValList) { std::swap(glowIValList, *payload_.asTuple); } -llvm::Error GlowIValue::fromIValue(const at::IValue &ival) { +Error GlowIValue::fromIValue(const at::IValue &ival) { reset(); if (ival.isNone()) { fromNone(); @@ -272,7 +272,7 @@ llvm::Error GlowIValue::fromIValue(const at::IValue &ival) { } else { RETURN_ERR("Encountered unhandled IValue type"); } - return llvm::Error::success(); + return Error::success(); } } // namespace glow diff --git a/torch_glow/src/GlowIValue.h b/torch_glow/src/GlowIValue.h index e1f4b586d8..b4fdb90623 100644 --- a/torch_glow/src/GlowIValue.h +++ b/torch_glow/src/GlowIValue.h @@ -98,45 +98,45 @@ class GlowIValue { bool isTuple() const; /// \returns Payload a glow Tensor or error if the tag is not Tensor. - llvm::Expected toTensor(); + Expected toTensor(); /// \returns Payload a Tensor* or error if the tag is not Tensor. - llvm::Expected toTensor() const; + Expected toTensor() const; /// \returns Payload a double or error if the tag is not Double. - llvm::Expected toDouble() const; + Expected toDouble() const; /// \returns Payload a int or error if the tag is not Int. - llvm::Expected toInt() const; + Expected toInt() const; /// \returns Payload a bool or error if the tag is not Bool. - llvm::Expected toBool() const; + Expected toBool() const; /// \returns Payload a vector of ints or error if the tag is not IntList. - llvm::Expected *> toIntList(); + Expected *> toIntList(); /// \returns Payload a vector of ints or error if the tag is not IntList. - llvm::Expected *> toIntList() const; + Expected *> toIntList() const; /// \returns Payload a vector of doubles or error if the tag is not /// DoubleList. - llvm::Expected *> toDoubleList(); + Expected *> toDoubleList(); /// \returns Payload a vector of doubles or error if the tag is not /// DoubleList. - llvm::Expected *> toDoubleList() const; + Expected *> toDoubleList() const; /// \returns Payload a vector of bools or error if the tag is not BoolList. - llvm::Expected *> toBoolList(); + Expected *> toBoolList(); /// \returns Payload a vector of bools or error if the tag is not BoolList. - llvm::Expected *> toBoolList() const; + Expected *> toBoolList() const; /// \returns Payload a vector of GlowIValues or error if the tag is not Tuple. - llvm::Expected *> toTuple(); + Expected *> toTuple(); /// \returns Payload a vector of GlowIValues or error if the tag is not Tuple. - llvm::Expected *> toTuple() const; + Expected *> toTuple() const; /// Set the tag to None. void fromNone(); @@ -166,7 +166,7 @@ class GlowIValue { void fromTuple(std::vector glowIValList); /// Given a PyTorch IValue \p ival, set the tag to the analogous Tag. 
- llvm::Error fromIValue(const at::IValue &ival); + Error fromIValue(const at::IValue &ival); }; } // namespace glow diff --git a/torch_glow/src/PyTorchCommon.cpp b/torch_glow/src/PyTorchCommon.cpp index 2c27b8d366..b80cd4442b 100644 --- a/torch_glow/src/PyTorchCommon.cpp +++ b/torch_glow/src/PyTorchCommon.cpp @@ -126,11 +126,11 @@ void registerGlowOp() { std::make_shared(graph.get(), getHostManager()); return [graphRunner](torch::jit::Stack &stack) { - llvm::Error err = graphRunner->run(stack); + Error err = graphRunner->run(stack); if (static_cast(err)) { // PyTorch framework expects an exception to be thrown here. - throw std::invalid_argument(llvm::toString(std::move(err))); + throw std::invalid_argument(ERR_TO_STRING(std::move(err))); } return 0; }; diff --git a/torch_glow/src/PyTorchFileLoader.cpp b/torch_glow/src/PyTorchFileLoader.cpp index 2f7979d1ef..2fb904de64 100644 --- a/torch_glow/src/PyTorchFileLoader.cpp +++ b/torch_glow/src/PyTorchFileLoader.cpp @@ -58,12 +58,11 @@ static at::Symbol getFusionSymbol() { static thread_local LocalFusionFunction localFusionInfo; /// Loads JIT Graph into Glow Function. -llvm::Error -loadJitGraphToGlowFunction(torch::jit::Stack &stack, torch::jit::Graph &graph, - glow::Function &f, - std::vector &inputPlaceholders, - std::vector &outputPlaceholders, - const PyTorchLoaderSettings &settings) { +Error loadJitGraphToGlowFunction( + torch::jit::Stack &stack, torch::jit::Graph &graph, glow::Function &f, + std::vector &inputPlaceholders, + std::vector &outputPlaceholders, + const PyTorchLoaderSettings &settings) { const auto &graphInputs = graph.inputs(); const auto numInputs = graphInputs.size(); auto inputs = torch::jit::last(stack, numInputs); @@ -84,20 +83,19 @@ loadJitGraphToGlowFunction(torch::jit::Stack &stack, torch::jit::Graph &graph, stack.push_back(at::IValue(var)); } - return llvm::Error::success(); + return Error::success(); } /// Runs Module forward pass, triggers custom fusion pass if local thread /// Glow function is set.
-llvm::Error -evaluateModuleGraph(std::shared_ptr &module, - const std::vector &inputs) { +Error evaluateModuleGraph(std::shared_ptr &module, + const std::vector &inputs) { try { module->forward(inputs); } catch (const std::exception &x) { RETURN_ERR(x.what()); } - return llvm::Error::success(); + return Error::success(); } /// Helper struct, which on construction registers custom fusion pass @@ -117,7 +115,7 @@ struct RegisterCustomFusionPass { *localFusionInfo.inputPlaceholders, *localFusionInfo.outputPlaceholders, *localFusionInfo.settings); if (static_cast(err)) { - throw std::invalid_argument(llvm::toString(std::move(err))); + throw std::invalid_argument(ERR_TO_STRING(std::move(err))); } return 0; }; @@ -136,7 +134,7 @@ struct RegisterCustomFusionPass { } // namespace /*static*/ -llvm::Error PyTorchFileLoader::loadPyTorchModel( +Error PyTorchFileLoader::loadPyTorchModel( const std::string &fileName, std::shared_ptr &module) { try { @@ -146,11 +144,11 @@ llvm::Error PyTorchFileLoader::loadPyTorchModel( RETURN_ERR(strFormat("Cannot load model from file: %s, reason: %s", fileName.c_str(), x.what())); } - return llvm::Error::success(); + return Error::success(); } /*static*/ -llvm::Error PyTorchFileLoader::loadPyTorchGraph( +Error PyTorchFileLoader::loadPyTorchGraph( const std::string &fileName, const std::vector &inputs, glow::Function &F, std::vector &inputPlaceholders, std::vector &outputPlaceholders, bool sanityCheck) { @@ -175,11 +173,11 @@ llvm::Error PyTorchFileLoader::loadPyTorchGraph( RETURN_IF_ERR(err); - return sanityCheck ? performSanityCheck() : llvm::Error::success(); + return sanityCheck ? performSanityCheck() : Error::success(); } /*static*/ -llvm::Error PyTorchFileLoader::parsePyTorchGraphForOnnxTraining( +Error PyTorchFileLoader::parsePyTorchGraphForOnnxTraining( const std::string &fileName, const std::vector &inputs, glow::Function &F, std::vector &inputPlaceholders, std::vector &outputPlaceholders) { @@ -207,7 +205,7 @@ llvm::Error PyTorchFileLoader::parsePyTorchGraphForOnnxTraining( // Sanity check, after "fusionSymbol" node, no other nodes should exist. /*static*/ -llvm::Error PyTorchFileLoader::performSanityCheck() { +Error PyTorchFileLoader::performSanityCheck() { std::shared_ptr subgraph = torch::jit::lastExecutedOptimizedGraph(); size_t fusedNodes = 0, missedNodes = 0; @@ -226,7 +224,7 @@ llvm::Error PyTorchFileLoader::performSanityCheck() { fusedNodes == 1 && missedNodes == 0, glow::strFormat("Fused optimized nodes: %lu, missing nodes: %lu", fusedNodes, missedNodes)); - return llvm::Error::success(); + return Error::success(); } } // namespace glow diff --git a/torch_glow/src/PyTorchFileLoader.h b/torch_glow/src/PyTorchFileLoader.h index ef064599da..9acea30743 100644 --- a/torch_glow/src/PyTorchFileLoader.h +++ b/torch_glow/src/PyTorchFileLoader.h @@ -19,7 +19,7 @@ #include "PyTorchCommon.h" #include "glow/Graph/Graph.h" -#include "llvm/Support/Error.h" +#include "glow/Support/Error.h" #include namespace glow { @@ -28,12 +28,12 @@ namespace glow { class PyTorchFileLoader { /// Performs sanity check making sure custom fuse pass succeeded as expected, /// \returns error otherwise. - static llvm::Error performSanityCheck(); + static Error performSanityCheck(); public: /// Takes a model file \p fileName, loads model into torch Module \p module, /// \returns error if any.
- static llvm::Error + static Error loadPyTorchModel(const std::string &fileName, std::shared_ptr &module); @@ -45,7 +45,7 @@ class PyTorchFileLoader { /// Method is thread safe, internally it uses local thread structures for /// executing custom fusion pass, registered globally. No other passes or /// other threads calling this method will be affected. - static llvm::Error + static Error loadPyTorchGraph(const std::string &fileName, const std::vector &inputs, glow::Function &F, @@ -57,7 +57,7 @@ class PyTorchFileLoader { /// stack of \p inputs into Glow Function \p F and fills out input \p /// inputPlaceholders, output \p outputPlaceholders placeholders, \returns /// error if any. Method is thread safe. - static llvm::Error parsePyTorchGraphForOnnxTraining( + static Error parsePyTorchGraphForOnnxTraining( const std::string &fileName, const std::vector &inputs, glow::Function &F, std::vector &inputPlaceholders, diff --git a/torch_glow/src/PyTorchModelLoader.cpp b/torch_glow/src/PyTorchModelLoader.cpp index 28a50ad30f..942d93ccd7 100644 --- a/torch_glow/src/PyTorchModelLoader.cpp +++ b/torch_glow/src/PyTorchModelLoader.cpp @@ -26,16 +26,16 @@ namespace glow { namespace { /// Downcast a double to a float. -llvm::Expected to32Bit(double val) { +Expected to32Bit(double val) { RETURN_ERR_IF_NOT(val <= std::numeric_limits::max() || val >= std::numeric_limits::lowest(), glow::strFormat("Value %f is out of limit.", val)); - return llvm::Expected(static_cast(val)); + return Expected(static_cast(val)); } -/// Unwrap a llvm::Expected and call to32Bit(double) or any contained return +/// Unwrap an Expected and call to32Bit(double) or any contained return /// Error. -llvm::Expected to32Bit(llvm::Expected expectedVal) { +Expected to32Bit(Expected expectedVal) { if (expectedVal) { return to32Bit(*expectedVal); } else { @@ -47,8 +47,8 @@ llvm::Expected to32Bit(llvm::Expected expectedVal) { /// of the GlowIValue in the case it's an IntList or Tuple of Ints checking there /// are exactly size elements or if the GlowIValue is an Int then it will /// replicate it size times then return that. -llvm::Expected> -expandIntIValIfNeeded(const GlowIValue &glowIVal, size_t size) { +Expected> expandIntIValIfNeeded(const GlowIValue &glowIVal, + size_t size) { // If the GlowIValue is a single int then make size copies of it. if (glowIVal.isInt()) { std::vector out; @@ -97,11 +97,10 @@ expandIntIValIfNeeded(const GlowIValue &glowIVal, size_t size) { } } -/// Unwrap llvm::Expected and call +/// Unwrap Expected and call /// expandIntIValIfNeeded(GlowIValue), propagates any Errors. -llvm::Expected> -expandIntIValIfNeeded(llvm::Expected expectedGlowIVal, - size_t size) { +Expected> +expandIntIValIfNeeded(Expected expectedGlowIVal, size_t size) { if (expectedGlowIVal) { return expandIntIValIfNeeded(**expectedGlowIVal, size); } else { @@ -112,7 +111,7 @@ expandIntIValIfNeeded(llvm::Expected expectedGlowIVal, /// Given a GlowIValue \p glowIVal, \returns if the GlowIValue is an Int return /// its value, if it's an IntList or Tuple of Ints then check that all elements /// are the same then return the first one.
-llvm::Expected contractIntIValIfNeeded(const GlowIValue &glowIVal) { +Expected contractIntIValIfNeeded(const GlowIValue &glowIVal) { if (glowIVal.isInt()) { return glowIVal.toInt(); } @@ -156,10 +155,10 @@ llvm::Expected contractIntIValIfNeeded(const GlowIValue &glowIVal) { } } -/// Unwrap a llvm::Expected \p expectedGlowIVal and call +/// Unwrap an Expected \p expectedGlowIVal and call /// contractIntIValIfNeeded(GlowIValue), propagate any Errors. -llvm::Expected -contractIntIValIfNeeded(llvm::Expected expectedGlowIVal) { +Expected +contractIntIValIfNeeded(Expected expectedGlowIVal) { if (expectedGlowIVal) { return contractIntIValIfNeeded(**expectedGlowIVal); } else { @@ -167,9 +166,9 @@ contractIntIValIfNeeded(llvm::Expected expectedGlowIVal) { } } -/// Unwrap a llvm::Expected \p expectedIVal and call toDouble, +/// Unwrap an Expected \p expectedIVal and call toDouble, /// propagate any Errors. -llvm::Expected iValToDouble(llvm::Expected expectedIVal) { +Expected iValToDouble(Expected expectedIVal) { if (expectedIVal) { return (*expectedIVal)->toDouble(); } else { @@ -177,9 +176,9 @@ llvm::Expected iValToDouble(llvm::Expected expectedIVal) { } } -/// Unwrap a llvm::Expected \p expectedIVal and call toInt, +/// Unwrap an Expected \p expectedIVal and call toInt, /// propagate any Errors. -llvm::Expected iValToInt(llvm::Expected expectedIVal) { +Expected iValToInt(Expected expectedIVal) { if (expectedIVal) { return (*expectedIVal)->toInt(); } else { @@ -187,9 +186,9 @@ llvm::Expected iValToInt(llvm::Expected expectedIVal) { } } -/// Unwrap a llvm::Expected \p expectedIVal and call toBool, +/// Unwrap an Expected \p expectedIVal and call toBool, /// propagate any Errors. -llvm::Expected iValToBool(llvm::Expected expectedIVal) { +Expected iValToBool(Expected expectedIVal) { if (expectedIVal) { return (*expectedIVal)->toBool(); } else { @@ -197,10 +196,10 @@ llvm::Expected iValToBool(llvm::Expected expectedIVal) { } } -/// Unwrap a llvm::Expected \p expectedIVal and call toIntList, +/// Unwrap an Expected \p expectedIVal and call toIntList, /// propagate any Errors. -llvm::Expected *> -iValToIntList(llvm::Expected expectedIVal) { +Expected *> +iValToIntList(Expected expectedIVal) { if (expectedIVal) { return (*expectedIVal)->toIntList(); } else { @@ -212,8 +211,8 @@ iValToIntList(llvm::Expected expectedIVal) { /// indicates that the size should be equal to or greater than that size (for /// example -2 means at least 2). template -llvm::Error checkInputAndOutputSizes(const T &inputs, int64_t inputsSize, - const T &outputs, int64_t outputsSize) { +Error checkInputAndOutputSizes(const T &inputs, int64_t inputsSize, + const T &outputs, int64_t outputsSize) { if (inputsSize >= 0) { RETURN_ERR_IF_NOT(inputs.size() == inputsSize, glow::strFormat("Expected exactly %lu inputs, got %lu.", @@ -235,7 +234,7 @@ llvm::Error checkInputAndOutputSizes(const T &inputs, int64_t inputsSize, glow::strFormat("Expected at least %lu outputs, got %lu.", (size_t)outputsSize, outputs.size())); } - return llvm::Error::success(); + return Error::success(); } /// Given a vector \p original containing elements of some type, \returns a @@ -250,11 +249,11 @@ std::vector castVector(const std::vector &original) { return out; } -/// Unwrap a llvm::Expected> \p originalExpected and calls +/// Unwrap an Expected> \p originalExpected and calls /// castVector() with the contents, propagates any Errors.
template -llvm::Expected> -castVector(llvm::Expected> originalExpected) { +Expected> +castVector(Expected> originalExpected) { if (originalExpected) { return castVector(*originalExpected); } else { @@ -262,11 +261,10 @@ castVector(llvm::Expected> originalExpected) { } } -/// Unwrap a llvm::Expected \p originalExpected and calls +/// Unwrap a Expected \p originalExpected and calls /// static_cast() with the contents, propagates any Errors. template -llvm::Expected -static_cast_expected(llvm::Expected originalExpected) { +Expected static_cast_expected(Expected originalExpected) { if (originalExpected) { return static_cast(*originalExpected); } else { @@ -537,7 +535,7 @@ bool PyTorchModelLoader::isNodeSupported(const torch::jit::Node *ptNode) { return mapping.count(ptNode->kind()) != 0; } -llvm::Error PyTorchModelLoader::freezeWeights(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::freezeWeights(const torch::jit::Node *ptNode) { const auto &mapping = getSymbolsMapping(); const auto it = mapping.find(ptNode->kind()); @@ -593,10 +591,10 @@ llvm::Error PyTorchModelLoader::freezeWeights(const torch::jit::Node *ptNode) { frozenInputIndices_->insert(inputIndex); } } - return llvm::Error::success(); + return Error::success(); } -llvm::Error PyTorchModelLoader::loadNode(const torch::jit::Node *node) { +Error PyTorchModelLoader::loadNode(const torch::jit::Node *node) { const auto &mapping = getSymbolsMapping(); auto it = mapping.find(node->kind()); @@ -606,25 +604,25 @@ llvm::Error PyTorchModelLoader::loadNode(const torch::jit::Node *node) { return (this->*it->second.loadFn)(node); } -llvm::Error PyTorchModelLoader::addValueMapping(const torch::jit::Value *value, - glow::NodeValue nodeValue, - bool wasFrozen) { +Error PyTorchModelLoader::addValueMapping(const torch::jit::Value *value, + glow::NodeValue nodeValue, + bool wasFrozen) { ValueMapping mapping(std::move(nodeValue), wasFrozen); auto p = valueMap_.emplace(value, std::move(mapping)); RETURN_ERR_IF_NOT(p.second, glow::strFormat("Value %s is already mapped", value->debugNameBase().c_str())); - return llvm::Error::success(); + return Error::success(); } void PyTorchModelLoader::removeValueMapping(const torch::jit::Value *value) { valueMap_.erase(value); } -llvm::Error PyTorchModelLoader::addValueMapping(const torch::jit::Value *value, - glow::GlowIValue glowIValue, - bool wasFrozen) { +Error PyTorchModelLoader::addValueMapping(const torch::jit::Value *value, + glow::GlowIValue glowIValue, + bool wasFrozen) { glow::Constant *glowConstant = nullptr; if (glowIValue.isTensor()) { glow::Tensor *t; @@ -640,7 +638,7 @@ llvm::Error PyTorchModelLoader::addValueMapping(const torch::jit::Value *value, value->debugNameBase().c_str())); } - return llvm::Error::success(); + return Error::success(); } bool PyTorchModelLoader::hasGlowNodeValueForValue( @@ -667,14 +665,14 @@ bool PyTorchModelLoader::hasGlowIValueForValue(const torch::jit::Value *value, if (ignoreNones) { // Already checked ValueMappingType above. 
- const auto *glowIVal = exitOnErr(it->second.getMappedGlowIValue()); + const auto *glowIVal = EXIT_ON_ERR(it->second.getMappedGlowIValue()); return !glowIVal->isNone(); } return true; } -llvm::Expected +Expected PyTorchModelLoader::getGlowNodeValueForValue(const torch::jit::Value *value) { auto it = valueMap_.find(value); if (it == valueMap_.end()) { @@ -690,7 +688,7 @@ PyTorchModelLoader::getGlowNodeValueForValue(const torch::jit::Value *value) { return mappingValue.getMappedNodeValue(); } -llvm::Expected +Expected PyTorchModelLoader::getGlowIValueForValue(const torch::jit::Value *value) { auto it = valueMap_.find(value); if (it == valueMap_.end()) { @@ -705,7 +703,7 @@ PyTorchModelLoader::getGlowIValueForValue(const torch::jit::Value *value) { return mappingValue.getMappedGlowIValue(); } -llvm::Error PyTorchModelLoader::loadMul(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::loadMul(const torch::jit::Node *ptNode) { auto inputs = ptNode->inputs(); auto outputs = ptNode->outputs(); RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1)); @@ -721,7 +719,7 @@ llvm::Error PyTorchModelLoader::loadMul(const torch::jit::Node *ptNode) { return addValueMapping(outputs[0], glowNode->getResult()); } -llvm::Error PyTorchModelLoader::loadDiv(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::loadDiv(const torch::jit::Node *ptNode) { auto inputs = ptNode->inputs(); auto outputs = ptNode->outputs(); RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1)); @@ -737,7 +735,7 @@ llvm::Error PyTorchModelLoader::loadDiv(const torch::jit::Node *ptNode) { return addValueMapping(outputs[0], glowNode->getResult()); } -llvm::Error PyTorchModelLoader::loadAdd(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::loadAdd(const torch::jit::Node *ptNode) { auto inputs = ptNode->inputs(); auto outputs = ptNode->outputs(); RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 3, outputs, 1)); @@ -759,7 +757,7 @@ llvm::Error PyTorchModelLoader::loadAdd(const torch::jit::Node *ptNode) { return addValueMapping(outputs[0], glowNode->getResult()); } -llvm::Error PyTorchModelLoader::loadSub(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::loadSub(const torch::jit::Node *ptNode) { auto inputs = ptNode->inputs(); auto outputs = ptNode->outputs(); RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 3, outputs, 1)); @@ -781,7 +779,7 @@ llvm::Error PyTorchModelLoader::loadSub(const torch::jit::Node *ptNode) { return addValueMapping(outputs[0], glowNode->getResult()); } -llvm::Error PyTorchModelLoader::loadMax(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::loadMax(const torch::jit::Node *ptNode) { auto inputs = ptNode->inputs(); auto outputs = ptNode->outputs(); RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1)); @@ -795,7 +793,7 @@ llvm::Error PyTorchModelLoader::loadMax(const torch::jit::Node *ptNode) { return addValueMapping(outputs[0], glowNode->getResult()); } -llvm::Error PyTorchModelLoader::loadSize(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::loadSize(const torch::jit::Node *ptNode) { auto inputs = ptNode->inputs(); auto outputs = ptNode->outputs(); RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1)); @@ -824,8 +822,7 @@ llvm::Error PyTorchModelLoader::loadSize(const torch::jit::Node *ptNode) { return addValueMapping(outputs[0], std::move(glowIVal)); } -llvm::Error -PyTorchModelLoader::loadListConstruct(const torch::jit::Node *ptNode) { +Error PyTorchModelLoader::loadListConstruct(const torch::jit::Node *ptNode) { auto inputs 
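// The iValTo*() helpers above all repeat one shape: unwrap an Expected, apply
// a conversion, otherwise forward the Error. A generic sketch of that pattern
// (hypothetical helper, not part of the patch; assumes glow::Expected mirrors
// llvm::Expected's takeError()):
template <typename OutT, typename InT, typename Fn>
Expected<OutT> mapExpected(Expected<InT> in, Fn &&fn) {
  if (in) {
    return fn(*in); // success: apply the conversion to the wrapped value
  } else {
    return in.takeError(); // failure: forward the contained Error
  }
}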
@@ -824,8 +822,7 @@ llvm::Error PyTorchModelLoader::loadSize(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], std::move(glowIVal));
 }

-llvm::Error
-PyTorchModelLoader::loadListConstruct(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadListConstruct(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   // Requires -1 because this requires at least one input.
@@ -871,7 +868,7 @@ PyTorchModelLoader::loadListConstruct(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], std::move(glowIVal));
 }

-llvm::Error PyTorchModelLoader::loadReshape(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadReshape(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1));
@@ -914,7 +911,7 @@ llvm::Error PyTorchModelLoader::loadReshape(const torch::jit::Node *ptNode) {
                          F_.createReshape("reshape", input, glowShape));
 }

-llvm::Error PyTorchModelLoader::loadRelu(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadRelu(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 1, outputs, 1));
@@ -926,7 +923,7 @@ llvm::Error PyTorchModelLoader::loadRelu(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode->getResult());
 }

-llvm::Error PyTorchModelLoader::loadExp(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadExp(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 1, outputs, 1));
@@ -938,7 +935,7 @@ llvm::Error PyTorchModelLoader::loadExp(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode->getResult());
 }

-llvm::Error PyTorchModelLoader::loadSqrt(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadSqrt(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 1, outputs, 1));
@@ -950,7 +947,7 @@ llvm::Error PyTorchModelLoader::loadSqrt(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode->getResult());
 }

-llvm::Error PyTorchModelLoader::loadSigmoid(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadSigmoid(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 1, outputs, 1));
@@ -962,7 +959,7 @@ llvm::Error PyTorchModelLoader::loadSigmoid(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode->getResult());
 }

-llvm::Error PyTorchModelLoader::loadReciprocal(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadReciprocal(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 1, outputs, 1));
@@ -973,8 +970,7 @@ llvm::Error PyTorchModelLoader::loadReciprocal(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode->getResult());
 }

-llvm::Error
-PyTorchModelLoader::loadConvolution(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadConvolution(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 12, outputs, 1));
@@ -1057,7 +1053,7 @@ PyTorchModelLoader::loadConvolution(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], output->getResult());
 }

-llvm::Error PyTorchModelLoader::loadLinear(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadLinear(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 3, outputs, 1));
@@ -1095,7 +1091,7 @@ llvm::Error PyTorchModelLoader::loadLinear(const torch::jit::Node *ptNode) {
                          F_.createFullyConnected("linear", input, weights, bias, outTy));
 }

-llvm::Error PyTorchModelLoader::loadBatchNorm(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadBatchNorm(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 9, outputs, 1));
@@ -1167,7 +1163,7 @@ llvm::Error PyTorchModelLoader::loadBatchNorm(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], bn->getResult());
 }

-llvm::Error PyTorchModelLoader::loadMaxPool2d(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadMaxPool2d(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 6, outputs, 1));
@@ -1223,7 +1219,7 @@ llvm::Error PyTorchModelLoader::loadMaxPool2d(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], output);
 }

-llvm::Error PyTorchModelLoader::loadAvgPool2d(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadAvgPool2d(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 7, outputs, 1));
@@ -1277,7 +1273,7 @@ llvm::Error PyTorchModelLoader::loadAvgPool2d(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], output);
 }

-llvm::Error PyTorchModelLoader::loadClamp(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadClamp(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 3, outputs, 1));
@@ -1302,8 +1298,8 @@ llvm::Error PyTorchModelLoader::loadClamp(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], output);
 }

-llvm::Error
-PyTorchModelLoader::loadAdaptiveAvgPool2d(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadAdaptiveAvgPool2d(
+    const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1));
@@ -1343,7 +1339,7 @@ PyTorchModelLoader::loadAdaptiveAvgPool2d(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], output);
 }

-llvm::Error PyTorchModelLoader::loadTranspose(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadTranspose(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 1, outputs, 1));
@@ -1362,7 +1358,7 @@ llvm::Error PyTorchModelLoader::loadTranspose(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], output);
 }

-llvm::Error PyTorchModelLoader::loadMin(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadMin(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1));
@@ -1376,7 +1372,7 @@ llvm::Error PyTorchModelLoader::loadMin(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], output);
 }

-llvm::Error PyTorchModelLoader::loadMatMul(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadMatMul(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1));
@@ -1390,7 +1386,7 @@ llvm::Error PyTorchModelLoader::loadMatMul(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode);
 }

-llvm::Error PyTorchModelLoader::loadPRelu(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadPRelu(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1));
@@ -1419,7 +1415,7 @@ llvm::Error PyTorchModelLoader::loadPRelu(const torch::jit::Node *ptNode) {
 }

 /// TODO: check Dtype is float (optional value).
-llvm::Error PyTorchModelLoader::loadSoftMax(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadSoftMax(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 3, outputs, 1));
@@ -1443,7 +1439,7 @@ llvm::Error PyTorchModelLoader::loadSoftMax(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode);
 }

-llvm::Error PyTorchModelLoader::loadFlatten(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadFlatten(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 3, outputs, 1));
@@ -1466,7 +1462,7 @@ llvm::Error PyTorchModelLoader::loadFlatten(const torch::jit::Node *ptNode) {
   return addValueMapping(outputs[0], glowNode);
 }

-llvm::Error PyTorchModelLoader::loadTopK(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadTopK(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 5, outputs, 2));
@@ -1500,10 +1496,10 @@ llvm::Error PyTorchModelLoader::loadTopK(const torch::jit::Node *ptNode) {
   RETURN_IF_ERR(addValueMapping(outputs[0], glowNode->getValues()));
   RETURN_IF_ERR(addValueMapping(outputs[1], glowNode->getIndices()));

-  return llvm::Error::success();
+  return Error::success();
 }

-llvm::Error PyTorchModelLoader::loadConstant(const torch::jit::Node *ptNode) {
+Error PyTorchModelLoader::loadConstant(const torch::jit::Node *ptNode) {
   auto inputs = ptNode->inputs();
   auto outputs = ptNode->outputs();
   RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 0, outputs, 1));
@@ -1522,19 +1518,19 @@ llvm::Error PyTorchModelLoader::loadConstant(const torch::jit::Node *ptNode) {
     std::vector<int64_t> *ints;
     ASSIGN_VALUE_OR_RETURN_ERR(ints, glowIVal.toIntList());
     if (ints->empty()) {
-      return llvm::Error::success();
+      return Error::success();
     }
   } else if (glowIVal.isDoubleList()) {
     std::vector<double> *doubles;
     ASSIGN_VALUE_OR_RETURN_ERR(doubles, glowIVal.toDoubleList());
     if (doubles->empty()) {
-      return llvm::Error::success();
+      return Error::success();
     }
   } else if (glowIVal.isBoolList()) {
     std::vector<bool> *bools;
     ASSIGN_VALUE_OR_RETURN_ERR(bools, glowIVal.toBoolList());
     if (bools->empty()) {
-      return llvm::Error::success();
+      return Error::success();
     }
   }
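// Every loadX() method above follows the same macro discipline. A condensed,
// hypothetical loader method showing the pattern (loadBinaryOp is illustrative
// and not declared in the class as patched):
Error PyTorchModelLoader::loadBinaryOp(const torch::jit::Node *ptNode) {
  auto inputs = ptNode->inputs();
  auto outputs = ptNode->outputs();
  // Bail out early with an Error if the arity is wrong.
  RETURN_IF_ERR(checkInputAndOutputSizes(inputs, 2, outputs, 1));

  glow::NodeValue lhs;
  glow::NodeValue rhs;
  // ASSIGN_VALUE_OR_RETURN_ERR unwraps an Expected<T> or propagates its Error.
  ASSIGN_VALUE_OR_RETURN_ERR(lhs, getGlowNodeValueForValue(inputs[0]));
  ASSIGN_VALUE_OR_RETURN_ERR(rhs, getGlowNodeValueForValue(inputs[1]));

  auto *node = F_.createMul("mul", lhs, rhs);
  return addValueMapping(outputs[0], node->getResult());
}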
@@ -1551,18 +1547,17 @@ llvm::Error PyTorchModelLoader::loadConstant(const torch::jit::Node *ptNode) {
     RETURN_IF_ERR(addValueMapping(outputs[0], std::move(glowIVal)));
   }

-  return llvm::Error::success();
+  return Error::success();
 }

 /*static*/
-llvm::Error PyTorchModelLoader::loadJITGraph(
+Error PyTorchModelLoader::loadJITGraph(
     glow::Function &F, const torch::jit::Graph &graph,
     const at::ArrayRef<torch::jit::IValue> inputs,
     std::vector<glow::Placeholder *> &inputPlaceholders,
     std::vector<glow::Placeholder *> &outputPlaceholders,
     const PyTorchLoaderSettings &settings) {
-  llvm::Error error = llvm::Error::success();
-  MARK_ERR_CHECKED(error);
+  Error error = Error::empty();
   PyTorchModelLoader loader(F, graph, inputs, inputPlaceholders,
                             outputPlaceholders, error, settings,
                             /*frozenInputIndices*/ nullptr);
@@ -1573,11 +1568,11 @@ PyTorchModelLoader::PyTorchModelLoader(
     glow::Function &F, const torch::jit::Graph &graph,
     const at::ArrayRef<torch::jit::IValue> inputs,
     std::vector<glow::Placeholder *> &inputPlaceholders,
-    std::vector<glow::Placeholder *> &outputPlaceholders, llvm::Error &error,
+    std::vector<glow::Placeholder *> &outputPlaceholders, Error &error,
     const PyTorchLoaderSettings &settings,
     std::set<size_t> *frozenInputIndices)
     : F_(F), inputs_(inputs), frozenInputIndices_(frozenInputIndices),
       copyTensorMemory_(false) {
-  auto loadFn = [&]() -> llvm::Error {
+  auto loadFn = [&]() -> Error {
     auto graphInputValues = graph.inputs();

     RETURN_ERR_IF_NOT(
@@ -1629,21 +1624,20 @@ PyTorchModelLoader::PyTorchModelLoader(
       outputPlaceholders.push_back(save->getPlaceholder());
     }

-    return llvm::Error::success();
+    return Error::success();
   };

   error = loadFn();
 }

 /*static*/
-llvm::Error PyTorchModelLoader::loadJITGraphForOnnxTraining(
+Error PyTorchModelLoader::loadJITGraphForOnnxTraining(
     glow::Function &F, const torch::jit::Graph &graph,
     const at::ArrayRef<torch::jit::IValue> inputs,
     const at::ArrayRef> parameters,
     std::vector<glow::Placeholder *> &inputPlaceholders,
     std::vector<glow::Placeholder *> &outputPlaceholders) {
-  llvm::Error error = llvm::Error::success();
-  MARK_ERR_CHECKED(error);
+  Error error = Error::empty();
   PyTorchModelLoader loader(F, graph, inputs, parameters, inputPlaceholders,
                             outputPlaceholders, error);
   return error;
@@ -1654,10 +1648,10 @@ PyTorchModelLoader::PyTorchModelLoader(
     const at::ArrayRef<torch::jit::IValue> inputs,
     const at::ArrayRef> parameters,
     std::vector<glow::Placeholder *> &inputPlaceholders,
-    std::vector<glow::Placeholder *> &outputPlaceholders, llvm::Error &error)
+    std::vector<glow::Placeholder *> &outputPlaceholders, Error &error)
     : F_(F), inputs_(inputs), copyTensorMemory_(true) {

-  auto setup = [&]() -> llvm::Error {
+  auto setup = [&]() -> Error {
     auto graphInputValues = graph.inputs();
     RETURN_ERR_IF_NOT(
         inputs.size() + parameters.size() == graphInputValues.size(),
@@ -1713,7 +1707,7 @@ PyTorchModelLoader::PyTorchModelLoader(
       outputPlaceholders.push_back(save->getPlaceholder());
     }

-    return llvm::Error::success();
+    return Error::success();
   };

   error = setup();
@@ -1732,7 +1726,7 @@ ValueMapping::ValueMapping(GlowIValue glowIValue) {
   glowIValue_ = llvm::make_unique<GlowIValue>(std::move(glowIValue));
 }

-llvm::Expected<glow::NodeValue> ValueMapping::getMappedNodeValue() {
+Expected<glow::NodeValue> ValueMapping::getMappedNodeValue() {
   if (mappingType_ == ValueMappingType::IValue) {
     RETURN_ERR("ValueMapping doesn't contain a NodeValue");
   } else {
@@ -1740,7 +1734,7 @@ llvm::Expected<glow::NodeValue> ValueMapping::getMappedNodeValue() {
   }
 }

-llvm::Expected<GlowIValue *> ValueMapping::getMappedGlowIValue() {
+Expected<GlowIValue *> ValueMapping::getMappedGlowIValue() {
   if (mappingType_ == ValueMappingType::IValue) {
     return glowIValue_.get();
   } else {
@@ -1748,7 +1742,7 @@ llvm::Expected<GlowIValue *> ValueMapping::getMappedGlowIValue() {
   }
 }

-llvm::Expected<const GlowIValue *> ValueMapping::getMappedGlowIValue() const {
+Expected<const GlowIValue *> ValueMapping::getMappedGlowIValue() const {
   if (mappingType_ == ValueMappingType::IValue) {
     return glowIValue_.get();
   } else {
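// Sketch of the Error-out-parameter idiom the loader constructors above rely
// on (not part of the patch; SomeGraphLoader is a hypothetical stand-in).
// Error::empty() replaces the old two-line setup
//   llvm::Error error = llvm::Error::success(); MARK_ERR_CHECKED(error);
// by yielding an Error placeholder that the constructor must overwrite.
struct SomeGraphLoader {
  SomeGraphLoader(glow::Function &F, Error &error) {
    // A real loader would do its work here and record the outcome.
    error = Error::success();
  }
};

Error loadSomething(glow::Function &F) {
  Error error = Error::empty();
  SomeGraphLoader loader(F, error); // constructor writes its status to error
  return error;                     // hand the (possibly failed) state back
}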
diff --git a/torch_glow/src/PyTorchModelLoader.h b/torch_glow/src/PyTorchModelLoader.h
index b2bb3311cb..28ed8d7f3e 100644
--- a/torch_glow/src/PyTorchModelLoader.h
+++ b/torch_glow/src/PyTorchModelLoader.h
@@ -18,7 +18,6 @@
 #define GLOW_TORCH_GLOW_SRC_PYTORCHMODELLOADER_H

 #include "PyTorchCommon.h"
-#include <llvm/Support/Error.h>
 #include <torch/csrc/jit/ir.h>

 #include "GlowIValue.h"
@@ -70,13 +69,13 @@ class ValueMapping {
   ValueMapping(GlowIValue glowIValue);

   /// \returns the mapped NodeValue if one is mapped otherwise return an error.
-  llvm::Expected<glow::NodeValue> getMappedNodeValue();
+  Expected<glow::NodeValue> getMappedNodeValue();

   /// \returns the mapped GlowIValue if one is mapped otherwise return an error.
-  llvm::Expected<GlowIValue *> getMappedGlowIValue();
+  Expected<GlowIValue *> getMappedGlowIValue();

   /// \returns the mapped GlowIValue if one is mapped otherwise return an error.
-  llvm::Expected<const GlowIValue *> getMappedGlowIValue() const;
+  Expected<const GlowIValue *> getMappedGlowIValue() const;
 };

 /// Loads PyTorch JIT IR graphs as a Glow Function.
@@ -111,8 +110,7 @@ class PyTorchModelLoader {
   /// constants.
   struct MappingOfMemberFunctionsValue {
     /// The type of functions used to load PyTorch nodes in PyTorchModelLoader.
-    using LoadFn =
-        llvm::Error (PyTorchModelLoader::*)(const torch::jit::Node *);
+    using LoadFn = Error (PyTorchModelLoader::*)(const torch::jit::Node *);

     /// Symbols (as strings) that this mapping value is applicable to.
     const std::vector<const char *> symbols;
@@ -165,7 +163,7 @@ class PyTorchModelLoader {
   /// settings control the fusion details. Output parameters \p
   /// inputPlaceholders and \p outputPlaceholders are filled out. \returns
   /// error on failure.
-  static llvm::Error
+  static Error
   loadJITGraph(glow::Function &F, const torch::jit::Graph &graph,
                const at::ArrayRef<torch::jit::IValue> inputs,
                std::vector<glow::Placeholder *> &inputPlaceholders,
@@ -176,7 +174,7 @@ class PyTorchModelLoader {
   /// as graph external inputs, and \parameters as known tensors. Output
   /// parameters \p inputPlaceholders and \p outputPlaceholders are filled out.
   /// \returns error on failure.
-  static llvm::Error loadJITGraphForOnnxTraining(
+  static Error loadJITGraphForOnnxTraining(
       glow::Function &F, const torch::jit::Graph &graph,
       const at::ArrayRef<torch::jit::IValue> inputs,
       const at::ArrayRef> parameters,
@@ -194,7 +192,7 @@ class PyTorchModelLoader {
                     const at::ArrayRef<torch::jit::IValue> inputs,
                     std::vector<glow::Placeholder *> &inputPlaceholders,
                     std::vector<glow::Placeholder *> &outputPlaceholders,
-                    llvm::Error &error, const PyTorchLoaderSettings &settings,
+                    Error &error, const PyTorchLoaderSettings &settings,
                     std::set<size_t> *frozenInputIndices);

   /// Takes a glow::Function \p F, a jit::Graph \p graph to load, and a
@@ -206,7 +204,7 @@ class PyTorchModelLoader {
                     const at::ArrayRef<torch::jit::IValue> inputs,
                     const at::ArrayRef> parameters,
                     std::vector<glow::Placeholder *> &inputPlaceholders,
-                    std::vector<glow::Placeholder *> &outputPlaceholders, llvm::Error &error);
+                    std::vector<glow::Placeholder *> &outputPlaceholders, Error &error);

   /// Save access to the mapping.
   static const MappingOfMemberFunctions &getSymbolsMapping();

@@ -215,17 +213,15 @@ class PyTorchModelLoader {
   /// \p nodeValue. Set \p wasFrozen to true if this comes from a frozen
   /// input.
   /// \returns error on failure.
-  llvm::Error addValueMapping(const torch::jit::Value *value,
-                              glow::NodeValue nodeValue,
-                              bool wasFrozen = false);
+  Error addValueMapping(const torch::jit::Value *value,
+                        glow::NodeValue nodeValue, bool wasFrozen = false);

   /// Add a new mapping from the PyTorch Value \p value to the GlowIValue
   /// \p glowIValue. Set \p wasFrozen to true if this comes from a frozen
   /// input.
   /// \returns error on failure.
-  llvm::Error addValueMapping(const torch::jit::Value *value,
-                              glow::GlowIValue glowIValue,
-                              bool wasFrozen = false);
+  Error addValueMapping(const torch::jit::Value *value,
+                        glow::GlowIValue glowIValue, bool wasFrozen = false);

   /// Remove any ValueMapping associated with \p value.
   void removeValueMapping(const torch::jit::Value *value);
@@ -241,11 +237,11 @@ class PyTorchModelLoader {
                                bool ignoreNones = false) const;

   /// Find the Glow NodeValue that maps to a given PyTorch value \p value.
-  llvm::Expected<glow::NodeValue>
+  Expected<glow::NodeValue>
   getGlowNodeValueForValue(const torch::jit::Value *value);

   /// Find the GlowIValue that maps to a given PyTorch value \p value.
-  llvm::Expected<GlowIValue *>
+  Expected<GlowIValue *>
   getGlowIValueForValue(const torch::jit::Value *value);

   /// For each Placeholder input to \p ptNode, if this input has been marked
@@ -253,122 +249,122 @@ class PyTorchModelLoader {
   /// create a glow Constant for that Placeholder with the iValue from the stack
   /// of inputs for this loader. \returns a ValueMap containing just these new
   /// Constants.
-  llvm::Error freezeWeights(const torch::jit::Node *ptNode);
+  Error freezeWeights(const torch::jit::Node *ptNode);

   /// Load a given PyTorch Node \p ptNode. \returns
   /// error on failure.
-  llvm::Error loadNode(const torch::jit::Node *ptNode);
+  Error loadNode(const torch::jit::Node *ptNode);

   /// Load a PyTorch Constant node as a Glow Constant.
   /// \returns error on failure.
-  llvm::Error loadConstant(const torch::jit::Node *ptNode);
+  Error loadConstant(const torch::jit::Node *ptNode);

   /// Load a PyTorch mul node.
   /// \returns error on failure.
-  llvm::Error loadMul(const torch::jit::Node *ptNode);
+  Error loadMul(const torch::jit::Node *ptNode);

   /// Load a PyTorch div node.
   /// \returns error on failure.
-  llvm::Error loadDiv(const torch::jit::Node *ptNode);
+  Error loadDiv(const torch::jit::Node *ptNode);

   /// Load a PyTorch add node.
   /// \returns error on failure.
-  llvm::Error loadAdd(const torch::jit::Node *ptNode);
+  Error loadAdd(const torch::jit::Node *ptNode);

   /// Load a PyTorch sub node.
   /// \returns error on failure.
-  llvm::Error loadSub(const torch::jit::Node *ptNode);
+  Error loadSub(const torch::jit::Node *ptNode);

   /// Load a PyTorch max node.
   /// \returns error on failure.
-  llvm::Error loadMax(const torch::jit::Node *ptNode);
+  Error loadMax(const torch::jit::Node *ptNode);

   /// Load a PyTorch relu node.
   /// \returns error on failure.
-  llvm::Error loadRelu(const torch::jit::Node *ptNode);
+  Error loadRelu(const torch::jit::Node *ptNode);

   /// Load a PyTorch exp node.
   /// \returns error on failure.
-  llvm::Error loadExp(const torch::jit::Node *ptNode);
+  Error loadExp(const torch::jit::Node *ptNode);

   /// Load a PyTorch sqrt node.
   /// \returns error on failure.
-  llvm::Error loadSqrt(const torch::jit::Node *ptNode);
+  Error loadSqrt(const torch::jit::Node *ptNode);

   /// Load a PyTorch reciprocal node.
-  llvm::Error loadReciprocal(const torch::jit::Node *ptNode);
+  Error loadReciprocal(const torch::jit::Node *ptNode);

   /// Load a PyTorch _convolution node.
   /// \returns error on failure.
-  llvm::Error loadConvolution(const torch::jit::Node *ptNode);
+  Error loadConvolution(const torch::jit::Node *ptNode);

   /// Load a PyTorch batch_norm node.
   /// \returns error on failure.
-  llvm::Error loadBatchNorm(const torch::jit::Node *ptNode);
+  Error loadBatchNorm(const torch::jit::Node *ptNode);

   /// Load a PyTorch max_pool2d node.
   /// \returns error on failure.
-  llvm::Error loadMaxPool2d(const torch::jit::Node *ptNode);
+  Error loadMaxPool2d(const torch::jit::Node *ptNode);

   /// Load a PyTorch sigmoid node.
   /// \returns error on failure.
-  llvm::Error loadSigmoid(const torch::jit::Node *ptNode);
+  Error loadSigmoid(const torch::jit::Node *ptNode);

   /// Load a PyTorch avg_pool2d node.
   /// \returns error on failure.
-  llvm::Error loadAvgPool2d(const torch::jit::Node *ptNode);
+  Error loadAvgPool2d(const torch::jit::Node *ptNode);

   /// Load a PyTorch adaptive_avg_pool2d node.
   /// \returns error on failure.
-  llvm::Error loadAdaptiveAvgPool2d(const torch::jit::Node *ptNode);
+  Error loadAdaptiveAvgPool2d(const torch::jit::Node *ptNode);

   /// Load a PyTorch t (transpose) node.
   /// \returns error on failure.
-  llvm::Error loadTranspose(const torch::jit::Node *ptNode);
+  Error loadTranspose(const torch::jit::Node *ptNode);

   /// Load a PyTorch aten::linear node.
   /// \returns error on failure.
-  llvm::Error loadLinear(const torch::jit::Node *ptNode);
+  Error loadLinear(const torch::jit::Node *ptNode);

   /// Load a PyTorch min node.
   /// \returns error on failure.
-  llvm::Error loadMin(const torch::jit::Node *ptNode);
+  Error loadMin(const torch::jit::Node *ptNode);

   /// Load a PyTorch clamp node.
   /// \returns error on failure.
-  llvm::Error loadClamp(const torch::jit::Node *ptNode);
+  Error loadClamp(const torch::jit::Node *ptNode);

   /// Load a PyTorch matmul (n x k) x (k x m) -> (n x m) node.
   /// \returns error on failure.
-  llvm::Error loadMatMul(const torch::jit::Node *ptNode);
+  Error loadMatMul(const torch::jit::Node *ptNode);

   /// Load a PyTorch prelu node.
   /// \returns error on failure.
-  llvm::Error loadPRelu(const torch::jit::Node *ptNode);
+  Error loadPRelu(const torch::jit::Node *ptNode);

   /// Load a PyTorch SoftMax node.
   /// \returns error on failure.
-  llvm::Error loadSoftMax(const torch::jit::Node *ptNode);
+  Error loadSoftMax(const torch::jit::Node *ptNode);

   /// Load a PyTorch flatten node.
   /// \returns error on failure.
-  llvm::Error loadFlatten(const torch::jit::Node *ptNode);
+  Error loadFlatten(const torch::jit::Node *ptNode);

   /// Load a PyTorch topK node.
   /// \returns error on failure.
-  llvm::Error loadTopK(const torch::jit::Node *ptNode);
+  Error loadTopK(const torch::jit::Node *ptNode);

   /// Load a PyTorch aten::size node.
   /// \returns error on failure.
-  llvm::Error loadSize(const torch::jit::Node *ptNode);
+  Error loadSize(const torch::jit::Node *ptNode);

   /// Load a PyTorch prim::ListConstruct node.
   /// \returns error on failure.
-  llvm::Error loadListConstruct(const torch::jit::Node *ptNode);
+  Error loadListConstruct(const torch::jit::Node *ptNode);

   /// Load a PyTorch aten::reshape node.
   /// \returns error on failure.
-  llvm::Error loadReshape(const torch::jit::Node *ptNode);
+  Error loadReshape(const torch::jit::Node *ptNode);
 };

 } // namespace glow
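// Minimal sketch of the member-function-pointer dispatch that LoadFn above
// enables (cf. loadNode() in the .cpp hunks earlier). Loader here is a
// stand-in type, not the real class:
struct Loader {
  Error loadRelu(const torch::jit::Node *ptNode);
  using LoadFn = Error (Loader::*)(const torch::jit::Node *);

  Error dispatch(const torch::jit::Node *node, LoadFn fn) {
    // Invoke the loader selected from the symbol -> LoadFn mapping.
    return (this->*fn)(node);
  }
};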
diff --git a/torch_glow/src/training/TorchGlowTraining.cpp b/torch_glow/src/training/TorchGlowTraining.cpp
index f9c8783dd1..c359317c2e 100644
--- a/torch_glow/src/training/TorchGlowTraining.cpp
+++ b/torch_glow/src/training/TorchGlowTraining.cpp
@@ -55,12 +55,12 @@ void TorchGlowTraining::clear() {

 TorchGlowTraining::~TorchGlowTraining() { clear(); }

-llvm::Error TorchGlowTraining::init(llvm::StringRef modelFile,
-                                    std::vector<torch::jit::IValue> &inputs,
-                                    llvm::StringRef backend,
-                                    const ONNXWriterParameters &parameters,
-                                    const TrainingConfig &config,
-                                    RandomizeWeights mode) {
+Error TorchGlowTraining::init(llvm::StringRef modelFile,
+                              std::vector<torch::jit::IValue> &inputs,
+                              llvm::StringRef backend,
+                              const ONNXWriterParameters &parameters,
+                              const TrainingConfig &config,
+                              RandomizeWeights mode) {
   // Clean up all previous allocations, if any.
   clear();
   // Initialize execution engine.
@@ -69,7 +69,7 @@ llvm::Error TorchGlowTraining::init(llvm::StringRef modelFile,
   F_ = engine_.getModule().createFunction("torch_glow_model");
   // Execution lambda helps to use compact Glow RETURN_* macros inside, and
   // have possibility to clean up resources before leaving the function scope.
-  auto setup = [&]() -> llvm::Error {
+  auto setup = [&]() -> Error {
     // Detect the proper loader.
     if (modelFile.endswith_lower(".pt")) {
       RETURN_IF_ERR(PyTorchFileLoader::parsePyTorchGraphForOnnxTraining(
@@ -78,8 +78,7 @@ llvm::Error TorchGlowTraining::init(llvm::StringRef modelFile,
         mode = RandomizeWeights::YES;
       }
     } else if (modelFile.endswith_lower(".onnx")) {
-      llvm::Error err = llvm::Error::success();
-      MARK_ERR_CHECKED(err);
+      Error err = Error::empty();
       ONNXModelLoader loader(modelFile.str(), {}, {}, *F_, &err);
       RETURN_IF_ERR(err);
       if (mode == RandomizeWeights::AUTO) {
@@ -124,10 +123,10 @@ llvm::Error TorchGlowTraining::init(llvm::StringRef modelFile,
   TF_ = glow::differentiate(F_, config);
   engine_.compile(CompilationMode::Train);

-    return llvm::Error::success();
+    return Error::success();
   };

-  llvm::Error err = setup();
+  Error err = setup();
   if (err) {
     // On failure cleanup resources.
     clear();
@@ -135,11 +134,10 @@ llvm::Error TorchGlowTraining::init(llvm::StringRef modelFile,
   }

   parameters_ = parameters;
-  return llvm::Error::success();
+  return Error::success();
 }

-llvm::Error TorchGlowTraining::train(const Tensor &samples,
-                                     const Tensor &labels) {
+Error TorchGlowTraining::train(const Tensor &samples, const Tensor &labels) {
   RETURN_ERR_IF_NOT(TF_, "Class instance wasn't properly initialized.");
   auto *input = bindings_.get(inputPHs_[0]);
   auto *label = bindings_.get(selectedPH_);
@@ -170,12 +168,11 @@ llvm::Error TorchGlowTraining::train(const Tensor &samples,
     engine_.run(bindings_, TFName);
   }
 #endif
-  return llvm::Error::success();
+  return Error::success();
 }

-llvm::Error TorchGlowTraining::save(llvm::StringRef snapshotFile) {
-  llvm::Error err = llvm::Error::success();
-  MARK_ERR_CHECKED(err);
+Error TorchGlowTraining::save(llvm::StringRef snapshotFile) {
+  Error err = Error::empty();
   // Detects output ONNX file format, text or binary.
   const bool textMode = snapshotFile.endswith_lower(".onnxtxt");

@@ -221,7 +218,7 @@ bool TorchGlowTrainingWrapper::init(const std::string &modelPath,
                                     bool randomizeWeights) {
   std::vector<torch::jit::IValue> ptInputs = {ptTensors.begin(),
                                               ptTensors.end()};
-  return !errToBool(trainer_.init(
+  return !ERR_TO_BOOL(trainer_.init(
       modelPath, ptInputs, backend, parameters_, config_,
       randomizeWeights ? TorchGlowTraining::RandomizeWeights::YES
                        : TorchGlowTraining::RandomizeWeights::NO));
@@ -232,11 +229,11 @@ bool TorchGlowTrainingWrapper::train(const at::Tensor &ptSamples,
   glow::Tensor glowSamples = ptTensorToGlowTensor(ptSamples);
   glow::Tensor glowLabels = ptTensorToGlowTensor(ptLabels);

-  return !errToBool(trainer_.train(glowSamples, glowLabels));
+  return !ERR_TO_BOOL(trainer_.train(glowSamples, glowLabels));
 }

 bool TorchGlowTrainingWrapper::save(const std::string &snapshotFile) {
-  return !errToBool(trainer_.save(snapshotFile));
+  return !ERR_TO_BOOL(trainer_.save(snapshotFile));
 }

 /// Sets ONNXWriterParameters
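// The wrapper methods above fold an Error into a success bool. Note the
// negation: ERR_TO_BOOL returns true when there WAS an error, so the wrappers
// return true on success. Sketch (not part of the patch):
bool trainOnce(glow::TorchGlowTraining &trainer, const glow::Tensor &samples,
               const glow::Tensor &labels) {
  return !ERR_TO_BOOL(trainer.train(samples, labels));
}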
diff --git a/torch_glow/src/training/TorchGlowTraining.h b/torch_glow/src/training/TorchGlowTraining.h
index 22d5a6dfca..258cc1b776 100644
--- a/torch_glow/src/training/TorchGlowTraining.h
+++ b/torch_glow/src/training/TorchGlowTraining.h
@@ -20,7 +20,6 @@
 #include "PyTorchCommon.h"
 #include "glow/ExecutionEngine/ExecutionEngine.h"
 #include "glow/Graph/Graph.h"
-#include "llvm/Support/Error.h"
 #include

 namespace glow {
@@ -71,26 +70,24 @@ class TorchGlowTraining {
   /// \p backend name, ONNX exporter \p parameters, \p inputs, \p config,
   /// randomizes weights according to the provided \p mode.
   /// \returns error on failure.
-  llvm::Error init(llvm::StringRef modelFile,
-                   std::vector<torch::jit::IValue> &inputs,
-                   llvm::StringRef backend,
-                   const ONNXWriterParameters &parameters,
-                   const TrainingConfig &config,
-                   RandomizeWeights mode = RandomizeWeights::AUTO);
+  Error init(llvm::StringRef modelFile, std::vector<torch::jit::IValue> &inputs,
+             llvm::StringRef backend, const ONNXWriterParameters &parameters,
+             const TrainingConfig &config,
+             RandomizeWeights mode = RandomizeWeights::AUTO);

   /// Trains the loaded model from the provided \p samples and \p labels.
   /// Samples and labels must have compatible dimensions and types.
   /// The caller can provide one or more samples and corresponding labels.
   /// Method can be invoked as many times as required.
   /// \returns error in case of an uninitialized model or invalid input parameters.
-  llvm::Error train(const Tensor &samples, const Tensor &labels);
+  Error train(const Tensor &samples, const Tensor &labels);

   /// Saves the trained model in ONNX (extended) format to the provided
   /// \p snapshotFile. It's safe to call this method any time after train()
   /// calls. Method leaves the internal trained weights unaffected, and caller
   /// can continue to call train() method again.
   /// \returns error on failure.
-  llvm::Error save(llvm::StringRef snapshotFile);
+  Error save(llvm::StringRef snapshotFile);
 };

 /// Wrapper class helps to integrate TorchGlowTraining class functionality into
diff --git a/torch_glow/tests/unittests/PyTorchLoaderTest.cpp b/torch_glow/tests/unittests/PyTorchLoaderTest.cpp
index a2f452a852..a4e21765ab 100644
--- a/torch_glow/tests/unittests/PyTorchLoaderTest.cpp
+++ b/torch_glow/tests/unittests/PyTorchLoaderTest.cpp
@@ -28,7 +28,7 @@ TEST(ModelLoaderTest, Loader) {
   const std::string fileName{GLOW_DATA_PATH
                              "tests/models/pytorchModels/resnet18.pt"};
   std::shared_ptr<torch::jit::script::Module> module;
-  llvm::Error err = glow::PyTorchFileLoader::loadPyTorchModel(fileName, module);
+  glow::Error err = glow::PyTorchFileLoader::loadPyTorchModel(fileName, module);

   EXPECT_FALSE(err);
 }
@@ -45,10 +45,10 @@ TEST(ModelLoaderTest, Fusion) {
   std::vector<glow::Placeholder *> inputPlaceholders;
   std::vector<glow::Placeholder *> outputPlaceholders;

-  llvm::Error err = glow::PyTorchFileLoader::loadPyTorchGraph(
+  glow::Error err = glow::PyTorchFileLoader::loadPyTorchGraph(
       fileName, vec, *F, inputPlaceholders, outputPlaceholders);

-  EXPECT_FALSE(glow::errToBool(std::move(err)));
+  EXPECT_FALSE(ERR_TO_BOOL(std::move(err)));
 }

 TEST(ModelLoaderTest, DISABLED_Direct) {
@@ -64,8 +64,8 @@ TEST(ModelLoaderTest, DISABLED_Direct) {
   std::vector<glow::Placeholder *> inputPlaceholders;
   std::vector<glow::Placeholder *> outputPlaceholders;

-  llvm::Error err = glow::PyTorchFileLoader::parsePyTorchGraphForOnnxTraining(
+  glow::Error err = glow::PyTorchFileLoader::parsePyTorchGraphForOnnxTraining(
       fileName, vec, *F, inputPlaceholders, outputPlaceholders);

-  EXPECT_FALSE(glow::errToBool(std::move(err)));
+  EXPECT_FALSE(ERR_TO_BOOL(std::move(err)));
 }
diff --git a/torch_glow/tests/unittests/TorchGlowTrainingTest.cpp b/torch_glow/tests/unittests/TorchGlowTrainingTest.cpp
index afc831f8e0..b13c76f67d 100644
--- a/torch_glow/tests/unittests/TorchGlowTrainingTest.cpp
+++ b/torch_glow/tests/unittests/TorchGlowTrainingTest.cpp
@@ -38,7 +38,7 @@ TEST(TorchGlowTraining, Test) {
   config.batchSize = 1;

   // TODO (after full fusion is available)
-  if (errToBool(
+  if (ERR_TO_BOOL(
           trainer.init(fileName, vec, "Interpreter", parameters, config))) {
     return;
   }
@@ -48,7 +48,7 @@ TEST(TorchGlowTraining, Test) {
   std::vector<size_t> labelDims = {1, 1000};
   Tensor labels(ElemKind::Int64ITy, labelDims);

-  EXPECT_FALSE(errToBool(trainer.train(samples, labels)));
+  EXPECT_FALSE(ERR_TO_BOOL(trainer.train(samples, labels)));

-  EXPECT_FALSE(errToBool(trainer.save("/tmp/test.onnx")));
+  EXPECT_FALSE(ERR_TO_BOOL(trainer.save("/tmp/test.onnx")));
 }
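// The tests above consume glow::Error two ways. A bare EXPECT_FALSE(err)
// relies on Error's bool conversion to check the state, while
// ERR_TO_BOOL(std::move(err)) also logs the error and marks it checked,
// which is why the later tests prefer it. Sketch (doWork is hypothetical):
glow::Error doWork(); // hypothetical Error-returning call

TEST(ErrorHandling, Sketch) {
  glow::Error err = doWork();
  // ERR_TO_BOOL consumes the Error (logging it if set) and yields a bool.
  EXPECT_FALSE(ERR_TO_BOOL(std::move(err)));
}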