Skip to content

[PH] Port the C2/ONNX loader to using Placeholders. #1783

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Oct 4, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 7 additions & 0 deletions include/glow/ExecutionEngine/ExecutionEngine.h
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,13 @@ void updateVariables(llvm::ArrayRef<Variable *> vars,
void updateVariables(Context &ctx, llvm::ArrayRef<Placeholder *> ph,
llvm::ArrayRef<Tensor *> inputs);

/// Updates the placeholders in the module \p mod, looked up by the names
/// listed in \p ph, with the tensor content values \p inputs.
void updateInputsByName(Context &ctx, Module *mod,
llvm::ArrayRef<llvm::StringRef> ph,
llvm::ArrayRef<Tensor *> inputs);

/// Update the content of the tensors \p vars with some slices that from \p
/// inputs. The data starts at slice \p sampleIdx and wraps around until the
/// data in \p v is filled. All dimensions, except for the first (batch)
Expand Down
6 changes: 3 additions & 3 deletions include/glow/Importer/Caffe2ModelLoader.h
Original file line number Diff line number Diff line change
Expand Up @@ -63,12 +63,12 @@ class Caffe2ModelLoader
/// Loads the caffe2 model that's represented by a network description file,
/// serialized in \p netDescFilename, and weights file, serialized in
/// \p netWeightFilename, and populates the network in \p F.
/// The tensors in \p tensors are stored with the names in the list of names
/// \p names and used as inputs to the network.
/// The lists \p types and \p names are used to initialize the inputs and
/// outputs with specific names and types.
Caffe2ModelLoader(const std::string &netDescFilename,
const std::string &netWeightFilename,
llvm::ArrayRef<const char *> names,
llvm::ArrayRef<Tensor *> tensors, Function &F);
llvm::ArrayRef<TypeRef> types, Function &F);
};

} // namespace glow
Expand Down
2 changes: 1 addition & 1 deletion include/glow/Importer/CommonOperatorLoader.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ class CommonOperatorLoader : public ProtobufLoader {
T->template getHandle<int64_t>() =
std::vector<int64_t>(in.dims().begin(), in.dims().end());

createAndRememberVariable(opName, *T);
createAndRegisterConstant(opName, *T);
}

/// Loads Sqrt operator, given its protobuf representation and parsed args.
Expand Down
6 changes: 3 additions & 3 deletions include/glow/Importer/ONNXIFIModelLoader.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,16 +38,16 @@ class ONNXIFIModelLoader : public ONNXModelLoader {
const onnxTensorDescriptorV1 *weightDescriptors);

/// Mapping between ONNX names for inputs and actual Glow input vars.
llvm::StringMap<Variable *> onnxNameToInputVars_;
llvm::StringMap<Placeholder *> onnxNameToInputVars_;

public:
/// \returns mapping between ONNX names and actual Glow input vars.
const llvm::StringMap<Variable *> &getInputVarsMapping() const {
const llvm::StringMap<Placeholder *> &getInputVarsMapping() const {
return onnxNameToInputVars_;
}

/// \returns mapping between ONNX names and actual Glow output nodes.
const llvm::StringMap<Variable *> &getOutputVarsMapping() const {
const llvm::StringMap<Placeholder *> &getOutputVarsMapping() const {
return outputVarsByName_;
}

Expand Down
12 changes: 6 additions & 6 deletions include/glow/Importer/ONNXModelLoader.h
Original file line number Diff line number Diff line change
Expand Up @@ -90,19 +90,19 @@ class ONNXModelLoader
size_t onnxModelSize);

/// Checks that the inputs tensors are compatible with the inputs declared in
/// the ONNX model. The input tensors in \p tensors are stored with the names
/// in the list of names \p tensorNames.
/// the ONNX model. The input types in \p types match the list of names
/// \p tensorNames.
void checkInputs(ONNX_NAMESPACE::GraphProto &net,
llvm::ArrayRef<const char *> tensorNames,
llvm::ArrayRef<Tensor *> tensors);
llvm::ArrayRef<TypeRef> types);

/// Loads the ONNX model that's represented by a model description file,
/// serialized in \p modelDescFilename and populates the network into \p F.
/// The tensors in \p tensors are stored with the names in the list of names
/// \p tensorNames and used as inputs to the network.
/// The types in \p types match the list of names \p tensorNames and are used
/// as inputs to the network.
ONNXModelLoader(const std::string &modelDescFilename,
llvm::ArrayRef<const char *> tensorNames,
llvm::ArrayRef<Tensor *> tensors, Function &F);
llvm::ArrayRef<TypeRef> types, Function &F);
};

} // namespace glow
Expand Down
33 changes: 17 additions & 16 deletions include/glow/Importer/ProtobufLoader.h
Original file line number Diff line number Diff line change
Expand Up @@ -99,17 +99,19 @@ class ProtobufLoader {
/// A list of weight tensors indexed by name.
llvm::StringMap<Tensor *> tensors_;
/// A map from names of the external outputs of the network to Variables.
llvm::StringMap<Variable *> outputVarsByName_;
llvm::StringMap<Placeholder *> outputVarsByName_;

/// \returns the tensor that was registered under the name \p name.
Tensor *getTensorByName(llvm::StringRef name);

/// Create a new variable \p name initialized with \p tensor.
/// \returns The newly created variable.
/// \pre !hasNodeByName(name)
Variable *createAndRememberVariable(
llvm::StringRef name, const Tensor &tensor,
VisibilityKind visibilityKind = VisibilityKind::Private);
/// Create a new constant that's initialized with \p tensor, and register it
/// under the name \p name. \returns The newly created constant.
Variable *createAndRegisterConstant(llvm::StringRef name,
const Tensor &tensor);

/// Create a new Placeholder of type \p T, and register it
/// under the name \p name. \returns The newly created placeholder.
Placeholder *createAndRegisterPlaceholder(llvm::StringRef name, TypeRef T);

/// \returns the NodeValue that was registered with the name \p name or
/// a nullptr wrapped in a NodeValue if no node has been registered with this
Expand All @@ -130,26 +132,25 @@ class ProtobufLoader {
bool hasNodeByName(llvm::StringRef name) const;

/// Constructs new ProtobufLoader object. It will populate the network into \p
/// F. The tensors in \p tensors are stored with the names in the list of
/// names \p tensorNames and used as inputs to the network.
/// F. The lists \p types and \p names are used to initialize the inputs and
/// outputs with specific names and types.
ProtobufLoader(llvm::ArrayRef<const char *> tensorNames,
llvm::ArrayRef<Tensor *> tensors, Function &F);
llvm::ArrayRef<TypeRef> types, Function &F);

virtual ~ProtobufLoader();

/// \returns the single final output Variable of the network. The function
/// assumes there is only one output, verified via assertion. For image
/// \returns the single final output of the network. The function assumes that
/// there is only one output, verified via assertion. For image
/// classification, this single final output is usually the result of the last
/// softmax or regression layer.
/// \pre outputVarsByName_.size() == 1
Variable *getSingleOutput() {
Placeholder *getSingleOutput() {
assert(outputVarsByName_.size() == 1);
return outputVarsByName_.begin()->second;
}

/// \returns the Variable for the external output with \p name.
/// \returns the Placeholder for the external output with \p name.
/// \pre outputVarsByName_.find(name) != outputVarsByName_.end()
Variable *getOutputByName(llvm::StringRef name) const;
Placeholder *getOutputByName(llvm::StringRef name) const;
};

} // namespace glow
Expand Down
15 changes: 15 additions & 0 deletions lib/ExecutionEngine/ExecutionEngine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,6 +79,21 @@ void glow::updateVariables(Context &ctx, llvm::ArrayRef<Placeholder *> ph,
}
}

/// Updates the placeholders in the module \p mod, looked up by the names
/// listed in \p ph, with the tensor content values \p inputs, storing the
/// backing tensors in \p ctx.
void glow::updateInputsByName(Context &ctx, Module *mod,
                              llvm::ArrayRef<llvm::StringRef> ph,
                              llvm::ArrayRef<Tensor *> inputs) {
  assert(inputs.size() == ph.size() &&
         "The number of inputs does not match the number of Placeholders");

  // Use size_t for the index: ArrayRef::size() returns size_t, so this avoids
  // a signed/unsigned mismatch.
  for (size_t i = 0, e = ph.size(); i < e; i++) {
    Placeholder *p = mod->getPlaceholderByName(ph[i]);
    Tensor *t = inputs[i];
    assert(t && "Invalid tensor.");
    assert(p && "Invalid placeholder.");
    // Delegate the actual tensor installation to the Placeholder overload.
    updateVariables(ctx, {p}, {t});
  }
}

void ExecutionEngine::run() {
assert(function_ && "No function has been compiled");
function_->execute();
Expand Down
8 changes: 4 additions & 4 deletions lib/Importer/Caffe2ModelLoader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -619,8 +619,8 @@ void Caffe2ModelLoader::loadNetwork(caffe2::NetDef &net) {
for (int i = 0; i < net.external_output_size(); i++) {
auto &outputName = net.external_output(i);
auto r = getNodeValueByName(outputName);
auto *SN = G_.createSave("save_" + outputName, r);
outputVarsByName_[outputName] = SN->getVariable();
auto *SN = G_.createSavePH("save_" + outputName, r);
outputVarsByName_[outputName] = SN->getPlaceholder();
}
}

Expand Down Expand Up @@ -796,9 +796,9 @@ void Caffe2ModelLoader::loadWeights(caffe2::NetDef &net) {
Caffe2ModelLoader::Caffe2ModelLoader(const std::string &netDescFilename,
const std::string &netWeightFilename,
llvm::ArrayRef<const char *> names,
llvm::ArrayRef<Tensor *> tensors,
llvm::ArrayRef<TypeRef> types,
Function &F)
: CommonOperatorLoader(names, tensors, F) {
: CommonOperatorLoader(names, types, F) {
// The caffe2 weights that we are deserializing.
caffe2::NetDef weightsDef;
// The caffe2 network descriptor that we are deserializing.
Expand Down
3 changes: 1 addition & 2 deletions lib/Importer/ONNXIFIModelLoader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,7 @@ void ONNXIFIModelLoader::loadInputs(ONNX_NAMESPACE::GraphProto &net) {

Tensor *T = new Tensor();
setTensorType(in.type(), T);
auto *var =
createAndRememberVariable(in.name(), *T, VisibilityKind::Public);
Placeholder *var = createAndRegisterPlaceholder(in.name(), &T->getType());
onnxNameToInputVars_.try_emplace(in.name(), var);
}
}
Expand Down
14 changes: 7 additions & 7 deletions lib/Importer/ONNXModelLoader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -503,8 +503,8 @@ bool ONNXModelLoader::setOutputNodes(ONNX_NAMESPACE::GraphProto &net) {
for (int i = 0; i < net.output_size(); i++) {
const auto &outputName = net.output(i).name();
auto r = getNodeValueByName(outputName);
SaveNode *SN = G_.createSave("save_" + outputName, r);
outputVarsByName_[outputName] = SN->getVariable();
SaveNode *SN = G_.createSavePH("save_" + outputName, r);
outputVarsByName_[outputName] = SN->getPlaceholder();
}

return true;
Expand All @@ -528,7 +528,7 @@ ONNXModelLoader::ONNXModelLoader(Function &F)

void ONNXModelLoader::checkInputs(ONNX_NAMESPACE::GraphProto &net,
llvm::ArrayRef<const char *> tensorNames,
llvm::ArrayRef<Tensor *> tensors) {
llvm::ArrayRef<TypeRef> types) {
for (size_t i = 0; i < tensorNames.size(); i++) {
// Look if a corresponding input exists.
for (int j = 0; j < net.input_size(); j++) {
Expand All @@ -539,7 +539,7 @@ void ONNXModelLoader::checkInputs(ONNX_NAMESPACE::GraphProto &net,
continue;
}

llvm::ArrayRef<size_t> dims = tensors[i]->dims();
llvm::ArrayRef<size_t> dims = types[i]->dims();
const ONNX_NAMESPACE::TensorShapeProto &shape =
valueInfo.type().tensor_type().shape();
(void)shape;
Expand All @@ -558,8 +558,8 @@ void ONNXModelLoader::checkInputs(ONNX_NAMESPACE::GraphProto &net,

ONNXModelLoader::ONNXModelLoader(const std::string &modelDescFilename,
llvm::ArrayRef<const char *> tensorNames,
llvm::ArrayRef<Tensor *> tensors, Function &F)
: CommonOperatorLoader(tensorNames, tensors, F) {
llvm::ArrayRef<TypeRef> types, Function &F)
: CommonOperatorLoader(tensorNames, types, F) {
// The ONNX model that we are deserializing.
ONNX_NAMESPACE::ModelProto modelDef;
if (!loadProto(modelDef, modelDescFilename)) {
Expand All @@ -568,7 +568,7 @@ ONNXModelLoader::ONNXModelLoader(const std::string &modelDescFilename,
setVersion(modelDef);

ONNX_NAMESPACE::GraphProto graphDef = modelDef.graph();
checkInputs(graphDef, tensorNames, tensors);
checkInputs(graphDef, tensorNames, types);

loadInitializers(graphDef);
if (!loadNetwork(graphDef)) {
Expand Down
29 changes: 18 additions & 11 deletions lib/Importer/ProtobufLoader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ Tensor *ProtobufLoader::getTensorByName(llvm::StringRef name) {
return tensors_[name];
}

Variable *ProtobufLoader::getOutputByName(llvm::StringRef name) const {
Placeholder *ProtobufLoader::getOutputByName(llvm::StringRef name) const {
assert(outputVarsByName_.count(name) &&
"There is no Variable registered with this name.");
auto it = outputVarsByName_.find(name);
Expand All @@ -60,15 +60,23 @@ NodeValue ProtobufLoader::getNodeValueByName(llvm::StringRef name) const {
return node;
}

Variable *ProtobufLoader::createAndRememberVariable(
llvm::StringRef name, const Tensor &tensor, VisibilityKind visibilityKind) {
assert(!hasNodeByName(name) && "Creating an already existing node?!");
Variable *ProtobufLoader::createAndRegisterConstant(llvm::StringRef name,
const Tensor &tensor) {
assert(!hasNodeByName(name) && "Creating an already existing node");
// Note: We do not support training from models loaded from protos, so
// trainable is always set to false here.
Variable *node = G_.getParent()->createVariable(name, tensor, visibilityKind,
/* trainable */ false);
Variable *node =
G_.getParent()->createVariable(name, tensor, VisibilityKind::Private,
/* trainable */ false);
nodeValueByName_[name] = NodeValue(node, 0);
return node;
}

/// Creates a Placeholder of type \p T, registers it under \p name in the
/// name-to-node map, and \returns the newly created placeholder.
Placeholder *ProtobufLoader::createAndRegisterPlaceholder(llvm::StringRef name,
                                                          TypeRef T) {
  assert(!hasNodeByName(name) && "Creating an already existing node");
  // NOTE(review): the trailing 'false' presumably disables training, matching
  // the '/* trainable */ false' used when creating constants — confirm.
  auto *placeholder = G_.getParent()->createPlaceholder(T, name, false);
  nodeValueByName_[name] = NodeValue(placeholder, 0);
  return placeholder;
}

Expand All @@ -80,25 +88,24 @@ ProtobufLoader::getNodeValueOrCreateVariableByName(llvm::StringRef name) {
}

Tensor *T = getTensorByName(name);
return NodeValue(createAndRememberVariable(name, *T), 0);
return NodeValue(createAndRegisterConstant(name, *T), 0);
}

bool ProtobufLoader::hasNodeByName(llvm::StringRef name) const {
return getNodeValueByNameOrNullNodeValue(name).getNode() != nullptr;
}

ProtobufLoader::ProtobufLoader(llvm::ArrayRef<const char *> tensorNames,
llvm::ArrayRef<Tensor *> tensors, Function &F)
llvm::ArrayRef<TypeRef> types, Function &F)
: G_(F) {
// Verify that the version of the library that we linked against is
// compatible with the version of the headers we compiled against.
GOOGLE_PROTOBUF_VERIFY_VERSION;

assert(tensorNames.size() == tensors.size() && "Invalid initialization list");
assert(tensorNames.size() == types.size() && "Invalid initialization list");
for (unsigned i = 0; i < tensorNames.size(); i++) {
assert(!hasNodeByName(tensorNames[i]) && "Input names have duplicate");
createAndRememberVariable(tensorNames[i], *tensors[i],
VisibilityKind::Public);
createAndRegisterPlaceholder(tensorNames[i], types[i]);
}
}

Expand Down
13 changes: 6 additions & 7 deletions lib/Onnxifi/Base.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -68,29 +68,28 @@ onnxStatus Graph::initGraph(const void *onnxModel, size_t onnxModelSize,
onnxStatus Graph::run() {
// Copy tensors from the input addresses to the Glow tensors.
llvm::SmallVector<Tensor *, 4> tensors;
llvm::SmallVector<Variable *, 4> vars;
llvm::SmallVector<Placeholder *, 4> phs;
for (auto inputVar : inputVarToBuffer_) {
auto *var = inputVar.first;
auto *type = var->getType();
void *inputBuffer = reinterpret_cast<void *>(inputVar.second);
tensors.push_back(new Tensor(inputBuffer, type));
vars.push_back(var);
phs.push_back(var);
}

// Run inference.
auto &EE = backendPtr_->getEE();
updateVariables(vars, tensors);
updateVariables(ctx_, phs, tensors);
EE.run();

// Copy outputs to the addresses specified in the outputNodeToBuffer_.
for (auto outputVar : outputNodeToBuffer_) {
void *outputAddress = reinterpret_cast<void *>(outputVar.second);
const Tensor &res = outputVar.first->getPayload();
const Tensor *res = ctx_.get(outputVar.first);

memcpy(outputAddress, res.getUnsafePtr(),
res.size() * res.getType().getElementSize());
memcpy(outputAddress, res->getUnsafePtr(),
res->size() * res->getType().getElementSize());
}

return ONNXIFI_STATUS_SUCCESS;
}

Expand Down
8 changes: 4 additions & 4 deletions lib/Onnxifi/Base.h
Original file line number Diff line number Diff line change
Expand Up @@ -119,19 +119,19 @@ class Graph {
Context ctx_;

/// Mapping between ONNX name for the input variable and Glow variable.
llvm::StringMap<Variable *> onnxNameToInputVar_;
llvm::StringMap<Placeholder *> onnxNameToInputVar_;

/// Mapping between ONNX name for the output variable and Glow output
/// node.
llvm::StringMap<Variable *> onnxNameToOutputNode_;
llvm::StringMap<Placeholder *> onnxNameToOutputNode_;

/// Mapping between input var and the actual memory address.
/// Inputs will be read from these addresses.
llvm::DenseMap<Variable *, onnxPointer> inputVarToBuffer_;
llvm::DenseMap<Placeholder *, onnxPointer> inputVarToBuffer_;

/// Mapping between output var and the actual memory address.
/// Results must be written to these addresses.
llvm::DenseMap<Variable *, onnxPointer> outputNodeToBuffer_;
llvm::DenseMap<Placeholder *, onnxPointer> outputNodeToBuffer_;
};

typedef Graph *GraphPtr;
Expand Down
2 changes: 1 addition & 1 deletion tests/unittests/ImporterTestUtils.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ unsigned countNodeKind(Function *F, Kinded::Kind kind) {

/// Helper function to get the save node from a Variable \p var.
/// \pre (var->getUsers().size() == 1)
SaveNode *getSaveNodeFromVariable(Variable *var) {
SaveNode *getSaveNodeFromDest(Storage *var) {
auto &varUsers = var->getUsers();
assert(varUsers.size() == 1);
auto *saveNode = llvm::dyn_cast<SaveNode>(varUsers.begin()->getUser());
Expand Down
Loading