Skip to content

Untangle the Graph and IR libraries and prepare for the implementation of the Execution engine. #30

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 8 commits into from
Oct 12, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion examples/cifar10.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ void testCIFAR10() {
auto *SM = G.createSoftMax("softmax", RL3, E);
auto *result = G.createReturn("ret", SM);

G.generateIR();
IP.getModule().generateIR();
IP.optimize(OptimizationMode::Train);
IP.initVars();

Expand Down
2 changes: 1 addition & 1 deletion examples/mnist.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ void testMNIST() {

auto *result = G.createReturn("return", SM);

G.generateIR();
IP.getModule().generateIR();
IP.optimize(OptimizationMode::Train);
IP.initVars();

Expand Down
8 changes: 4 additions & 4 deletions include/glow/Network/Image.h → include/glow/Base/Image.h
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#ifndef GLOW_NETWORK_IMAGE_H
#define GLOW_NETWORK_IMAGE_H
#ifndef GLOW_BASE_IMAGE_H
#define GLOW_BASE_IMAGE_H

#include "glow/Network/Tensor.h"
#include "glow/Base/Tensor.h"

namespace glow {

Expand All @@ -17,4 +17,4 @@ bool writePngImage(Tensor *T, const char *filename,

} // namespace glow

#endif // GLOW_NETWORK_IMAGE_H
#endif // GLOW_BASE_IMAGE_H
12 changes: 6 additions & 6 deletions include/glow/Network/Tensor.h → include/glow/Base/Tensor.h
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
#ifndef GLOW_NETWORK_TENSOR_H
#define GLOW_NETWORK_TENSOR_H
#ifndef GLOW_BASE_TENSOR_H
#define GLOW_BASE_TENSOR_H

#include "Config.h"

#include "glow/IR/Type.h"
#include "glow/Base/Type.h"
#include "glow/Support/Compiler.h"
#include "glow/Support/Random.h"

Expand Down Expand Up @@ -328,7 +328,7 @@ template <class ElemTy> class Handle final {
assert(tensor_->isInBounds(indices));
size_t index = getElementPtr(indices);
assert(index < size() && "Out of bounds");
ElemTy *data = tensor_->getRawDataPointer<ElemTy>();
auto *data = tensor_->getRawDataPointer<ElemTy>();
return data[index];
}

Expand All @@ -343,7 +343,7 @@ template <class ElemTy> class Handle final {
/// \returns the element at offset \p idx without any size calculations.
ElemTy &raw(size_t index) {
assert(index < size() && "Out of bounds");
ElemTy *data = tensor_->getRawDataPointer<ElemTy>();
auto *data = tensor_->getRawDataPointer<ElemTy>();
return data[index];
}

Expand Down Expand Up @@ -655,4 +655,4 @@ template <class ElemTy> Handle<ElemTy> Tensor::getHandle() {

} // namespace glow

#endif // GLOW_NETWORK_TENSOR_H
#endif // GLOW_BASE_TENSOR_H
8 changes: 4 additions & 4 deletions include/glow/Network/Train.h → include/glow/Base/Train.h
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#ifndef GLOW_NETWORK_TRAIN_H
#define GLOW_NETWORK_TRAIN_H
#ifndef GLOW_BASE_TRAIN_H
#define GLOW_BASE_TRAIN_H

#include "glow/Network/Tensor.h"
#include "glow/Base/Tensor.h"

#include <cstddef>
#include <cstdint>
Expand Down Expand Up @@ -52,4 +52,4 @@ class Trainer {

} // namespace glow

#endif // GLOW_NETWORK_TRAIN_H
#endif // GLOW_BASE_TRAIN_H
21 changes: 12 additions & 9 deletions include/glow/IR/Type.h → include/glow/Base/Type.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
#ifndef GLOW_IR_TYPE_H
#define GLOW_IR_TYPE_H
#ifndef GLOW_BASE_TYPE_H
#define GLOW_BASE_TYPE_H

#include "glow/Support/Compiler.h"

Expand Down Expand Up @@ -78,7 +78,7 @@ struct Type final {
unsigned char numSizes_{0};

/// Specifies the element type of the tensor.
ElemKind elementType_;
ElemKind elementType_{ElemKind::IndexTy};

/// Initialize a new type.
Type(ElemKind elemTy, llvm::ArrayRef<size_t> dims) : elementType_(elemTy) {
Expand All @@ -92,23 +92,26 @@ struct Type final {
}

/// An empty type.
Type() : elementType_(ElemKind::IndexTy) { numSizes_ = 0; }
Type() = default;

/// \returns true if \p other is the same type.
bool isEqual(TypeRef other) const { return isEqual(*other); }

/// \returns true if \p other is the same type.
bool isEqual(const Type &other) const {
// Element type must be the same.
if (elementType_ != other.elementType_)
if (elementType_ != other.elementType_) {
return false;
}
// Must have the same number of sizes.
if (numSizes_ != other.numSizes_)
if (numSizes_ != other.numSizes_) {
return false;
}
// Sizes must be the same.
for (size_t i = 0; i < numSizes_; i++) {
if (sizes_[i] != other.sizes_[i])
if (sizes_[i] != other.sizes_[i]) {
return false;
}
}

return true;
Expand Down Expand Up @@ -198,6 +201,6 @@ inline bool operator==(const Type &LHS, const Type &RHS) {

namespace std {
std::string to_string(const glow::Type &);
}
} // namespace std

#endif // GLOW_IR_TYPE_H
#endif // GLOW_BASE_TYPE_H
53 changes: 28 additions & 25 deletions include/glow/Graph/Graph.h
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
#ifndef GLOW_GRAPH_GRAPH_H
#define GLOW_GRAPH_GRAPH_H

#include "glow/IR/Instrs.h"
#include "glow/Base/Type.h"

#include "glow/Graph/Nodes.h"

#include "llvm/ADT/ArrayRef.h"

#include <list>
#include <unordered_map>
#include <vector>

Expand All @@ -30,19 +33,13 @@ class ReturnNode;

/// Represents the compute graph.
class Graph final {
public:
/// Stores the mapping between graph nodes to IR variables.
using NodeToInstrTy = std::unordered_map<const Node *, Value *>;

private:
/// A uniqued list of types in the module. Types in this list can be compared
/// by comparing their addresses.
std::list<Type> types_{};
/// A list of nodes that the graph owns.
std::vector<Node *> nodes_;
/// A list of variables that the graph owns.
std::vector<Variable *> vars_;
/// A reference to the low-level IR module.
Module &M_;
/// Maps nodes in the graph to the generated IR.
NodeToInstrTy IRMap;

/// Inserts the node \p N to the list of nodes, and returns the inserted node.
template <class NodeTy> NodeTy *addNode(NodeTy *N) {
Expand All @@ -57,26 +54,36 @@ class Graph final {
}

public:
Graph(Module &M) : M_(M) {}
Graph() = default;

~Graph();

/// Return a pointer to a uniqued type \p t in the current module.
TypeRef uniqueType(const Type &T);

/// Return a pointer to a uniqued type \p t in the current module.
TypeRef uniqueType(ElemKind elemTy, llvm::ArrayRef<size_t> dims);

/// Return the void type.
TypeRef getVoidTy();

/// @name High-level, operation-level IRBuilder.
///@{

Variable *
createVariable(TypeRef T, llvm::StringRef name,
WeightVar::InitKind initKind = WeightVar::InitKind::Broadcast,
Variable::InitKind initKind = Variable::InitKind::Broadcast,
float val = 0.0);

Variable *
createVariable(ElemKind T, llvm::ArrayRef<size_t> dims, llvm::StringRef name,
WeightVar::InitKind initKind = WeightVar::InitKind::Broadcast,
Variable::InitKind initKind = Variable::InitKind::Broadcast,
float val = 0.0);

ConvolutionNode *createConv(llvm::StringRef name, Node *input, size_t depth,
size_t kernel, size_t stride, size_t pad);

PoolNode *createPool(llvm::StringRef name, Node *input, PoolInst::OpKind kind,
PoolNode *createPool(llvm::StringRef name, Node *input, PoolNode::OpKind kind,
size_t kernel, size_t stride, size_t pad);

FullyConnectedNode *createFullyConnected(llvm::StringRef name, Node *input,
Expand Down Expand Up @@ -113,27 +120,23 @@ class Graph final {
float alpha = 1e-4, float beta = 0.75, float k = 2.0);

ArithmeticNode *createArithmetic(llvm::StringRef name, Node *LHS, Node *RHS,
ArithmeticInst::OpKind op);
ArithmeticNode::OpKind op);

ReturnNode *createReturn(llvm::StringRef name, Node *input);

/// @}

/// Registers the fact that the node \p N was lowered into the IR value \p V.
void registerIRMap(const Node *N, Value *V);

/// \returns the IR value that the node \p N was lowered into, or null, if the
/// node was not lowered into any IR value.
Value *getIRForNode(const Node *N) const;

/// Generate IR from the nodes in the graph into the module.
void generateIR();

/// Dumps the textual representation of the network.
void dump();

/// Dump a dotty graph that depicts the module.
void dumpDAG();

/// \returns the list of nodes that the graph owns.
const std::vector<Node *> &getNodes() const { return nodes_; }

/// \returns the list of variables that the graph owns.
const std::vector<Variable *> &getVars() const { return vars_; }
};

} // namespace glow
Expand Down
2 changes: 1 addition & 1 deletion include/glow/Graph/Node.h
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@

#include "llvm/ADT/StringRef.h"

#include "glow/Base/Type.h"
#include "glow/IR/Traits.h"
#include "glow/IR/Type.h"

namespace glow {

Expand Down
35 changes: 28 additions & 7 deletions include/glow/Graph/Nodes.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,16 @@
#define GLOW_GRAPH_NODES_H

#include "glow/Graph/Node.h"
#include "glow/IR/IR.h"
#include "glow/IR/Instrs.h"

namespace glow {

class Variable final : public Node {
public:
using InitKind = WeightVar::InitKind;
enum class InitKind {
Extern, // No initialization.
Broadcast, // Broadcast a single value to all elements.
Xavier, // Init the tensor with random values using the Xavier method.
};

private:
/// The value to use during initialization. This can be the value to splat or
Expand Down Expand Up @@ -69,18 +71,33 @@ class ConvolutionNode final : public Node {

std::string getDebugDesc() const override;
void visit(Node *parent, NodeVisitor *visitor) override;

/// Calculate the spatial size (height, width) of the output tensor produced
/// by a convolution/pooling window sweep.
///
/// Uses the standard formula: out = (in + 2 * pad - filterSize) / stride + 1.
///
/// \param sx the input height.
/// \param sy the input width.
/// \param pad symmetric zero-padding applied to each spatial edge.
/// \param filterSize the side length of the square filter window.
/// \param stride the step between consecutive filter applications.
/// \returns a pair {outHeight, outWidth}.
static std::pair<size_t, size_t> calculateOutputDims(size_t sx, size_t sy,
                                                     size_t pad,
                                                     size_t filterSize,
                                                     size_t stride) {
  // All operands are unsigned; a filter larger than the padded input would
  // silently wrap around to a huge value. Guard the precondition explicitly.
  size_t padded = pad * 2;
  assert(sx + padded >= filterSize && sy + padded >= filterSize &&
         "Filter does not fit within the padded input");
  assert(stride > 0 && "Stride must be non-zero");
  size_t outsx = (sx + padded - filterSize) / stride + 1;
  size_t outsy = (sy + padded - filterSize) / stride + 1;
  return {outsx, outsy};
}
};

class PoolNode final : public Node {
public:
using OpKind = PoolInst::OpKind;
/// Specifies the kind of pooling done by the operator.
enum class OpKind {
Max,
Avg,
};

private:
Node *in_;
size_t kernel_;
size_t stride_;
size_t pad_;
PoolInst::OpKind kind_;
OpKind kind_;

public:
PoolNode(Node *in, TypeRef outTy, llvm::StringRef name, OpKind kind,
Expand Down Expand Up @@ -313,12 +330,16 @@ class BatchNormalizationNode final : public Node {

class ArithmeticNode final : public Node {
public:
using OpKind = ArithmeticInst::OpKind;
/// Specifies the kind of arithmetic operation performed by the operator.
enum class OpKind {
Add,
Mul,
};

private:
Node *LHS_;
Node *RHS_;
ArithmeticInst::OpKind kind_;
OpKind kind_;
const char *getKindStr() const;

public:
Expand Down
Loading