48 changes: 24 additions & 24 deletions examples/cifar10.cpp
@@ -1,3 +1,5 @@
#include "glow/Graph/Graph.h"
#include "glow/Graph/Nodes.h"
#include "glow/IR/IR.h"
#include "glow/IR/IRBuilder.h"
#include "glow/IR/Instrs.h"
@@ -70,37 +72,35 @@ void testCIFAR10() {
IP.getConfig().momentum = 0.9;
IP.getConfig().L2Decay = 0.0001;

Value *result;
Value *E;
Value *A;
unsigned minibatchSize = 8;

{
IRBuilder bb(IP.getModule());
auto &G = IP.getGraph();

// Create the input layer:
A = bb.createWeightVar(ElemKind::FloatTy, {minibatchSize, 32, 32, 3});
E = bb.createWeightVar(ElemKind::IndexTy, {minibatchSize, 1});
// Create the input layer:
auto *A =
G.createVariable(ElemKind::FloatTy, {minibatchSize, 32, 32, 3}, "input");
auto *E = G.createVariable(ElemKind::IndexTy, {minibatchSize, 1}, "expected",
Variable::InitKind::Extern);

// Create the rest of the network.
auto *CV0 = bb.createConvOp(A, 16, 5, 1, 2);
auto *RL0 = bb.createRELUOp(*CV0);
auto *MP0 = bb.createPoolOp(*RL0, PoolInst::OpKind::Max, 2, 2, 0);
// Create the rest of the network.
auto *CV0 = G.createConv("conv", A, 16, 5, 1, 2);
auto *RL0 = G.createRELU("relu", CV0);
auto *MP0 = G.createPool("pool", RL0, PoolNode::OpKind::Max, 2, 2, 0);

auto *CV1 = bb.createConvOp(*MP0, 20, 5, 1, 2);
auto *RL1 = bb.createRELUOp(*CV1);
auto *MP1 = bb.createPoolOp(*RL1, PoolInst::OpKind::Max, 2, 2, 0);
auto *CV1 = G.createConv("conv", MP0, 20, 5, 1, 2);
auto *RL1 = G.createRELU("relu", CV1);
auto *MP1 = G.createPool("pool", RL1, PoolNode::OpKind::Max, 2, 2, 0);

auto *CV2 = bb.createConvOp(*MP1, 20, 5, 1, 2);
auto *RL2 = bb.createRELUOp(*CV2);
auto *MP2 = bb.createPoolOp(*RL2, PoolInst::OpKind::Max, 2, 2, 0);
auto *CV2 = G.createConv("conv", MP1, 20, 5, 1, 2);
auto *RL2 = G.createRELU("relu", CV2);
auto *MP2 = G.createPool("pool", RL2, PoolNode::OpKind::Max, 2, 2, 0);

auto *FCL1 = bb.createFullyConnectedOp(*MP2, 10);
auto *RL3 = bb.createRELUOp(*FCL1);
auto *SM = bb.createSoftMaxOp(*RL3, E);
result = bb.createReturnOp(*SM);
}
auto *FCL1 = G.createFullyConnected("fc", MP2, 10);
auto *RL3 = G.createRELU("relu", FCL1);
auto *SM = G.createSoftMax("softmax", RL3, E);
auto *result = G.createReturn("ret", SM);

G.generateIR();
IP.optimize(OptimizationMode::Train);
IP.initVars();

@@ -125,7 +125,7 @@ void testCIFAR10() {
Tensor sample(ElemKind::FloatTy, {minibatchSize, 3, 32, 32});
sample.copyConsecutiveSlices(&images, minibatchSize * i);
IP.infer({A}, {&sample});
auto *res = IP.getTensorForValue(result);
auto *res = IP.getTensorForNode(result);

for (unsigned int iter = 0; iter < minibatchSize; iter++) {
auto T = res->getHandle<FloatTy>().extractSlice(iter);
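The change above replaces direct IRBuilder calls with the Graph API: the example now creates named nodes on the graph, lowers them with generateIR(), and fetches results by node rather than by IR value. A condensed sketch of the new pattern — `IP` is an Interpreter set up as in the example, and the shapes follow cifar10.cpp; this is not a complete program:

// Condensed sketch of the new Graph-based construction pattern.
auto &G = IP.getGraph();
auto *in = G.createVariable(ElemKind::FloatTy, {8, 32, 32, 3}, "input");
auto *CV = G.createConv("conv", in, 16, 5, 1, 2);
auto *RL = G.createRELU("relu", CV);
auto *ret = G.createReturn("ret", RL);
G.generateIR();                       // lower the graph nodes into IR
IP.optimize(OptimizationMode::Train);
IP.initVars();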
48 changes: 26 additions & 22 deletions examples/mnist.cpp
@@ -1,3 +1,6 @@
#include "glow/Graph/Graph.h"
#include "glow/Graph/Node.h"
#include "glow/Graph/Nodes.h"
#include "glow/IR/IR.h"
#include "glow/IR/IRBuilder.h"
#include "glow/IR/Instrs.h"
@@ -72,28 +75,29 @@ void testMNIST() {
IP.getConfig().momentum = 0.9;
IP.getConfig().L2Decay = 0.001;

Value *A;
Value *result;
Value *selected;
{
IRBuilder bb(IP.getModule());

A = bb.createWeightVar(ElemKind::FloatTy, {minibatchSize, 28, 28, 1});
auto *CV0 = bb.createConvOp(A, 16, 5, 1, 2);
auto *RL0 = bb.createRELUOp(*CV0);
auto *MP0 = bb.createPoolOp(*RL0, PoolInst::OpKind::Max, 3, 3, 0);

auto *CV1 = bb.createConvOp(*MP0, 16, 5, 1, 2);
auto *RL1 = bb.createRELUOp(*CV1);
auto *MP1 = bb.createPoolOp(*RL1, PoolInst::OpKind::Max, 3, 3, 0);

auto *FCL1 = bb.createFullyConnectedOp(*MP1, 10);
auto *RL2 = bb.createRELUOp(*FCL1);
selected = bb.createWeightVar(ElemKind::IndexTy, {minibatchSize, 1});
auto *SM = bb.createSoftMaxOp(*RL2, selected);
result = bb.createReturnOp(*SM);
}
auto &G = IP.getGraph();

Variable *A = G.createVariable(ElemKind::FloatTy, {minibatchSize, 28, 28, 1},
"input", Variable::InitKind::Extern);

auto *CV0 = G.createConv("conv", A, 16, 5, 1, 2);
auto *RL0 = G.createRELU("relu", CV0);
auto *MP0 = G.createPool("pool", RL0, PoolNode::OpKind::Max, 3, 3, 0);

auto *CV1 = G.createConv("conv", MP0, 16, 5, 1, 2);
auto *RL1 = G.createRELU("relu", CV1);
auto *MP1 = G.createPool("pool", RL1, PoolNode::OpKind::Max, 3, 3, 0);

auto *FCL1 = G.createFullyConnected("fc", MP1, 10);
auto *RL2 = G.createRELU("relu", FCL1);
Variable *selected =
G.createVariable(ElemKind::IndexTy, {minibatchSize, 1}, "selected",
Variable::InitKind::Extern);
auto *SM = G.createSoftMax("sm", RL2, selected);

auto *result = G.createReturn("return", SM);

G.generateIR();
IP.optimize(OptimizationMode::Train);
IP.initVars();

@@ -126,7 +130,7 @@ void testMNIST() {
Tensor sample(ElemKind::FloatTy, {minibatchSize, 1, 28, 28});
sample.copyConsecutiveSlices(&imageInputs, 0);
IP.infer({A}, {&sample});
auto *res = IP.getTensorForValue(result);
Tensor *res = IP.getTensorForNode(result);

for (unsigned int iter = 0; iter < minibatchSize; iter++) {
auto T = res->getHandle<FloatTy>().extractSlice(iter);
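Note that variables created with Variable::InitKind::Extern act as graph inputs: the interpreter binds them to caller-owned tensors at inference time. The relevant pattern from the example, isolated — `A`, `result`, and `minibatchSize` are the names used in testMNIST() above:

// The Extern init kind means A's storage is supplied by the caller at run time.
Tensor batch(ElemKind::FloatTy, {minibatchSize, 28, 28, 1});
IP.infer({A}, {&batch});                      // bind A -> batch and execute
Tensor *probs = IP.getTensorForNode(result);  // softmax output per sample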
19 changes: 15 additions & 4 deletions include/glow/Graph/Graph.h
@@ -30,12 +30,19 @@ class ReturnNode;

/// Represents the compute graph.
class Graph final {
public:
/// Stores the mapping from graph nodes to IR values.
using NodeToInstrTy = std::unordered_map<const Node *, Value *>;

private:
/// A list of nodes that the graph owns.
std::vector<Node *> nodes_;
/// A list of variables that the graph owns.
std::vector<Variable *> vars_;
/// A reference to the low-level IR module.
Module &M_;
/// Maps nodes in the graph to the generated IR.
NodeToInstrTy IRMap;

/// Inserts the node \p N into the list of nodes, and returns the inserted node.
template <class NodeTy> NodeTy *addNode(NodeTy *N) {
@@ -50,9 +57,6 @@ class Graph final {
}

public:
/// Holds the mapping between graph nodes to IR variables.
using NodeToInstrTy = std::unordered_map<Node *, Value *>;

Graph(Module &M) : M_(M) {}
~Graph();

@@ -115,8 +119,15 @@

/// @}

/// Registers the fact that the node \p N was lowered into the IR value \p V.
void registerIRMap(const Node *N, Value *V);

/// \returns the IR value that the node \p N was lowered into, or null if the
/// node was not lowered into any IR value.
Value *getIRForNode(const Node *N) const;

/// Generate IR from the nodes in the graph into the module.
NodeToInstrTy generateIR();
void generateIR();

/// Dumps the textual representation of the network.
void dump();
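generateIR() no longer returns the node-to-value map; the graph records it internally via registerIRMap() and clients query it with getIRForNode(). A sketch of the assumed flow, based only on the declarations above:

// Assumed usage of the new mapping API.
Graph G(M);                            // M is an existing IR Module
auto *in = G.createVariable(ElemKind::FloatTy, {8, 10}, "in");
auto *RL = G.createRELU("relu", in);
G.generateIR();               // lowering registers each node via registerIRMap
Value *V = G.getIRForNode(RL);        // nullptr if RL was never lowered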
28 changes: 19 additions & 9 deletions include/glow/Graph/Nodes.h
@@ -8,23 +8,26 @@
namespace glow {

class Variable final : public Node {
public:
using InitKind = WeightVar::InitKind;

private:
/// The value to use during initialization. This can be the value to splat or
/// a parameter to specify the range of the random values.
float val_;
/// The initialization mode.
WeightVar::InitKind initKind_;
InitKind initKind_;

public:
Variable(llvm::StringRef name, TypeRef Ty, WeightVar::InitKind initKind,
float val)
Variable(llvm::StringRef name, TypeRef Ty, InitKind initKind, float val)
: Node(Kinded::Kind::WeightVarKind, Ty, name), val_(val),
initKind_(initKind) {}

static bool classof(const Kinded *k) {
return k->getKind() == Kinded::Kind::WeightVarKind;
}

WeightVar::InitKind getInitKind() const { return initKind_; }
InitKind getInitKind() const { return initKind_; }
float getVal() const { return val_; }

std::string getDebugDesc() const override;
@@ -69,14 +72,18 @@ class ConvolutionNode final : public Node {
};

class PoolNode final : public Node {
public:
using OpKind = PoolInst::OpKind;

private:
Node *in_;
size_t kernel_;
size_t stride_;
size_t pad_;
PoolInst::OpKind kind_;

public:
PoolNode(Node *in, TypeRef outTy, llvm::StringRef name, PoolInst::OpKind kind,
PoolNode(Node *in, TypeRef outTy, llvm::StringRef name, OpKind kind,
size_t kernel, size_t stride, size_t pad)
: Node(Kinded::Kind::PoolInstKind, outTy, name), in_(in), kernel_(kernel),
stride_(stride), pad_(pad), kind_(kind) {}
@@ -89,7 +96,7 @@
size_t getKernel() const { return kernel_; }
size_t getStride() const { return stride_; }
size_t getPad() const { return pad_; }
PoolInst::OpKind getKind() const { return kind_; }
OpKind getKind() const { return kind_; }

std::string getDebugDesc() const override;
void visit(Node *parent, NodeVisitor *visitor) override;
@@ -305,22 +312,25 @@ class BatchNormalizationNode final : public Node {
};

class ArithmeticNode final : public Node {
public:
using OpKind = ArithmeticInst::OpKind;

private:
Node *LHS_;
Node *RHS_;
ArithmeticInst::OpKind kind_;
const char *getKindStr() const;

public:
ArithmeticNode(llvm::StringRef name, Node *LHS, Node *RHS,
ArithmeticInst::OpKind kind)
ArithmeticNode(llvm::StringRef name, Node *LHS, Node *RHS, OpKind kind)
: Node(Kinded::Kind::ArithmeticInstKind, LHS->getType(), name), LHS_(LHS),
RHS_(RHS), kind_(kind) {}
static bool classof(const Kinded *k) {
return k->getKind() == Kinded::Kind::ArithmeticInstKind;
}
Node *getLHS() const { return LHS_; }
Node *getRHS() const { return RHS_; }
ArithmeticInst::OpKind getKind() const { return kind_; }
OpKind getKind() const { return kind_; }

std::string getDebugDesc() const override;
void visit(Node *parent, NodeVisitor *visitor) override;
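The `using OpKind = ...` aliases re-export the IR enums at the graph level, so graph-building code can avoid naming IR instruction classes directly, as the updated examples already do:

// Call sites spell the enum through the node type instead of PoolInst:
auto *MP = G.createPool("pool", RL, PoolNode::OpKind::Max, 2, 2, 0);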
2 changes: 0 additions & 2 deletions include/glow/IR/IR.h
@@ -85,8 +85,6 @@ class Instruction : public Value {
/// Verify the correctness of the instruction parameters.
void verify() const;

operator Value *() const { return getOperand(0).first; }

static bool mayShareBuffers(const Instruction *I);
};

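Removing the implicit operator Value *() is what drives the call-site changes in the examples: an instruction no longer silently decays to its first operand (the old `*CV0` idiom), so code that still needs the underlying value must fetch it explicitly, using the accessor visible in the removed line:

// Equivalent of the removed conversion, written out explicitly:
Value *out = CV0->getOperand(0).first;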
12 changes: 12 additions & 0 deletions include/glow/IR/IRBuilder.h
@@ -32,11 +32,18 @@ class IRBuilder {
ConvolutionInst *createConvOp(Value *input, size_t depth, size_t kernel,
size_t stride, size_t pad);

ConvolutionInst *createConvOp(Value *input, Value *filter, Value *bias,
size_t depth, size_t kernel, size_t stride,
size_t pad);

PoolInst *createPoolOp(Value *input, PoolInst::OpKind kind, size_t kernel,
size_t stride, size_t pad);

FullyConnectedInst *createFullyConnectedOp(Value *input, size_t outDepth);

FullyConnectedInst *createFullyConnectedOp(Value *input, Value *filter,
Value *bias, size_t outDepth);

ReluInst *createRELUOp(Value *input);

SigmoidInst *createSigmoidOp(Value *input);
@@ -60,6 +67,11 @@
float epsilon = 1e-5,
float momentum = 0.9);

BatchNormalizationInst *
createBatchNormalizationOp(Value *input, Value *beta, Value *gamma,
Value *mean, Value *var, size_t channelIdx = 0,
float epsilon = 1e-5, float momentum = 0.9);

LocalResponseNormalizationInst *
createLocalResponseNormalizationOp(Value *input, size_t halfWindowSize = 2,
float alpha = 1e-4, float beta = 0.75,
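The new overloads take caller-supplied filter and bias values instead of allocating fresh weights, which is what an importer with pre-trained tensors needs. A sketch — the shapes and the `input` variable here are illustrative assumptions, not taken from this patch:

// Sketch: supplying pre-loaded weights to the new createConvOp overload.
Value *filter = bb.createWeightVar(ElemKind::FloatTy, {16, 5, 5, 3});
Value *bias = bb.createWeightVar(ElemKind::FloatTy, {16});
auto *CV = bb.createConvOp(input, filter, bias, /*depth=*/16,
                           /*kernel=*/5, /*stride=*/1, /*pad=*/2);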
4 changes: 4 additions & 0 deletions include/glow/IR/Instrs.h
@@ -451,8 +451,12 @@ class WeightVar : public Value {

const char *getInitKindStr() const;

void setInitKind(InitKind k) { initKind_ = k; }
InitKind getInitKind() const { return initKind_; }

void setVal(float v) { val_ = v; }
float getVal() const { return val_; }

std::string getExtraDesc() const;
void verify() const {}
};
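The new WeightVar setters let a later phase retarget a weight after it has been created, for example marking it as externally initialized once a loader decides to feed it data. Assumed use:

// Assumed use: mark an existing weight as externally filled.
void markExtern(WeightVar *W) {
  W->setInitKind(WeightVar::InitKind::Extern);
  W->setVal(0.0f); // splat payload is unused for Extern weights (assumption)
}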
23 changes: 15 additions & 8 deletions include/glow/Importer/Caffe2.h
@@ -3,7 +3,7 @@

#include "llvm/ADT/ArrayRef.h"

#include "glow/IR/IRBuilder.h"
#include "glow/Graph/Graph.h"

#include <string>
#include <unordered_map>
@@ -25,14 +25,16 @@
class caffe2ModelLoader {
/// The interpreter that runs the program.
Interpreter &IP_;
/// The network that we are building.
IRBuilder builder_;
/// The graph that we are building.
Graph &G_;
/// Saves network nodes by name.
std::unordered_map<std::string, Value *> nodeByName_;
std::unordered_map<std::string, Node *> nodeByName_;
/// A list of weight tensors indexed by name.
std::unordered_map<std::string, Tensor *> tensors_;
/// The external output of the network.
Value *root_{nullptr};
Node *root_{nullptr};
/// A list of tensors to load into variables once the graph is materialized.
std::vector<std::pair<Variable *, Tensor *>> variableInit_;

/// Load the weight tensors from the 'init' file and register them in the map
/// \p tensors.
Expand All @@ -52,13 +54,18 @@ class caffe2ModelLoader {
/// file.
bool loadProtoFile(caffe2::NetDef &net, const std::string &filename);

/// Register the tensor \p t to initialize the variable \p v.
void registerVariableInit(Node *v, Tensor *t) {
variableInit_.push_back({cast<Variable>(v), t});
}

public:
/// \returns the node that was registered with the name \p name.
Value *getNodeByName(const std::string &name);
Node *getNodeByName(const std::string &name);

/// \returns the node that was registered with the name \p name or creates a
/// new Variable node for a tensor with this name.
Value *getOrCreateNodeByName(const std::string &name);
Node *getOrCreateNodeByName(const std::string &name);

/// Loads the caffe2 model that's represented by a network description file,
/// serialized in \p netDescFilename, and weights file, serialized in
@@ -72,7 +79,7 @@

/// \returns the output of the network. This is usually the result of the last
/// softmax or regression layer.
Value *getRoot() { return root_; }
Node *getRoot() { return root_; }
};

} // namespace glow
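Since the loader now hands out graph nodes rather than IR values, client code queries it for Node pointers; loader construction is unchanged by this hunk and elided here. A sketch of the public query surface — the tensor name "data" is an illustrative assumption:

// Sketch: querying a constructed caffe2ModelLoader.
void inspect(caffe2ModelLoader &loader) {
  Node *root = loader.getRoot();            // last softmax/regression output
  Node *in = loader.getOrCreateNodeByName("data");
  (void)root;
  (void)in;
}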