
Commit 52aef2c

Author: Roman Dzhabarov (committed)
[NFC/cleanup] Remove more entries of variables.
1 parent: c5cd64a

25 files changed: +110 −106 lines

examples/char-rnn.cpp

Lines changed: 1 addition & 1 deletion

@@ -275,7 +275,7 @@ int main(int argc, char **argv) {
   // Generate a sentence by running inference over and over again.
   for (unsigned i = 0; i < generateChars; i++) {
     // Generate a char:
-    updateVariables(ctx, {X}, {&currCharInfer});
+    updateInputPlaceholders(ctx, {X}, {&currCharInfer});
     EE.run();
 
     // Pick a char at random from the softmax distribution.

examples/cifar10.cpp

Lines changed: 1 addition & 1 deletion

@@ -159,7 +159,7 @@ void testCIFAR10() {
   for (unsigned int i = 0; i < 100 / minibatchSize; i++) {
     Tensor sample(ElemKind::FloatTy, {minibatchSize, 32, 32, 3});
     sample.copyConsecutiveSlices(&images, minibatchSize * i);
-    updateVariables(ctx, {A}, {&sample});
+    updateInputPlaceholders(ctx, {A}, {&sample});
     EE.run();
 
     for (unsigned int iter = 0; iter < minibatchSize; iter++) {

examples/fr2en.cpp

Lines changed: 1 addition & 1 deletion

@@ -391,7 +391,7 @@ void Model::translate(const std::vector<std::string> &batch) {
         (words.size() - 1) + j * MAX_LENGTH;
   }
 
-  updateVariables(ctx, {input_, seqLength_}, {&input, &seqLength});
+  updateInputPlaceholders(ctx, {input_, seqLength_}, {&input, &seqLength});
   EE_.run();
 
   auto OH = ctx.get(output_)->getHandle<int64_t>();

include/glow/ExecutionEngine/ExecutionEngine.h

Lines changed: 5 additions & 5 deletions

@@ -91,15 +91,15 @@ class ExecutionEngine final {
 
 /// This method updates the placeholders in \p ph with the tensor content
 /// values \p inputs, in \p ctx.
-void updateVariables(Context &ctx, llvm::ArrayRef<Placeholder *> ph,
-                     llvm::ArrayRef<Tensor *> inputs);
+void updateInputPlaceholders(Context &ctx, llvm::ArrayRef<Placeholder *> ph,
+                             llvm::ArrayRef<Tensor *> inputs);
 
 /// This method updates the placeholders in the module. The placeholders are
 /// found by name
 /// in \p ph with the tensor content values \p inputs.
-void updateInputsByName(Context &ctx, Module *mod,
-                        llvm::ArrayRef<llvm::StringRef> ph,
-                        llvm::ArrayRef<Tensor *> inputs);
+void updateInputPlaceholdersByName(Context &ctx, Module *mod,
+                                   llvm::ArrayRef<llvm::StringRef> ph,
+                                   llvm::ArrayRef<Tensor *> inputs);
 
 /// Runs \p iterations iterations of the compiled function. The method updates a
 /// global counter and future invocations of this method continue running
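For reference, a minimal usage sketch of the two renamed entry points. This is not part of the commit: the one-node graph, the "input" placeholder name, and the shapes are illustrative; only the two function signatures come from the declarations above.

#include "glow/ExecutionEngine/ExecutionEngine.h"
#include "glow/Graph/Graph.h"

using namespace glow;

int main() {
  ExecutionEngine EE;
  Context ctx;
  Module &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  // Toy graph for the sketch: save the input straight back out.
  auto *inputPH =
      mod.createPlaceholder(ElemKind::FloatTy, {1, 4}, "input", false);
  auto *save = F->createSave("save", inputPH);
  ctx.allocate(inputPH);
  ctx.allocate(save->getPlaceholder());
  EE.compile(CompilationMode::Infer, F, ctx);

  Tensor in(ElemKind::FloatTy, {1, 4});
  in.getHandle() = {1.0f, 2.0f, 3.0f, 4.0f};

  // Bind the input tensor to the placeholder directly...
  updateInputPlaceholders(ctx, {inputPH}, {&in});
  EE.run();

  // ...or look the placeholder up by name in the module.
  updateInputPlaceholdersByName(ctx, &mod, {"input"}, {&in});
  EE.run();
}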

lib/Converter/FunctionConverter.cpp

Lines changed: 1 addition & 1 deletion

@@ -17,7 +17,7 @@
 
 #include "glow/Graph/Graph.h" // For Function.
 #include "glow/Graph/Node.h"  // For Node.
-#include "glow/Graph/Nodes.h" // For Placeholder and Variable.
+#include "glow/Graph/Nodes.h" // For Placeholder and Constant.
 
 using namespace glow;
 

lib/ExecutionEngine/ExecutionEngine.cpp

Lines changed: 9 additions & 9 deletions

@@ -41,27 +41,27 @@ void ExecutionEngine::setBackend(Backend *backend) {
 
 ExecutionEngine::~ExecutionEngine() = default;
 
-void glow::updateVariables(Context &ctx, llvm::ArrayRef<Placeholder *> ph,
-                           llvm::ArrayRef<Tensor *> inputs) {
+void glow::updateInputPlaceholders(Context &ctx,
+                                   llvm::ArrayRef<Placeholder *> ph,
+                                   llvm::ArrayRef<Tensor *> inputs) {
   assert(inputs.size() == ph.size() &&
          "The number of inputs does not match the number of Placeholders");
 
-  // Update the input variables.
   for (int i = 0, e = ph.size(); i < e; i++) {
     assert(ph[i] && "Invalid value");
     auto *backingTensor = ctx.get(ph[i]);
     assert(backingTensor && "Can't find the placeholder");
     auto dim = inputs[i]->dims();
     (void)dim;
     assert(backingTensor->getType().isEqual(inputs[i]->getType()) &&
-           "Mismatch on Variable and Tensor types.");
+           "Mismatch on Placeholder and Tensor types.");
     backingTensor->assign(inputs[i]);
   }
 }
 
-void glow::updateInputsByName(Context &ctx, Module *mod,
-                              llvm::ArrayRef<llvm::StringRef> ph,
-                              llvm::ArrayRef<Tensor *> inputs) {
+void glow::updateInputPlaceholdersByName(Context &ctx, Module *mod,
+                                         llvm::ArrayRef<llvm::StringRef> ph,
+                                         llvm::ArrayRef<Tensor *> inputs) {
   assert(inputs.size() == ph.size() &&
          "The number of inputs does not match the number of Placeholders");
 
@@ -70,7 +70,7 @@ void glow::updateInputsByName(Context &ctx, Module *mod,
     Tensor *t = inputs[i];
     assert(t && "Invalid tensor.");
     assert(p && "Invalid placeholder.");
-    updateVariables(ctx, {p}, {t});
+    updateInputPlaceholders(ctx, {p}, {t});
   }
 }
 
@@ -87,7 +87,7 @@ void glow::runBatch(ExecutionEngine &EE, Context &ctx, size_t iterations,
 
   assert(!inputs.empty() && "No inputs");
   assert(inputs.size() == ph.size() &&
-         "The number of inputs does not match the number of variables");
+         "The number of inputs does not match the number of placeholders");
 
   // For each iteration in the batch:
   for (size_t j = 0; j < iterations; j++) {
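Note what the assertions above require of the caller: each input tensor's type must exactly equal the type of the placeholder's backing tensor in the context, and the two array refs must have the same length. Continuing the sketch from the ExecutionEngine.h section (shapes hypothetical):

Tensor good(ElemKind::FloatTy, {1, 4}); // Same type as inputPH's backing tensor.
Tensor bad(ElemKind::FloatTy, {2, 4});  // Different shape.

updateInputPlaceholders(ctx, {inputPH}, {&good}); // OK.
// updateInputPlaceholders(ctx, {inputPH}, {&bad}); // Would trip the
// "Mismatch on Placeholder and Tensor types." assert.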

lib/Graph/Graph.cpp

Lines changed: 1 addition & 1 deletion

@@ -2361,7 +2361,7 @@ void Function::verify() const {
     }
   }
 
-  // Now check that the variables that are written to are either:
+  // Now check that the placeholders that are written to are either:
   // - Written by a save node, or
   // - Are only used by the node that writes them
   // If this check fails, that means we have implicit memory

lib/IR/GraphScheduler.cpp

Lines changed: 2 additions & 1 deletion

@@ -103,7 +103,8 @@ void ChildMemSizeBasedScheduler::orderChildNodesAndSchedule(Node *N) {
 
   // SaveNode hack:
   // We don't model memory dependencies, but we still need to honor them.
-  // Make sure the SaveNode happens after the last use of the output variable.
+  // Make sure the SaveNode happens after the last use of the output
+  // placeholder.
   if (auto *save = dyn_cast<SaveNode>(N)) {
     auto *destination = save->getOutput().getNode();
     for (NodeUse &use : destination->getUsers()) {

lib/Onnxifi/Base.cpp

Lines changed: 1 addition & 1 deletion

@@ -79,7 +79,7 @@ onnxStatus Graph::run() {
 
   // Run inference.
   auto &EE = backendPtr_->getEE();
-  updateVariables(ctx_, phs, tensors);
+  updateInputPlaceholders(ctx_, phs, tensors);
   EE.run();
 
   // Copy outputs to the addresses specified in the outputNodeToBuffer_.

lib/Optimizer/IROptimizer.cpp

Lines changed: 2 additions & 2 deletions

@@ -481,12 +481,12 @@ void makeWeightsConst(IRFunction &M) {
       }
     }
 
     // Mark the constant as read only.
     if (readOnly) {
       W->setMutability(WeightVar::MutabilityKind::Constant);
     } else {
       assert(W->getMutability() != WeightVar::MutabilityKind::Constant &&
-             "Variables defined as Const cannot be written into.");
+             "Const cannot be written into.");
     }
   }
 }

lib/Optimizer/Lower.cpp

Lines changed: 1 addition & 1 deletion

@@ -293,7 +293,7 @@ static void lowerSGDNode(Function *F, const SGDNode &SGD) {
 
   float momentum = SGD.getMomentum();
 
-  assert(W.dims() == G.dims() && "Invalid variables sizes for SGDNode");
+  assert(W.dims() == G.dims() && "Invalid weight/gradient sizes for SGDNode");
 
   float L1Decay = SGD.getL1Decay();
   float L2Decay = SGD.getL2Decay();

lib/Optimizer/Partition.cpp

Lines changed: 11 additions & 11 deletions

@@ -64,9 +64,9 @@ class NodeFunctionMap {
   Function *operator[](Node *n) { return nodeToFunction_[n]; }
 };
 
-/// If \p node has a single input that is not a variable, return it. Otherwise
+/// If \p node has a single input that is not a storage, return it. Otherwise
 /// return nullptr.
-Node *singleNonVariableInput(Node *node) {
+Node *singleNonStorageInput(Node *node) {
   Node *nonVarInput = nullptr;
 
   for (unsigned i = 0, e = node->getNumInputs(); i < e; i++) {
@@ -96,7 +96,7 @@ NodeFunctionMap selectBasicBlockPartitions(Function *F) {
 
     // If node has only one input, and that input has only one output, place it
    // in the same partition.
-    auto *in = singleNonVariableInput(node);
+    auto *in = singleNonStorageInput(node);
     if (in && in->getNumUsers() == 1) {
       auto it = mapping.find(in);
       assert(it != mapping.end());
@@ -126,9 +126,9 @@ FunctionDAG doPartitioning(Function *F, NodeFunctionMap &mapping) {
     mapping[&N]->addNode(clone);
   }
 
-  // For any dependency that crosses a partition, add a variable and save
+  // For any dependency that crosses a partition, add a placeholder and save
   // node. Record the dependence in the function graph.
-  llvm::DenseMap<Node *, Placeholder *> variables;
+  llvm::DenseMap<Node *, Placeholder *> placeholders;
   for (auto *F : mapping.getFunctions()) {
     for (auto &N : F->getNodes()) {
       for (unsigned inp = 0, e = N.getNumInputs(); inp < e; inp++) {
@@ -143,23 +143,23 @@
         // Add this dependence to the FunctionDAG.
         G.add(F, inputF);
 
-        // If we've already created a variable for this dependence, use it.
-        auto it = variables.find(input.getNode());
-        if (it != variables.end()) {
+        // If we've already created a placeholder for this dependence, use it.
+        auto it = placeholders.find(input.getNode());
+        if (it != placeholders.end()) {
           N.setNthInput(inp, it->second);
           continue;
         }
 
-        // Create a new variable to represent this dependence.
+        // Create a new placeholder to represent this dependence.
         auto *save = inputF->createSave("tmp", input);
         auto *tmp = save->getPlaceholder();
-        variables[input.getNode()] = tmp;
+        placeholders[input.getNode()] = tmp;
         N.setNthInput(inp, tmp);
       }
     }
   }
 
-  // Update links between nodes in the cloned functions. Add variables (and
+  // Update links between nodes in the cloned functions. Add placeholders (and
   // save nodes) where a link crosses a partition boundary.
   for (auto *F : mapping.getFunctions()) {
     for (auto &N : F->getNodes()) {
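The save-through-a-placeholder idiom in the last hunk is the whole cross-partition mechanism: the producing partition writes the crossing value into a fresh placeholder via a SaveNode, and the consuming node is rewired to read that placeholder. A minimal sketch of the idiom, with hypothetical names (producerF, consumerNode, value, and idx are stand-ins; the calls mirror the hunk above):

// `value` is produced in producerF but consumed by consumerNode in another
// partition, at input index `idx`.
SaveNode *save = producerF->createSave("tmp", value);
Placeholder *tmp = save->getPlaceholder();
consumerNode.setNthInput(idx, tmp);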

tests/unittests/BackendTest.cpp

Lines changed: 3 additions & 3 deletions

@@ -68,7 +68,7 @@ TEST(Interpreter, profileQuantizationForANetwork) {
 
   // TODO: Verify histogram itself, for now just verify min and max.
   // Run inference first time and capture tensor stats.
-  updateVariables(ctx, {A}, {&inputs});
+  updateInputPlaceholders(ctx, {A}, {&inputs});
   EE.run();
 
   QuantizationProfileNode *profile{nullptr};
@@ -95,7 +95,7 @@ TEST(Interpreter, profileQuantizationForANetwork) {
 
   // Run inference for the second time with new min and max.
   inputs.getHandle() = {0.2f, 1.6f, 0.5f, 1.3f};
-  updateVariables(ctx, {A}, {&inputs});
+  updateInputPlaceholders(ctx, {A}, {&inputs});
   EE.run();
   min = CI.raw(0);
   max = CI.raw(1);
@@ -137,7 +137,7 @@ TEST_P(BackendTest, simpleInference) {
   ctx.allocate(S->getPlaceholder());
   EE_.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {input}, {&inputs});
+  updateInputPlaceholders(ctx, {input}, {&inputs});
   EE_.run();
 }
 

tests/unittests/BackendTestUtils.cpp

Lines changed: 16 additions & 15 deletions

@@ -66,7 +66,7 @@ void inferIntLookupTableNet(Tensor *input, Tensor *out,
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var}, {input});
+  updateInputPlaceholders(ctx, {var}, {input});
   EE.run();
   out->assign(resultTensor);
 }
@@ -107,7 +107,8 @@ void inferConvNet(Tensor *inputs, Tensor *filter, Tensor *bias, Tensor *out,
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {inputP, filterP, biasP}, {inputs, filter, bias});
+  updateInputPlaceholders(ctx, {inputP, filterP, biasP},
+                          {inputs, filter, bias});
   EE.run();
   out->assign(resultTensor);
 }
@@ -149,7 +150,7 @@ void trainConvNet(Tensor *inputs, Tensor *kernel1, Tensor *bias1,
 
   runBatch(EE, ctx, 8, sampleCounter, {var1, var2}, {inputs, selected});
   EE.compile(CompilationMode::Infer, F, ctx);
-  updateVariables(ctx, {var1, var2}, {inputs, selected});
+  updateInputPlaceholders(ctx, {var1, var2}, {inputs, selected});
   EE.run();
   out->assign(resultTensor);
 }
@@ -167,7 +168,7 @@ void inferLocalResponseNormalizationNet(Tensor *inputs, Tensor *out,
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var}, {inputs});
+  updateInputPlaceholders(ctx, {var}, {inputs});
   EE.run();
   out->assign(resultTensor);
 }
@@ -248,7 +249,7 @@ void trainAvgPoolNet(Tensor *inputs, Tensor *weights, Tensor *bias,
   runBatch(EE, ctx, 10, sampleCounter, {var1, var2}, {inputs, selected});
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var1, var2}, {inputs, selected});
+  updateInputPlaceholders(ctx, {var1, var2}, {inputs, selected});
   EE.run();
   out->assign(resultTensor);
 }
@@ -306,7 +307,7 @@ void inferSmallConv(Tensor *inputs, Tensor *out, BackendKind kind) {
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {in}, {inputs});
+  updateInputPlaceholders(ctx, {in}, {inputs});
   EE.run();
 
   out->assign(resultTensor);
@@ -543,7 +544,7 @@ void trainSoftMaxNet(Tensor *inputs, Tensor *weights, Tensor *bias,
   runBatch(EE, ctx, 30, sampleCounter, {var1, var2}, {inputs, selected});
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var1, var2}, {inputs, selected});
+  updateInputPlaceholders(ctx, {var1, var2}, {inputs, selected});
   EE.run();
   out->assign(resultTensor);
 }
@@ -567,7 +568,7 @@ void inferTanhConcatNet(Tensor *input1, Tensor *input2, Tensor *input3,
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var1, var2, var3}, {input1, input2, input3});
+  updateInputPlaceholders(ctx, {var1, var2, var3}, {input1, input2, input3});
   EE.run();
   out->assign(resultTensor);
 }
@@ -590,7 +591,7 @@ void inferBasicConvNet(Tensor *inputs, Tensor *out, BackendKind kind,
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var}, {inputs});
+  updateInputPlaceholders(ctx, {var}, {inputs});
   EE.run();
   out->assign(resultTensor);
 }
@@ -643,7 +644,7 @@ void inferMixedNet(Tensor *inputs, Tensor *out, BackendKind kind) {
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var}, {inputs});
+  updateInputPlaceholders(ctx, {var}, {inputs});
   EE.run();
   out->assign(resultTensor);
 }
@@ -687,8 +688,8 @@ void inferComplexNet1(Tensor *inputs1, Tensor *inputs2, Tensor *inputs3,
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var1, var2, var3, var4},
-                  {inputs1, inputs2, inputs3, inputs4});
+  updateInputPlaceholders(ctx, {var1, var2, var3, var4},
+                          {inputs1, inputs2, inputs3, inputs4});
   EE.run();
   out->assign(resultTensor);
 }
@@ -728,7 +729,7 @@ void inferTinyResnet(Tensor *input, Tensor *out, std::vector<Tensor> &weights,
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {in}, {input});
+  updateInputPlaceholders(ctx, {in}, {input});
   EE.run();
   out->assign(resultTensor);
 }
@@ -761,7 +762,7 @@ void inferExtract3D(Tensor *input, Tensor *out, BackendKind kind) {
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {inputs}, {input});
+  updateInputPlaceholders(ctx, {inputs}, {input});
   EE.run();
   out->assign(resultTensor);
 }
@@ -790,7 +791,7 @@ void inferMaxSplat(Tensor *input, Tensor *out, BackendKind kind) {
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {var}, {input});
+  updateInputPlaceholders(ctx, {var}, {input});
   EE.run();
   out->assign(resultTensor);
 }

tests/unittests/GemmTest.cpp

Lines changed: 1 addition & 1 deletion

@@ -57,7 +57,7 @@ void infer(Tensor *out, Tensor *lhs, Tensor *rhs) {
 
   EE.compile(CompilationMode::Infer, F, ctx);
 
-  updateVariables(ctx, {lhsVar, rhsVar}, {lhs, rhs});
+  updateInputPlaceholders(ctx, {lhsVar, rhsVar}, {lhs, rhs});
   EE.run();
 
   out->assign(res);

tests/unittests/HyphenTest.cpp

Lines changed: 1 addition & 1 deletion

@@ -304,7 +304,7 @@ struct HyphenNetwork {
       bi = numSamples - batchSize;
     }
     auto batchInputs = inputs.getUnowned({batchSize, 6, 27}, {bi, 0, 0});
-    updateVariables(ctx_, {input_}, {&batchInputs});
+    updateInputPlaceholders(ctx_, {input_}, {&batchInputs});
    EE.run();
 
     // Check each output in the batch.
