Skip to content

Commit cb2269c

Browse files
committed
Revert: [Placeholder] Add support for Placeholders in the execution engine #1586
This commit reverts the parts of PR #1586 that pass the placeholder variables in the run command of the execution engine. At first I thought that this would allow us to eliminate the variables from the high-level parts of the compilation, but I realized that we first need to fix all of the backends and add support to the compiled function.
1 parent a1e72f7 commit cb2269c

File tree

17 files changed

+35
-159
lines changed

17 files changed

+35
-159
lines changed

include/glow/Backends/CompiledFunction.h

Lines changed: 2 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -16,23 +16,16 @@
1616
#ifndef GLOW_BACKENDS_COMPILEDFUNCTION_H
1717
#define GLOW_BACKENDS_COMPILEDFUNCTION_H
1818

19-
#include "llvm/ADT/ArrayRef.h"
20-
2119
namespace glow {
2220

23-
class Placeholder;
24-
class Tensor;
25-
2621
/// Interface for executing a compiled function.
2722
class CompiledFunction {
2823
public:
2924
/// Dtor.
3025
virtual ~CompiledFunction() = default;
3126

32-
/// Execute the network. The backing tensors in \p tensors are mapped to the
33-
/// placeholders of \p placeholders for the invocation of the program.
34-
virtual void execute(llvm::ArrayRef<Placeholder *> placeholders,
35-
llvm::ArrayRef<Tensor *> tensors) = 0;
27+
/// Execute the network.
28+
virtual void execute() = 0;
3629
};
3730

3831
} // end namespace glow

include/glow/ExecutionEngine/ExecutionEngine.h

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -80,19 +80,19 @@ class ExecutionEngine final {
8080

8181
/// Runs a single execution of the function. This method updates the variables
8282
/// in \p nodes with the tensor content values \p inputs.
83-
void run(llvm::ArrayRef<Storage *> vars, llvm::ArrayRef<Tensor *> inputs);
83+
void run(llvm::ArrayRef<Variable *> vars, llvm::ArrayRef<Tensor *> inputs);
8484

8585
/// Runs \p iterations iterations of the function. The method updates a local
8686
/// counter and future invocations of this method continue running iterations
8787
/// of the batch at the next available slice.
8888
/// The method updates the variables in \p vars with the tensors \p inputs.
89-
void runBatch(size_t iterations, llvm::ArrayRef<Storage *> vars,
89+
void runBatch(size_t iterations, llvm::ArrayRef<Variable *> vars,
9090
llvm::ArrayRef<Tensor *> inputs);
9191

9292
private:
9393
/// Update the inputs for all variables \p vars with data from the inputs \p
9494
/// inputs at offset \p sampleIdx. Then perform a run of the network.
95-
void updateInputsAndRunNetwork(llvm::ArrayRef<Storage *> vars,
95+
void updateInputsAndRunNetwork(llvm::ArrayRef<Variable *> vars,
9696
llvm::ArrayRef<Tensor *> inputs,
9797
size_t sampleIdx);
9898

lib/Backends/CPU/CPUFunction.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,8 +26,7 @@ CPUFunction::CPUFunction(std::unique_ptr<llvm::orc::GlowJIT> JIT, void *heap)
2626

2727
CPUFunction::~CPUFunction() { alignedFree(heap_); }
2828

29-
void CPUFunction::execute(llvm::ArrayRef<Placeholder *> placeholders,
30-
llvm::ArrayRef<Tensor *> tensors) {
29+
void CPUFunction::execute() {
3130
auto sym = JIT_->findSymbol("jitmain");
3231
assert(sym && "Unable to JIT the code!");
3332
using JitFuncType = void (*)(void);

lib/Backends/CPU/CPUFunction.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,7 @@ class CPUFunction final : public CompiledFunction {
3838
///@{
3939
~CPUFunction() override;
4040

41-
void execute(llvm::ArrayRef<Placeholder *> placeholders,
42-
llvm::ArrayRef<Tensor *> tensors) override;
41+
void execute() override;
4342
///@}
4443
};
4544

lib/Backends/Interpreter/InterpreterFunction.cpp

Lines changed: 5 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,10 @@ InterpreterFunction::InterpreterFunction(std::unique_ptr<IRFunction> F)
3131
assert(!externalTensors_.count(w) && "The tensor is already registered");
3232
externalTensors_[w] = &v->getPayload();
3333
}
34+
35+
for (auto *W : F_->getWeights()) {
36+
getOrCreateTensor(W);
37+
}
3438
}
3539

3640
InterpreterFunction::~InterpreterFunction() {
@@ -98,15 +102,7 @@ void InterpreterFunction::deleteTensor(const Value *v) {
98102
tensors_.erase(it);
99103
}
100104

101-
void InterpreterFunction::execute(llvm::ArrayRef<Placeholder *> placeholders,
102-
llvm::ArrayRef<Tensor *> tensors) {
103-
// Register the placeholder values with the corresponding backing tensor.
104-
// Note: the current implementation is not thread safe. See issue #1334.
105-
for (int i = 0, e = placeholders.size(); i < e; i++) {
106-
auto *w = F_->getWeightForNode(placeholders[i]);
107-
externalTensors_[w] = tensors[i];
108-
}
109-
105+
void InterpreterFunction::execute() {
110106
// Do the forward pass.
111107
#define DEF_VALUE(CLASS, NAME)
112108
#define DEF_INSTR(CLASS, NAME) \

lib/Backends/Interpreter/InterpreterFunction.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -54,8 +54,7 @@ class InterpreterFunction final : public CompiledFunction {
5454
///@{
5555
~InterpreterFunction() override;
5656

57-
void execute(llvm::ArrayRef<Placeholder *> placeholders,
58-
llvm::ArrayRef<Tensor *> tensors) override;
57+
void execute() override;
5958
///@}
6059

6160
private:

lib/Backends/OpenCL/OpenCL.cpp

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -587,8 +587,7 @@ static void topK(Tensor &outW, Tensor &indW, Tensor &inW, size_t k) {
587587
}
588588
}
589589

590-
void OpenCLFunction::execute(llvm::ArrayRef<Placeholder *> placeholders,
591-
llvm::ArrayRef<Tensor *> tensors) {
590+
void OpenCLFunction::execute() {
592591
auto copiedToDeviceBytes = copyMutableWeightsToDevice();
593592
(void)copiedToDeviceBytes;
594593
DEBUG_GLOW(llvm::dbgs() << "Copied " << copiedToDeviceBytes

lib/Backends/OpenCL/OpenCL.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -98,8 +98,7 @@ class OpenCLFunction final : public CompiledFunction {
9898
///@{
9999
~OpenCLFunction() override;
100100

101-
void execute(llvm::ArrayRef<Placeholder *> placeholders,
102-
llvm::ArrayRef<Tensor *> tensors) override;
101+
void execute() override;
103102
///@}
104103

105104
private:

lib/ExecutionEngine/ExecutionEngine.cpp

Lines changed: 10 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -28,8 +28,6 @@
2828

2929
using namespace glow;
3030

31-
using llvm::cast;
32-
3331
namespace {
3432
static llvm::cl::opt<std::string>
3533
dumpIRDAG("dump-ir-dag",
@@ -57,39 +55,24 @@ void ExecutionEngine::setBackend(Backend *backend) {
5755

5856
ExecutionEngine::~ExecutionEngine() = default;
5957

60-
void ExecutionEngine::run(llvm::ArrayRef<Storage *> vars,
58+
void ExecutionEngine::run(llvm::ArrayRef<Variable *> vars,
6159
llvm::ArrayRef<Tensor *> inputs) {
6260
assert(function_ && "No function has been compiled");
6361
assert(inputs.size() == vars.size() &&
6462
"The number of inputs does not match the number of variables");
6563

66-
llvm::SmallVector<Placeholder *, 8> placeholders;
67-
llvm::SmallVector<Tensor *, 8> tensors;
68-
69-
// Update the input variables, or register placeholders.
64+
// Update the input variables.
7065
for (int i = 0, e = vars.size(); i < e; i++) {
71-
// This is a variable. In here we override the content of the tensor with
72-
// the requested tensor.
73-
// Note: When we finish the migration to Placeholder this code needs to be
74-
// removed because it's not threadsafe. See issue #1334.
75-
if (Variable *V = llvm::dyn_cast<Variable>(vars[i])) {
76-
assert(V->getVisibilityKind() == VisibilityKind::Public &&
77-
"Trying to update a private variable");
78-
loadValueFromTensor(V, inputs[i]);
79-
continue;
80-
}
81-
82-
// This is a placeholder that we need to make concrete during the execution
83-
// of the program.
84-
placeholders.push_back(cast<Placeholder>(vars[i]));
85-
tensors.push_back(inputs[i]);
66+
assert(vars[i]->getVisibilityKind() == VisibilityKind::Public &&
67+
"Trying to update a private variable");
68+
loadValueFromTensor(vars[i], inputs[i]);
8669
}
8770

88-
function_->execute(placeholders, tensors);
71+
function_->execute();
8972
}
9073

9174
void ExecutionEngine::runBatch(size_t iterations,
92-
llvm::ArrayRef<Storage *> vars,
75+
llvm::ArrayRef<Variable *> vars,
9376
llvm::ArrayRef<Tensor *> inputs) {
9477
static size_t trainCounter = 0;
9578

@@ -110,56 +93,16 @@ void ExecutionEngine::runBatch(size_t iterations,
11093
}
11194
}
11295

113-
void ExecutionEngine::updateInputsAndRunNetwork(llvm::ArrayRef<Storage *> vars,
96+
void ExecutionEngine::updateInputsAndRunNetwork(llvm::ArrayRef<Variable *> vars,
11497
llvm::ArrayRef<Tensor *> inputs,
11598
size_t sampleIdx) {
116-
llvm::SmallVector<Placeholder *, 8> placeholders;
117-
118-
// This container saves the tensor slices that were extracted from the inputs.
119-
// The tensors are alive during the lifetime of this function. These tensors
120-
// must not move, and we must pass them as ArrayRef, so we allocate them here
121-
// manually.
122-
llvm::SmallVector<Tensor, 8> tempTensorViews;
123-
// We need to pass the tensors as array ref, so we reference the static
124-
// storage above, that's frozen during the execution of the program.
125-
llvm::SmallVector<Tensor *, 8> tensors;
126-
12799
// Update the input variables.
128100
for (int i = 0, e = vars.size(); i < e; i++) {
129-
// Note: When we finish the migration to Placeholder this code needs to be
130-
// removed because it's not threadsafe. See issue #1334.
131-
if (Variable *V = llvm::dyn_cast<Variable>(vars[i])) {
132-
loadValueFromTensorSlice(V, inputs[i], sampleIdx);
133-
continue;
134-
}
135-
136-
// This is a placeholder that we need to make concrete during the execution
137-
// of the program.
138-
placeholders.push_back(cast<Placeholder>(vars[i]));
139-
140-
// Extract a tensor view from the input values. This has the same
141-
// functionality as the logic in loadValueFromTensorSlice that copies the
142-
// content of the tensor.
143-
auto batchSize = inputs[i]->dims()[0];
144-
auto startIdx = sampleIdx % batchSize;
145-
auto phDims = placeholders[i]->dims();
146-
147-
// The start offset is all zeros except for the batch dimension.
148-
std::vector<size_t> sliceOffset(inputs[i]->dims().size(), 0);
149-
sliceOffset[0] = startIdx;
150-
151-
// Create the tensor view.
152-
tempTensorViews.push_back(inputs[i]->getUnowned(phDims, sliceOffset));
153-
}
154-
155-
// Save the pointer to the allocated tensor view, because the interface
156-
// requires ArrayRef of tensors.
157-
for (int i = 0, e = tempTensorViews.size(); i < e; i++) {
158-
tensors.push_back(&tempTensorViews[i]);
101+
loadValueFromTensorSlice(vars[i], inputs[i], sampleIdx);
159102
}
160103

161104
// Run the network.
162-
function_->execute(placeholders, tensors);
105+
function_->execute();
163106
}
164107

165108
void ExecutionEngine::loadValueFromTensorSlice(Variable *v, Tensor *input,

lib/Graph/Graph.cpp

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2155,6 +2155,9 @@ void Function::verify() const {
21552155
llvm_unreachable("Multiple nodes with the same name");
21562156
}
21572157

2158+
const auto &vars = getParent()->getVars();
2159+
(void)vars;
2160+
21582161
// Any node referenced by one of the graph nodes should be part of the Graph.
21592162
for (const auto &N : nodes_) {
21602163
for (size_t idx = 0, e = N.getNumInputs(); idx < e; ++idx) {

lib/Onnxifi/Base.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ onnxStatus Graph::initGraph(const void *onnxModel, size_t onnxModelSize,
6868
onnxStatus Graph::run() {
6969
// Copy tensors from the input addresses to the Glow tensors.
7070
llvm::SmallVector<Tensor *, 4> tensors;
71-
llvm::SmallVector<Storage *, 4> vars;
71+
llvm::SmallVector<Variable *, 4> vars;
7272
for (auto inputVar : inputVarToBuffer_) {
7373
auto *var = inputVar.first;
7474
auto *type = var->getType();

tests/unittests/BackendCorrectnessTest.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -304,7 +304,7 @@ TEST_P(CPUOnly, dataParallelStackingTest) {
304304
}
305305

306306
MockCPUBackend backend;
307-
backend.compile(std::move(M))->execute({}, {});
307+
backend.compile(std::move(M))->execute();
308308
auto H = var->getHandle();
309309
EXPECT_EQ(H.at(0), 3);
310310
EXPECT_EQ(H.at(1), 4);

tests/unittests/BackendTest.cpp

Lines changed: 1 addition & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -143,7 +143,7 @@ TEST_P(BackendTest, debugPrint) {
143143

144144
std::unique_ptr<Backend> backend(createBackend(GetParam()));
145145
auto function = backend->compile(std::move(IR));
146-
function->execute({}, {});
146+
function->execute();
147147
}
148148

149149
/// This test checks that we can compile a function without depending on the
@@ -173,59 +173,6 @@ TEST_P(BackendTest, decoupleCodegenFromGraph) {
173173
EXPECT_NEAR(HX.at({2}), 9, 1E-5);
174174
}
175175

176-
/// Check that we can pass information to the execution engine using Placeholder
177-
/// variables and read it back using Save nodes (in variables).
178-
TEST(Placeholder, simplePlaceholderValue) {
179-
Tensor data{99.0, 35.0, 2.0, 3.0};
180-
181-
ExecutionEngine EE{BackendKind::Interpreter};
182-
auto &mod = EE.getModule();
183-
184-
Function *F = mod.createFunction("main");
185-
auto *input = mod.createPlaceholder(ElemKind::FloatTy, {4}, "input");
186-
SaveNode *S = F->createSave("ret", input);
187-
188-
EE.compile(CompilationMode::Infer, F);
189-
190-
EE.run({input}, {&data});
191-
192-
auto &res = S->getVariable()->getPayload();
193-
EXPECT_TRUE(res.isEqual(data));
194-
}
195-
196-
/// Check that we can pass information to the execution engine using Placeholder
197-
/// variables and the runBatch API.
198-
TEST(Placeholder, runBatchTest) {
199-
// The input contains two slices of 4 floats each.
200-
Tensor data(ElemKind::FloatTy, {4, 4});
201-
// Fill the array with the pattern: [0 1 2 3; 10, 11, 12, 13; 20 21 22 23 ...]
202-
for (size_t i = 0; i < 4; i++) {
203-
for (size_t j = 0; j < 4; j++) {
204-
data.getHandle().at({i, j}) = i * 10 + j;
205-
}
206-
}
207-
208-
ExecutionEngine EE{BackendKind::Interpreter};
209-
auto &mod = EE.getModule();
210-
211-
Function *F = mod.createFunction("main");
212-
auto *input = mod.createPlaceholder(ElemKind::FloatTy, {1, 4}, "input");
213-
SaveNode *S = F->createSave("ret", input);
214-
215-
EE.compile(CompilationMode::Infer, F);
216-
217-
// Run the batch for 2 iterations:
218-
EE.runBatch(2, {input}, {&data});
219-
220-
Tensor expected{10, 11, 12, 13};
221-
auto EH = expected.getHandle();
222-
auto RH = S->getVariable()->getPayload().getHandle();
223-
224-
for (size_t i = 0; i < 4; i++) {
225-
EXPECT_EQ(RH.raw(i), EH.raw(i));
226-
}
227-
}
228-
229176
INSTANTIATE_TEST_CASE_P(Interpreter, BackendTest,
230177
::testing::Values(BackendKind::Interpreter));
231178

tests/unittests/BackendTestUtils.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,7 @@ namespace glow {
2323
/// MockBackend used only for unit testing.
2424
class MockBackend : public Backend {
2525
class MockFunction : public CompiledFunction {
26-
void execute(llvm::ArrayRef<Placeholder *> placeholders,
27-
llvm::ArrayRef<Tensor *> tensors) override {}
26+
void execute() override {}
2827
};
2928
std::unique_ptr<CompiledFunction>
3029
compile(std::unique_ptr<IRFunction> IR) const override {

tests/unittests/partitionTest.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ class PartitionTest : public ::testing::Test {
3535
};
3636

3737
/// Execute a graph of functions serially, which is the simplest approach.
38-
static void executeSerial(const FunctionDAG &G, llvm::ArrayRef<Storage *> vars,
38+
static void executeSerial(const FunctionDAG &G, llvm::ArrayRef<Variable *> vars,
3939
llvm::ArrayRef<Tensor *> inputs) {
4040
for (auto *F : G.getFunctions()) {
4141
ExecutionEngine EE;

tools/loader/Loader.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -267,7 +267,7 @@ void Loader::compile() {
267267
}
268268
}
269269

270-
void Loader::runInference(llvm::ArrayRef<Storage *> variables,
270+
void Loader::runInference(llvm::ArrayRef<Variable *> variables,
271271
llvm::ArrayRef<Tensor *> tensors) {
272272
assert(!emittingBundle() &&
273273
"No inference is performed in the bundle generation mode.");

tools/loader/Loader.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ class Loader {
6060

6161
/// Runs inference, unless emit bundle mode is enabled. If inference is run
6262
/// then it will \return true, else false.
63-
void runInference(llvm::ArrayRef<Storage *> variables,
63+
void runInference(llvm::ArrayRef<Variable *> variables,
6464
llvm::ArrayRef<Tensor *> tensors);
6565

6666
/// Create the Loader driver object, and parse/verify the command line

0 commit comments

Comments
 (0)