This repository was archived by the owner on Jul 1, 2025. It is now read-only.
Merged
4 changes: 2 additions & 2 deletions include/glow/Graph/Graph.h

@@ -164,8 +164,8 @@ class Function final : public Named {
 
   ConvolutionNode *createConv(llvm::StringRef name, NodeValue input,
                               NodeValue filter, NodeValue bias, TypeRef outTy,
-                              size_t depth, size_t kernel, size_t stride,
-                              size_t pad, size_t group);
+                              size_t kernel, size_t stride, size_t pad,
+                              size_t group);
 
   PoolMaxNode *createPoolMax(llvm::StringRef name, NodeValue input,
                              size_t kernel, size_t stride, size_t pad);
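Note: call sites simply drop the depth argument, since it is now implied by the filter's leading dimension. A hypothetical migrated call site (names and sizes invented for illustration):

    // Before this patch, depth was passed explicitly:
    //   F->createConv("conv", input, filter, bias, outTy,
    //                 /*depth=*/64, /*kernel=*/5, /*stride=*/1, /*pad=*/2,
    //                 /*group=*/1);
    // After, the output depth is read from filter->dims()[0]:
    auto *conv = F->createConv("conv", input, filter, bias, outTy,
                               /*kernel=*/5, /*stride=*/1, /*pad=*/2,
                               /*group=*/1);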
4 changes: 2 additions & 2 deletions lib/Backends/CPU/Transforms.cpp

@@ -18,7 +18,7 @@ using llvm::isa;
 /// pre-swizzle the data in the weights to make the access pattern more
 /// efficient.
 static Node *optimizeCPUConv(ConvolutionNode *CN, Function *F) {
-  auto depth = CN->getDepth();
+  auto depth = CN->getFilter().dims()[0];
   auto *M = F->getParent();
 
   // The depth dimension must be a multiple of 64 to perform the
@@ -62,7 +62,7 @@ static Node *optimizeCPUConv(ConvolutionNode *CN, Function *F) {
 
   return F->addNode(new CPUConvDKKC8Node(
       CN->getName(), CN->getType(), CN->getInput(), filter8, CN->getBias(),
-      CN->getKernel(), CN->getStride(), CN->getPad(), CN->getDepth()));
+      CN->getKernel(), CN->getStride(), CN->getPad()));
 }
 
 bool CPUBackend::transformPostLowering(Function *F) {
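Note: this works because the filter is laid out as {output depth, kernel, kernel, input channels per group}, as the asserts in Graph.cpp below confirm, so the filter's first dimension is exactly the value the removed Depth member used to cache. A minimal sketch of the assumption (dims invented):

    // filter->dims() == {64, 5, 5, 32} for a 64-output 5x5 conv over 32
    // input channels with group = 1; dims()[0] recovers the old Depth_.
    auto depth = CN->getFilter().dims()[0]; // == 64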
4 changes: 1 addition & 3 deletions lib/Backends/OpenCL/OpenCL.cpp

@@ -431,11 +431,9 @@ void OCLBackend::doForwardPass() {
       setKernelArg(kernel, 9, idim);
       setKernelArg(kernel, 10, ShapeNHWC(CC->getFilter()->getType()->dims()));
 
-      auto depth = CC->getDepth();
-
       // Use a 3D grid where the first dimension is the depth and the second
       // dimension is the slice index in the batch.
-      enqueueKernel(commands_, kernel, deviceId_, {odim.h, odim.w, depth},
+      enqueueKernel(commands_, kernel, deviceId_, {odim.h, odim.w, odim.c},
                     kernelLaunches);
       continue;
     }
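Note: odim.c, the output's channel count, is by construction equal to the depth that was previously cached on the node, so the launch grid is unchanged. A hedged sketch in plain OpenCL of what the enqueueKernel wrapper is assumed to reduce to (error and event handling elided):

    #include <CL/cl.h>

    // Launch a 3D NDRange: x = output rows, y = output columns,
    // z = output channels (formerly the cached depth).
    void launch(cl_command_queue commands, cl_kernel kernel,
                size_t h, size_t w, size_t c) {
      size_t global[3] = {h, w, c}; // {odim.h, odim.w, odim.c}
      clEnqueueNDRangeKernel(commands, kernel, /*work_dim=*/3,
                             /*global_work_offset=*/nullptr, global,
                             /*local_work_size=*/nullptr,
                             /*num_events_in_wait_list=*/0, nullptr, nullptr);
    }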
19 changes: 9 additions & 10 deletions lib/Graph/Graph.cpp

@@ -372,38 +372,37 @@ ConvolutionNode *Function::createConv(llvm::StringRef name, NodeValue input,
   auto OT = getParent()->uniqueType(ElemKind::FloatTy, outDims);
 
   return addNode(new ConvolutionNode(name, OT, input, filter, bias, kernel,
-                                     stride, pad, depth, /*group = */ 1));
+                                     stride, pad, /*group = */ 1));
 }
 
 /// Check that the dimensions that are passed in when the convolution is
 /// constructed are correct.
 static void assertConvDims(NodeValue input, NodeValue filter, NodeValue bias,
-                           size_t depth, size_t kernel, size_t stride,
-                           size_t pad, size_t group) {
+                           size_t kernel, size_t stride, size_t pad,
+                           size_t group) {
   ShapeNHWC idim = ShapeNHWC(input.dims());
   assert(idim.w >= kernel && idim.h >= kernel &&
          "buffer too small for selected stride");
   assert(idim.c % group == 0 && "channels number must be divisible by groups");
   (void)idim;
 
   auto filterDims = filter->dims();
-  assert(filterDims[0] == depth * group && filterDims[1] == kernel &&
+  assert(filterDims[0] % group == 0 && filterDims[1] == kernel &&
          filterDims[2] == kernel && filterDims[3] == idim.c / group &&
          "Invalid filter dims");
   (void)filterDims;
 
-  assert(bias->getType()->size() == depth * group && "Invalid bias size");
+  assert(bias->getType()->size() == filterDims[0] && "Invalid bias size");
 }
 
 ConvolutionNode *Function::createConv(llvm::StringRef name, NodeValue input,
                                       NodeValue filter, NodeValue bias,
-                                      TypeRef outTy, size_t depth,
-                                      size_t kernel, size_t stride, size_t pad,
-                                      size_t group) {
-  assertConvDims(input, filter, bias, depth, kernel, stride, pad, group);
+                                      TypeRef outTy, size_t kernel,
+                                      size_t stride, size_t pad, size_t group) {
+  assertConvDims(input, filter, bias, kernel, stride, pad, group);
   auto OT = getParent()->uniqueType(*outTy);
   return addNode(new ConvolutionNode(name, OT, input, filter, bias, kernel,
-                                     stride, pad, depth, group));
+                                     stride, pad, group));
 }
 
 PoolMaxNode *Function::createPoolMax(llvm::StringRef name, NodeValue input,
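Note: the reworked asserts pivot every shape check on the filter's leading dimension instead of a caller-supplied depth. A standalone sketch of the invariants with invented shapes (group = 2, NHWC input {1, 28, 28, 8}, filter {16, 5, 5, 4}, bias {16}):

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t group = 2;
      const size_t idimC = 8;                     // input channels (NHWC c)
      const size_t filterDims[4] = {16, 5, 5, 4}; // {out, k, k, in / group}
      const size_t biasSize = 16;

      // The same checks assertConvDims now performs:
      assert(idimC % group == 0 && "channels number must be divisible by groups");
      assert(filterDims[0] % group == 0 && "Invalid filter dims");
      assert(filterDims[3] == idimC / group && "Invalid filter dims");
      assert(biasSize == filterDims[0] && "Invalid bias size");
      return 0;
    }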
15 changes: 7 additions & 8 deletions lib/Graph/Nodes.cpp

@@ -488,7 +488,7 @@ static void checkType(NodeValue A, ElemKind expectedType) {
 
 static void verifyConvolution(NodeValue src, NodeValue dest, NodeValue filter,
                               NodeValue bias, size_t kernel, size_t stride,
-                              size_t pad, size_t depth, size_t group) {
+                              size_t pad, size_t group) {
   assert(src.getElementType() == dest.getElementType() && "Invalid Type");
   assert(src.getElementType() == filter.getElementType() && "Invalid Type");
   assert(src.getElementType() == bias.getElementType() && "Invalid Type");
@@ -501,15 +501,14 @@ static void verifyConvolution(NodeValue src, NodeValue dest, NodeValue filter,
   assert(idim.c % group == 0 && "channels number must be divisible by groups");
 
   auto outSz = calculateConvOutputDims(idim.h, idim.w, kernel, stride, pad);
-  ShapeNHWC exp(idim.n, outSz.first, outSz.second, depth * group);
-  (void)exp;
-  assert(exp == odim && "Invalid output dimensions");
+  assert(odim.n == idim.n && odim.h == outSz.first && odim.w == outSz.second &&
+         odim.c % group == 0 && "Invalid output dimensions");
 
-  auto filterDims = {depth * group, kernel, kernel, idim.c / group};
+  auto filterDims = {odim.c, kernel, kernel, idim.c / group};
   assert(filter.getType()->dims().equals(filterDims) && "Invalid filter dims");
   (void)filterDims;
 
-  auto biasDims = {depth * group};
+  auto biasDims = {odim.c};
   assert(bias.getType()->dims().equals(biasDims) && "Invalid bias dims");
   (void)biasDims;
 }
@@ -596,14 +595,14 @@ static void verifyRegression(NodeValue src, NodeValue dest,
 
 void ConvolutionNode::verify() const {
   verifyConvolution(getInput(), getResult(), getFilter(), getBias(), Kernel_,
-                    Stride_, Pad_, Depth_, Group_);
+                    Stride_, Pad_, Group_);
 }
 
 void ConvolutionGradNode::verify() const {
   verifyConvolution(getGradOfInputNamedInput(),
                     getGradOfOriginalOutputNamedResult(),
                     getGradOfInputNamedFilter(), getGradOfInputNamedBias(),
-                    Kernel_, Stride_, Pad_, Depth_, Group_);
+                    Kernel_, Stride_, Pad_, Group_);
 }
 
 void PoolMaxNode::verify() const {
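Note: the verifier now derives the expected filter shape {odim.c, kernel, kernel, idim.c / group} and bias shape {odim.c} from the output itself. A small sketch of the output-size arithmetic it relies on, assuming calculateConvOutputDims uses the standard convolution formula (numbers invented):

    #include <cstddef>
    #include <utility>

    // Assumed equivalent of calculateConvOutputDims:
    // out = (in + 2 * pad - kernel) / stride + 1, with integer division.
    std::pair<size_t, size_t> convOutputDims(size_t h, size_t w, size_t kernel,
                                             size_t stride, size_t pad) {
      auto out = [&](size_t in) { return (in + 2 * pad - kernel) / stride + 1; };
      return {out(h), out(w)};
    }

    // E.g. h = w = 28, kernel = 5, stride = 1, pad = 2 yields {28, 28}; the
    // verifier then checks odim against {idim.n, 28, 28, odim.c} and requires
    // odim.c % group == 0.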
7 changes: 3 additions & 4 deletions lib/IR/IRGen.cpp

@@ -115,10 +115,9 @@ struct IRGenVisitor : NodeWalker {
       auto *filterG = builder_.createAllocActivationInst("conv.filter.G",
                                                          filter->getType());
 
-      builder_.createConvolutionGradInst(N->getName(), input, filter, outGrad,
-                                         inG, filterG, biasG, CG->getKernel(),
-                                         CG->getStride(), CG->getPad(),
-                                         CG->getDepth(), CG->getGroup());
+      builder_.createConvolutionGradInst(
+          N->getName(), input, filter, outGrad, inG, filterG, biasG,
+          CG->getKernel(), CG->getStride(), CG->getPad(), CG->getGroup());
 
       registerIR(CG->getGradOfInputNamedInput(), inG);
       registerIR(CG->getGradOfInputNamedFilter(), filterG);
4 changes: 2 additions & 2 deletions lib/Importer/Caffe2.cpp

@@ -218,8 +218,8 @@ void caffe2ModelLoader::loadOperator(const caffe2::OperatorDef &op) {
                                    {idim.n, outSz.first, outSz.second, depth}};
     auto outTy = G_.getParent()->uniqueType(ElemKind::FloatTy, outDims);
 
-    auto *node = G_.createConv(opName, tr, filter, bias, outTy, depth / group,
-                               kernel, stride, pad, group);
+    auto *node = G_.createConv(opName, tr, filter, bias, outTy, kernel, stride,
+                               pad, group);
 
     // Transpose the output back.
     auto *N = G_.createTranspose(opName, node, NHWC2NCHW);
8 changes: 4 additions & 4 deletions lib/Optimizer/GraphOptimizer.cpp

@@ -762,10 +762,10 @@ static void optimizeQuantization(Function *F) {
     if (auto *CN = dyn_cast<ConvolutionNode>(RS->getInput())) {
       // Create the exact same convolution but with a different scaling
      // return type.
-      auto *newCN = F->createConv(
-          CN->getName(), CN->getInput(), CN->getFilter(), CN->getBias(),
-          RS->getType(), CN->getDepth(), CN->getKernel(), CN->getStride(),
-          CN->getPad(), CN->getGroup());
+      auto *newCN =
+          F->createConv(CN->getName(), CN->getInput(), CN->getFilter(),
+                        CN->getBias(), RS->getType(), CN->getKernel(),
+                        CN->getStride(), CN->getPad(), CN->getGroup());
       RS->getResult().replaceAllUsesOfWith(newCN);
       continue;
     }
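Note: this pass folds a trailing RescaleQuantized node into the convolution by recreating the conv with the rescale's result type, removing one node from the graph. A toy, self-contained illustration of why emitting the final scale directly can also avoid an intermediate round-off (scales and values invented, offsets omitted for brevity):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int8_t quantize(float v, float scale) {
      return static_cast<int8_t>(std::lround(v / scale));
    }

    int main() {
      float convResult = 3.14f; // hypothetical real-valued conv output
      float scaleA = 0.1f, scaleB = 0.25f;
      // Two nodes: quantize to scaleA, then rescale to scaleB.
      int8_t viaRescale = quantize(quantize(convResult, scaleA) * scaleA, scaleB);
      // One node: the conv emits scaleB directly.
      int8_t direct = quantize(convResult, scaleB);
      std::printf("via rescale = %d, direct = %d\n", viaRescale, direct);
      return 0;
    }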
5 changes: 2 additions & 3 deletions lib/Optimizer/Lower.cpp

@@ -504,9 +504,8 @@ void lowerGroupConvolutionNode(Function *F, ConvolutionNode &BNG) {
         {(groupId + 1) * outCperG, kernel, kernel, inCperG});
     auto *bias_slice = F->createSlice(BNG.getName(), bias, {groupId * outCperG},
                                       {(groupId + 1) * outCperG});
-    convs[groupId] =
-        F->createConv(BNG.getName(), in_slice, filter_slice, bias_slice, outTy,
-                      outCperG, kernel, stride, pad, 1);
+    convs[groupId] = F->createConv(BNG.getName(), in_slice, filter_slice,
+                                   bias_slice, outTy, kernel, stride, pad, 1);
   }
   auto result = F->createConcat(BNG.getName(), convs, 3);
   BNG.getResult().replaceAllUsesOfWith(result);
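Note: group lowering carves the channel dimension into equal per-group slices, runs each through an ordinary group = 1 convolution, and concatenates the results along dimension 3. A runnable sketch of just the slicing arithmetic (sizes invented):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t group = 2, inC = 8, outC = 16;
      const size_t inCperG = inC / group, outCperG = outC / group;
      for (size_t groupId = 0; groupId < group; ++groupId) {
        // Channel ranges fed to the groupId-th single-group convolution.
        std::printf("group %zu: input[%zu:%zu) filter/bias[%zu:%zu)\n",
                    groupId, groupId * inCperG, (groupId + 1) * inCperG,
                    groupId * outCperG, (groupId + 1) * outCperG);
      }
      // The per-group outputs are concatenated along dimension 3 (channels)
      // to rebuild the full {N, H', W', outC} result.
      return 0;
    }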
8 changes: 4 additions & 4 deletions lib/Quantization/Quantization.cpp

@@ -322,10 +322,10 @@ void generateQuantizedGraph(
       auto QT = F->getParent()->uniqueType(ElemKind::Int8QTy,
                                            CV->getResult()->dims(),
                                            TQP.scale_, TQP.offset_);
-      quantizedNode = F->createConv(
-          CV->getName(), quantizedInputs[0], quantizedInputs[1],
-          quantizedInputs[2], QT, CV->getDepth(), CV->getKernel(),
-          CV->getStride(), CV->getPad(), CV->getGroup());
+      quantizedNode =
+          F->createConv(CV->getName(), quantizedInputs[0], quantizedInputs[1],
+                        quantizedInputs[2], QT, CV->getKernel(),
+                        CV->getStride(), CV->getPad(), CV->getGroup());
       break;
     }
     case Kinded::Kind::SliceNodeKind: {
2 changes: 1 addition & 1 deletion tests/unittests/BackendTestUtils.cpp

@@ -104,7 +104,7 @@ void inferConvNet(Tensor *inputs, Tensor *filter, Tensor *bias, Tensor *out,
     OT = F->getParent()->uniqueType(out->getElementType(), out->dims());
   }
   auto *conv =
-      F->createConv("conv", inputVar, filterVar, biasVar, OT, 10, 5, 3, 4, 1);
+      F->createConv("conv", inputVar, filterVar, biasVar, OT, 5, 3, 4, 1);
   auto result = F->createSave("ret", conv, outVar);
   EE.compile(CompilationMode::Infer, F);
   EE.run({inputVar, filterVar, biasVar}, {inputs, filter, bias});
4 changes: 2 additions & 2 deletions tests/unittests/OperatorTest.cpp

@@ -502,8 +502,8 @@ void checkIntConvolution(ExecutionEngine &EE, unsigned convDepth) {
   auto *biasq = F->createQuantize("bias.q", bias, biasTy);
 
   auto *convq =
-      F->createConv("convq", inputq, filterq, biasq, resTy, conv->getDepth(),
-                    conv->getKernel(), conv->getStride(), conv->getPad(), 1);
+      F->createConv("convq", inputq, filterq, biasq, resTy, conv->getKernel(),
+                    conv->getStride(), conv->getPad(), 1);
   auto *dequantRes = F->createDequantize("dequant", convq);
 
   // Subtract the results of the convolution from the quantized convolution.
2 changes: 1 addition & 1 deletion tests/unittests/basicIRTest.cpp

@@ -115,7 +115,7 @@ TEST(IR, allInstrs) {
   XY->setName("srcXY");
 
   builder.createCopyInst("", I1, I0);
-  builder.createConvolutionInst("", I3, I1, F0, B0, 7, 2, 3, 64, 1);
+  builder.createConvolutionInst("", I3, I1, F0, B0, 7, 2, 3, 1);
   builder.createPoolMaxInst("", I4, I0, 7, 2, 3);
   builder.createSigmoidInst("", I1, I0);
   builder.createTanhInst("", I1, I0);
4 changes: 2 additions & 2 deletions tests/unittests/graphTest.cpp

@@ -123,8 +123,8 @@ TEST(Graph, simpleQuant) {
   std::array<size_t, 4> outDims = {{1, outSz.first, outSz.second, 16}};
   auto t = F->getParent()->uniqueType(glow::ElemKind::Int8QTy, outDims, 1.5, 6);
 
-  auto *conv = F->createConv("conv", input, filter, bias, t, depth, kernel,
-                             step, pad, 1);
+  auto *conv =
+      F->createConv("conv", input, filter, bias, t, kernel, step, pad, 1);
 
   auto s = conv->getType()->size();
   auto *fcFilter = MD.createVariable(ElemKind::Int8QTy, {s, 6}, 0.4, 2, "F");
1 change: 0 additions & 1 deletion tools/ClassGen/Backends/CPU/CPUSpecificInstrs.h

@@ -16,7 +16,6 @@ BB.newBackendSpecificInstr("CPUConvDKKC8")
     .addMember(MemberType::SizeT, "Kernel")
     .addMember(MemberType::SizeT, "Stride")
     .addMember(MemberType::SizeT, "Pad")
-    .addMember(MemberType::SizeT, "Depth")
     .autoIRGen()
     .autoVerify(VerifyKind::SameElementType, {"Dest", "Src", "Filter", "Bias"});
 
1 change: 0 additions & 1 deletion tools/ClassGen/Backends/CPU/CPUSpecificNodes.h

@@ -13,7 +13,6 @@ BB.newNode("CPUConvDKKC8")
    .addMember(MemberType::SizeT, "Kernel")
    .addMember(MemberType::SizeT, "Stride")
    .addMember(MemberType::SizeT, "Pad")
-   .addMember(MemberType::SizeT, "Depth")
    .addResultFromCtorArg()
    .setDocstring("This is a cpu-specific convolution implementation where the "
                  "filter is transposed to the shape [D/8, K, K, C, 8]");
2 changes: 1 addition & 1 deletion tools/ClassGen/Backends/CPU/CPUSpecificNodesVerification.h

@@ -10,7 +10,7 @@ void CPUConvDKKC8Node::verify() const {
   ShapeNHWC odim(getResult().getType()->dims());
   auto outSz = calculateConvOutputDims(idim.h, idim.w, getKernel(), getStride(),
                                        getPad());
-  ShapeNHWC exp(idim.n, outSz.first, outSz.second, getDepth());
+  ShapeNHWC exp(idim.n, outSz.first, outSz.second, getBias().dims()[0]);
   (void)exp;
   assert(exp == odim && "Invalid output dimensions");
 }
1 change: 0 additions & 1 deletion tools/ClassGen/InstrGen.cpp

@@ -69,7 +69,6 @@ int main(int argc, char **argv) {
       .addMember(MemberType::SizeT, "Kernel")
      .addMember(MemberType::SizeT, "Stride")
      .addMember(MemberType::SizeT, "Pad")
-     .addMember(MemberType::SizeT, "Depth")
      .addMember(MemberType::SizeT, "Group")
      .autoIRGen()
      .autoVerify(VerifyKind::SameElementType,
3 changes: 1 addition & 2 deletions tools/ClassGen/NodeGen.cpp

@@ -49,13 +49,12 @@ int main(int argc, char **argv) {
      .addMember(MemberType::SizeT, "Kernel")
      .addMember(MemberType::SizeT, "Stride")
      .addMember(MemberType::SizeT, "Pad")
-     .addMember(MemberType::SizeT, "Depth")
      .addMember(MemberType::SizeT, "Group")
      .addResultFromCtorArg()
      .addGradient()
      .setDocstring("Performs Convolution using a given Input, Filter, and "
                    "Bias tensors, as well as provided Kernel, Stride, Pad, "
-                   "Depth and Group.");
+                   "and Group.");
 
 BB.newNode("PoolMax")
     .addInput("Input")