Commit ec2cdfb

Author: Wei

[FX] disable 2 of conv3d and type_as tests (#1224)

* Update test_convolution.py
* Update test_type_as.py
* Update config.yml
* Update test_convolution.py
* c++ format fix
* manual fix format for one file

1 parent 84ffb67 · commit ec2cdfb

File tree: 13 files changed (+174, -161 lines)

.circleci/config.yml (1 addition, 1 deletion)

@@ -747,7 +747,7 @@ parameters:
   # Nightly platform config
   torch-nightly-build:
     type: string
-    default: "1.13.0.dev20220715+cu113"
+    default: "1.13.0.dev20220731+cu113"
   torch-nightly-build-index:
     type: string
     default: "https://download.pytorch.org/whl/nightly/cu113"

core/conversion/conversionctx/ConversionCtx.cpp (4 additions, 4 deletions)

@@ -107,7 +107,7 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
   }
 
   cfg->setAvgTimingIterations(settings.num_avg_timing_iters);
-  if (settings.workspace_size != 0){
+  if (settings.workspace_size != 0) {
     cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, settings.workspace_size);
   }
 
@@ -124,13 +124,13 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
         settings.enabled_precisions.find(nvinfer1::DataType::kFLOAT) == settings.enabled_precisions.end(),
         "DLA supports only fp16 or int8 precision");
     cfg->setDLACore(settings.device.dla_core);
-    if (settings.dla_sram_size != 1048576){
+    if (settings.dla_sram_size != 1048576) {
       cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_MANAGED_SRAM, settings.dla_sram_size);
     }
-    if (settings.dla_local_dram_size != 1073741824){
+    if (settings.dla_local_dram_size != 1073741824) {
       cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_LOCAL_DRAM, settings.dla_local_dram_size);
     }
-    if (settings.dla_global_dram_size != 536870912){
+    if (settings.dla_global_dram_size != 536870912) {
       cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_GLOBAL_DRAM, settings.dla_global_dram_size);
     }
   }
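The guards in this file all follow one pattern: a TensorRT memory-pool limit is only overridden when the caller's setting differs from the sentinel value checked in the condition (0 for the workspace; 1048576, 1073741824, and 536870912 bytes for the three DLA pools, presumably the BuilderSettings defaults). A minimal standalone sketch of that pattern, assuming a valid nvinfer1::IBuilderConfig* from TensorRT 8.x and a hypothetical Settings struct standing in for torch_tensorrt's BuilderSettings:

#include <cstddef>
#include "NvInfer.h"

// Hypothetical stand-in for torch_tensorrt's BuilderSettings; the defaults
// mirror the sentinel values checked in ConversionCtx.cpp above.
struct Settings {
  std::size_t workspace_size = 0;                // 0 == keep TensorRT's default
  std::size_t dla_sram_size = 1048576;           // 1 MiB
  std::size_t dla_local_dram_size = 1073741824;  // 1 GiB
  std::size_t dla_global_dram_size = 536870912;  // 512 MiB
};

void apply_memory_limits(nvinfer1::IBuilderConfig* cfg, const Settings& s) {
  // Only touch a pool when the caller asked for a non-default size.
  if (s.workspace_size != 0) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, s.workspace_size);
  }
  if (s.dla_sram_size != 1048576) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_MANAGED_SRAM, s.dla_sram_size);
  }
  if (s.dla_local_dram_size != 1073741824) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_LOCAL_DRAM, s.dla_local_dram_size);
  }
  if (s.dla_global_dram_size != 536870912) {
    cfg->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kDLA_GLOBAL_DRAM, s.dla_global_dram_size);
  }
}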

core/conversion/converters/converter_util.cpp (12 additions, 9 deletions)

@@ -207,13 +207,13 @@ nvinfer1::ITensor* clamp(
     nvinfer1::ITensor* lower_bound,
     nvinfer1::ITensor* upper_bound,
     std::string const& name) {
-
   auto max_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMAX, x, lower_bound, "max layer for " + name);
   TORCHTRT_CHECK(max_layer, "Unable to create max layer for clamp");
   LOG_DEBUG(ctx->logger, "Create " << max_layer->getName() << " for clamp");
   auto max_itensor = max_layer->getOutput(0);
 
-  auto min_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
+  auto min_layer =
+      add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
   TORCHTRT_CHECK(min_layer, "Unable to create min layer for clamp");
   LOG_DEBUG(ctx->logger, "Create " << min_layer->getName() << " for clamp");
   auto min_itensor = min_layer->getOutput(0);
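The hunk above is purely a clang-format reflow; functionally, clamp is still built as a kMAX elementwise layer followed by a kMIN elementwise layer, i.e. min(max(x, lower_bound), upper_bound). As a quick illustration, the same composition on plain libtorch tensors (a sketch only, since the converter emits TensorRT IElementWiseLayers rather than eager ops):

#include <torch/torch.h>
#include <iostream>

int main() {
  auto x = torch::tensor({-3.0f, 0.5f, 7.0f});
  auto lower = torch::tensor({0.0f, 0.0f, 0.0f});
  auto upper = torch::tensor({4.0f, 4.0f, 4.0f});

  // clamp(x, lower, upper) == min(max(x, lower), upper),
  // the same kMAX-then-kMIN chain the converter builds.
  auto clamped = torch::min(torch::max(x, lower), upper);
  std::cout << clamped << std::endl;  // 0, 0.5, 4
}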
@@ -227,13 +227,13 @@ nvinfer1::ITensor* clamp_to_input_dim(
     nvinfer1::ITensor* input_dim,
     int nbdims,
     std::string const& name) {
-
   auto zero = torch::zeros({nbdims}).to(torch::kI32);
   auto zero_itensor = tensor_to_const(ctx, zero);
   auto one = torch::ones({nbdims}).to(torch::kI32);
   auto one_itensor = tensor_to_const(ctx, one);
 
-  auto upper_bound_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, input_dim, one_itensor, "sub layer for " + name);
+  auto upper_bound_layer =
+      add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, input_dim, one_itensor, "sub layer for " + name);
   TORCHTRT_CHECK(upper_bound_layer, "Unable to create sub layer for clamp to inputDim");
   LOG_DEBUG(ctx->logger, "Create " << upper_bound_layer->getName() << " for clamp to inputDim");
   auto upper_bound = upper_bound_layer->getOutput(0);
@@ -243,7 +243,8 @@ nvinfer1::ITensor* clamp_to_input_dim(
   LOG_DEBUG(ctx->logger, "Create " << max_layer->getName() << " for clamp to inputDim");
   auto max_itensor = max_layer->getOutput(0);
 
-  auto min_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
+  auto min_layer =
+      add_elementwise(ctx, nvinfer1::ElementWiseOperation::kMIN, max_itensor, upper_bound, "min layer for " + name);
   TORCHTRT_CHECK(min_layer, "Unable to create min_layer for clamp to inputDim");
   LOG_DEBUG(ctx->logger, "Create " << min_layer->getName() << " for clamp to inputDim");
   auto min_itensor = min_layer->getOutput(0);
@@ -257,7 +258,6 @@ nvinfer1::ITensor* normalize_indices(
     nvinfer1::ITensor* indices,
     int nbdims,
     std::string const& name) {
-
   auto zero = torch::zeros({nbdims}).to(torch::kI32);
   auto neg = -torch::ones({nbdims}).to(torch::kI32);
   auto zero_itensor = tensor_to_const(ctx, zero);
@@ -307,17 +307,20 @@ nvinfer1::ITensor* get_slice_size(
   at::Tensor one_tensor = torch::ones({nbdims}).to(torch::kI32);
   auto one_itensor = tensor_to_const(ctx, one_tensor);
 
-  auto sub_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, end, start, "get_slice_size sub layer for " + name);
+  auto sub_layer =
+      add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUB, end, start, "get_slice_size sub layer for " + name);
   TORCHTRT_CHECK(sub_layer, "Unable to create sub layer in calculate_output_size");
   LOG_DEBUG(ctx->logger, "Create " << sub_layer->getName() << " for calculate_output_size");
   auto sub_itensor = sub_layer->getOutput(0);
 
-  auto div_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kDIV, sub_itensor, stride, "get_slice_size div layer for " + name);
+  auto div_layer = add_elementwise(
+      ctx, nvinfer1::ElementWiseOperation::kDIV, sub_itensor, stride, "get_slice_size div layer for " + name);
   TORCHTRT_CHECK(div_layer, "Unable to create div layer in calculate_output_size");
   LOG_DEBUG(ctx->logger, "Create " << div_layer->getName() << " for calculate_output_size");
   auto div_itensor = div_layer->getOutput(0);
 
-  auto add_layer = add_elementwise(ctx, nvinfer1::ElementWiseOperation::kSUM, div_itensor, one_itensor, "get_slice_size sum layer for " + name);
+  auto add_layer = add_elementwise(
+      ctx, nvinfer1::ElementWiseOperation::kSUM, div_itensor, one_itensor, "get_slice_size sum layer for " + name);
   TORCHTRT_CHECK(add_layer, "Unable to create add layer in calculate_output_size");
   LOG_DEBUG(ctx->logger, "Create " << add_layer->getName() << " for calculate_output_size");
   auto size_itensor = add_layer->getOutput(0);
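The three layers wired up here compute, per dimension, size = (end - start) / stride + 1 with integer division. A plain-integer restatement of that kSUB -> kDIV -> kSUM chain (a sketch, not the converter itself):

#include <iostream>

int main() {
  int start = 1, end = 7, stride = 2;
  // Same arithmetic the three elementwise layers perform per dimension.
  int size = (end - start) / stride + 1;
  std::cout << size << std::endl;  // prints 4
}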

core/conversion/converters/converter_util.h (1 addition, 1 deletion)

@@ -1,8 +1,8 @@
 #pragma once
 
+#include <limits>
 #include <map>
 #include <string>
-#include <limits>
 
 #include "core/conversion/conversionctx/ConversionCtx.h"
 #include "core/conversion/converters/Weights.h"
