Skip to content

Commit f7bc341

Browse files
authored
Remove unused variables (#1955)
1 parent: df3393a · commit: f7bc341

10 files changed: 0 additions and 32 deletions (+0 / −32 lines changed)

benchmarks/cpp/nvfuser/batch_norm_channels_first.cpp

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -73,10 +73,6 @@ static void NvFuserScheduler_BatchNorm(
7373
DataType dtype) {
7474
TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
7575

76-
const bool kTraining = true;
77-
const float kMomentum = 0.1;
78-
const float kEps = 1e-5;
79-
8076
std::vector<int64_t> input_shape{
8177
benchmark_state.range(0),
8278
benchmark_state.range(1),

benchmarks/cpp/nvfuser/batch_norm_channels_first_backward.cpp

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@ static void setupBatchNorm_BWD(Fusion* fusion, DataType dtype) {
2525
FusionGuard fg(fusion);
2626

2727
const bool kTraining = true;
28-
const float kMomentum = 0.1;
2928
const float kEps = 1e-5;
3029

3130
// setup fusion
@@ -85,9 +84,6 @@ static void NvFuserScheduler_BatchNorm_BWD(
8584
DataType dtype) {
8685
TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
8786

88-
const bool kTraining = true;
89-
const float kEps = 1e-5;
90-
9187
std::vector<int64_t> input_shape{
9288
benchmark_state.range(0),
9389
benchmark_state.range(1),

benchmarks/cpp/nvfuser/batch_norm_channels_last.cpp

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -74,10 +74,6 @@ static void NvFuserScheduler_BatchNorm_nhwc(
7474
DataType dtype) {
7575
TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
7676

77-
const bool kTraining = true;
78-
const float kMomentum = 0.1;
79-
const float kEps = 1e-5;
80-
8177
std::vector<int64_t> input_shape{
8278
benchmark_state.range(0),
8379
benchmark_state.range(2),

benchmarks/cpp/nvfuser/batch_norm_channels_last_backward.cpp

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@ static void setupBatchNorm_nhwc_BWD(Fusion* fusion, DataType dtype) {
2525
FusionGuard fg(fusion);
2626

2727
const bool kTraining = true;
28-
const float kMomentum = 0.1;
2928
const float kEps = 1e-5;
3029

3130
// setup fusion
@@ -86,9 +85,6 @@ static void NvFuserScheduler_BatchNorm_nhwc_BWD(
8685
DataType dtype) {
8786
TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
8887

89-
const bool kTraining = true;
90-
const float kEps = 1e-5;
91-
9288
std::vector<int64_t> input_shape{
9389
benchmark_state.range(0),
9490
benchmark_state.range(2),

benchmarks/cpp/nvfuser/gelu_backward.cpp

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -113,9 +113,6 @@ BENCHMARK(GeluBackward_AutoSchedule)->Unit(benchmark::kMicrosecond);
113113
//------------------------------------------------------------------------------
114114

115115
static void GeluBackward_Lower(benchmark::State& benchmark_state) {
116-
constexpr int kHiddenFeatures = 512;
117-
constexpr int kBatchSize = 64;
118-
119116
Fusion fusion;
120117

121118
// setup fusion

benchmarks/cpp/nvfuser/layer_norm.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@ static void setupLayerNorm(Fusion* fusion, DataType dtype) {
2222

2323
FusionGuard fg(fusion);
2424

25-
const int kReductionAxis = 1;
2625
const float kEps = 1e-5;
2726

2827
Double* eps_ptr = IrBuilder::create<Double>(kEps);
@@ -61,7 +60,6 @@ static void NvFuserScheduler_LayerNorm(
6160

6261
std::vector<int64_t> input_shape{
6362
benchmark_state.range(0), benchmark_state.range(1)};
64-
const float kEps = 1e-5;
6563

6664
// inputs
6765
at::manual_seed(0);

benchmarks/cpp/nvfuser/layer_norm_backward.cpp

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,9 +22,6 @@ static void setupLayerNorm_BWD(Fusion* fusion, DataType dtype) {
2222

2323
TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
2424

25-
const int kReductionAxis = 1;
26-
Double* eps_ptr = IrBuilder::create<Double>(1e-5);
27-
2825
// setup fusion
2926
auto grad_out = makeContigTensor(2, dtype);
3027
auto input = makeContigTensor(2, dtype);

benchmarks/cpp/nvfuser/rms_norm.cpp

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@ static void setupRMSNorm(Fusion* fusion, DataType dtype) {
2424

2525
FusionGuard fg(fusion);
2626

27-
const int kReductionAxis = 2;
2827
const float kEps = 1e-6;
2928

3029
Double* eps_ptr = IrBuilder::create<Double>(kEps);
@@ -61,7 +60,6 @@ static void NvFuserScheduler_RMSNorm(
6160
dtype == DataType::BFloat16);
6261

6362
std::vector<int64_t> input_shape{8, benchmark_state.range(0), 1024};
64-
const float kEps = 1e-6;
6563

6664
// inputs
6765
at::manual_seed(0);

benchmarks/cpp/nvfuser/rms_norm_backward.cpp

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -24,9 +24,6 @@ static void setupRMSNorm_BWD(Fusion* fusion, DataType dtype) {
2424
dtype == DataType::Float || dtype == DataType::Half ||
2525
dtype == DataType::BFloat16);
2626

27-
const int kReductionAxis = 2;
28-
Double* eps_ptr = IrBuilder::create<Double>(1e-6);
29-
3027
// setup fusion
3128
auto grad_out = makeContigTensor(3, dtype);
3229
auto input = makeContigTensor(3, dtype);

benchmarks/cpp/nvfuser/timm.cpp

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -139,7 +139,6 @@ static void setup_vit_base_patch16_224_bcast5(Fusion* fusion, void* null) {
139139
auto t20 = sum(t37, {2});
140140
auto t24 = broadcast(t20, bcast_pattern1);
141141
auto d95 = castOp(DataType::Double, t2->axis(2)->extent());
142-
auto d96 = mul(IrBuilder::create<Double>(1.0), d95);
143142
auto d105 = reciprocal(d95);
144143
auto t25 = mul(t24, d105);
145144
auto t26 = add(t25, IrBuilder::create<Double>(1e-6));
@@ -320,8 +319,6 @@ static void NvFuserScheduler_TIMM_vit_base_patch16_224_norm_inner3(
320319

321320
at::manual_seed(0);
322321
auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
323-
auto fp32_options =
324-
at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);
325322

326323
auto t0 = at::randn(input_shape, fp16_options);
327324

Comments (0) — this commit has no comments.