torch/csrc/jit/codegen/cuda/python_frontend — 2 files changed, +9 −9 lines

File 1 of 2:

@@ -1353,14 +1353,14 @@ struct BatchNormOpRecord : RecordFunctor {
   BatchNormOpRecord(
       std::vector<State> args,
       std::vector<State> outputs,
-      bool kTraining,
+      bool training,
       bool channels_last)
       : RecordFunctor(
             std::move(args),
             std::move(outputs),
             "ops.batch_norm",
             RecordType::BatchNormOp),
-        kTraining_(kTraining),
+        training_(training),
         channels_last_(channels_last) {}
   virtual ~BatchNormOpRecord() = default;
   virtual RecordFunctor* clone() final {
@@ -1371,15 +1371,15 @@ struct BatchNormOpRecord : RecordFunctor {
     auto result = false;
     if (auto child_ptr = dynamic_cast<const BatchNormOpRecord*>(&other)) {
       result = RecordFunctor::operator==(other);
-      result = result && (kTraining_ == child_ptr->kTraining_);
+      result = result && (training_ == child_ptr->training_);
       result = result && (channels_last_ == child_ptr->channels_last_);
     }
     return result;
   }

   virtual size_t hash() const final {
     auto result = RecordFunctor::hash();
-    return result | (static_cast<size_t>(kTraining_) << 28) |
+    return result | (static_cast<size_t>(training_) << 28) |
         (static_cast<size_t>(channels_last_) << 29);
   }

@@ -1399,7 +1399,7 @@ struct BatchNormOpRecord : RecordFunctor {
         bias,
         running_mean,
         running_var,
-        kTraining_,
+        training_,
         momentum,
         eps,
         channels_last_);
@@ -1409,7 +1409,7 @@ struct BatchNormOpRecord : RecordFunctor {
   }

  private:
-  bool kTraining_;
+  bool training_;
   bool channels_last_;
 };

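Note (not part of the PR): the hash() override above folds the two boolean flags into distinct bits of the base record hash, so records that differ only in training or channels_last hash differently. A minimal standalone C++ sketch of that bit-packing, using assumed free-function names rather than the record class, is shown below.

// Standalone sketch (hypothetical names, not the PR's code): fold two boolean
// flags into an existing hash at distinct bit positions, mirroring the
// training_ << 28 and channels_last_ << 29 pattern in the diff above.
#include <cstddef>
#include <iostream>

size_t fold_flags(size_t base_hash, bool training, bool channels_last) {
  return base_hash | (static_cast<size_t>(training) << 28) |
      (static_cast<size_t>(channels_last) << 29);
}

int main() {
  // Hashes that differ only in the flags come out different.
  std::cout << fold_flags(0x1234, true, false) << "\n"; // bit 28 set
  std::cout << fold_flags(0x1234, false, true) << "\n"; // bit 29 set
  return 0;
}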
File 2 of 2:

@@ -1277,7 +1277,7 @@ void initNvFuserPythonBindings(PyObject* module) {
          nvfuser::Tensor bias,
          nvfuser::Tensor running_mean,
          nvfuser::Tensor running_var,
-         bool kTraining,
+         bool training,
          nvfuser::Scalar momentum,
          nvfuser::Scalar eps,
          bool channels_last) -> decltype(auto) {
@@ -1297,7 +1297,7 @@ void initNvFuserPythonBindings(PyObject* module) {
            {fd->recordingState(output()),
             fd->recordingState(mean()),
             fd->recordingState(invstd())},
-           kTraining,
+           training,
            channels_last));
        return std::make_tuple(output, mean, invstd);
      },
@@ -1306,7 +1306,7 @@ void initNvFuserPythonBindings(PyObject* module) {
      py::arg("bias").none(true),
      py::arg("running_mean").none(true),
      py::arg("running_var").none(true),
-     py::arg("kTraining"),
+     py::arg("training"),
      py::arg("momentum"),
      py::arg("eps"),
      py::arg("channels_last") = false,
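Note (not part of the PR): the string passed to py::arg becomes the Python keyword-argument name, so this rename is user-visible — callers now pass training=... instead of kTraining=.... A minimal, self-contained pybind11 sketch with a hypothetical module and stub function, not nvFuser's actual binding, illustrates the mechanism.

// Hypothetical pybind11 module (illustrative names only): the string given to
// py::arg is the keyword name exposed to Python callers.
#include <pybind11/pybind11.h>

namespace py = pybind11;

static bool batch_norm_stub(bool training, bool channels_last) {
  // Stand-in body; the real binding builds and records a fusion op.
  return training && !channels_last;
}

PYBIND11_MODULE(example_module, m) {
  m.def(
      "batch_norm_stub",
      &batch_norm_stub,
      py::arg("training"), // previously exposed as "kTraining"
      py::arg("channels_last") = false);
}

// Python usage after the rename (hypothetical):
//   example_module.batch_norm_stub(training=True)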