@@ -32,7 +32,7 @@
     logging as dynamo_logging,
     utils as dynamo_utils,
 )
-from torch._dynamo.utils import detect_fake_mode, lazy_format_graph_code
+from torch._dynamo.utils import counters, detect_fake_mode, lazy_format_graph_code
 from torch._functorch.aot_autograd import aot_export_module, make_boxed_func
 from torch._inductor.codecache import code_hash, CompiledFxGraph, FxGraphCache
 
@@ -511,6 +511,10 @@ def fx_codegen_and_compile(
         post_grad_passes(gm, is_inference=is_inference)
         V.debug.fx_graph_transformed(gm, example_inputs)
         post_grad_graphs_log.debug("%s", lazy_format_graph_code("AFTER POST GRAD", gm))
+        log.debug(
+            "counters of the inductor dict after applying passes to the input FX graph in the post grad pass: %s",
+            counters["inductor"],
+        )
 
         with V.set_fake_mode(fake_mode):
             graph = GraphLowering(
@@ -1010,6 +1014,10 @@ def compile_fx(
             )
 
         model_ = pre_grad_passes(model_, example_inputs_)
+        log.debug(
+            "counters of the inductor dict after applying passes to the input FX graph in the pre grad pass: %s",
+            counters["inductor"],
+        )
 
     if any(isinstance(x, (list, tuple, dict)) for x in example_inputs_):
         return flatten_graph_inputs(
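For context on what the two new `log.debug` calls print, here is a minimal sketch, assuming the usual definition of `counters` in `torch/_dynamo/utils.py` as a `defaultdict(collections.Counter)` keyed by subsystem name, so that `counters["inductor"]` maps counter names to hit counts that the pre- and post-grad passes bump as they fire:

```python
# Minimal sketch (not part of this commit): the shape of the object the
# new debug lines print. `counters` is a defaultdict of collections.Counter,
# so reads never raise KeyError and passes can bump entries unconditionally.
from torch._dynamo.utils import counters

# An Inductor pass records a hit like this (pattern_matcher.py uses the
# "pattern_matcher_count" key, for example):
counters["inductor"]["pattern_matcher_count"] += 1

# The added log.debug calls then render the accumulated mapping:
print(dict(counters["inductor"]))  # -> {'pattern_matcher_count': 1}
```

To see the new messages at runtime, the `torch._inductor` logger has to emit at DEBUG level, e.g. by running with `TORCH_LOGS="+inductor"` (the `+` prefix raises that component to DEBUG).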