Skip to content

Commit 1eb14fe

Browse files
Kunal Chakraborti authored and facebook-github-bot committed
Back out "[PyTorch Edge] Add Quantized Softmax Op (Naive Implementation)"
Summary: Original commit changeset: 426a07808035 Original Phabricator Diff: D34943147 (pytorch@8d7242a) Since D34943147 (pytorch@8d7242a) landed, Adfinder push candidates consistently show push-blocking red counters for getAds C CPU main thread and getAds NC CPU main thread. AF auto prod canary for D34943147 (pytorch@8d7242a), c1-c2, shows a 1.19% regression for counter 'getAds C CPU main thread' and a ~1% regression for counter 'getAds NC CPU main thread': https://www.internalfb.com/intern/experiment_store/experiment/27487791896054/#commit1-commit2 To help unblock the adfinder push, reverting D34943147 (pytorch@8d7242a). Test Plan: Canary: https://our.intern.facebook.com/intern/ads/canary/442677925633895915 Canary completed: https://www.internalfb.com/intern/experiment_store/experiment/25288768753864/#commit1-commit2 Counter 'getAds C CPU main thread' moves in the opposite direction by -0.75. Differential Revision: D35370901 fbshipit-source-id: b2e89f5976eb3fa2c2b22f120c0e32e380f5bc52
1 parent b290c04 commit 1eb14fe

File tree

4 files changed

+0
-63
lines changed

4 files changed

+0
-63
lines changed

aten/src/ATen/native/quantized/cpu/qsoftmax.cpp

Lines changed: 0 additions & 27 deletions
This file was deleted.

aten/src/ATen/native/quantized/library.cpp

Lines changed: 0 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -188,7 +188,6 @@ TORCH_LIBRARY(quantized, m) {
188188
m.def(TORCH_SELECTIVE_SCHEMA("quantized::relu6(Tensor qx, bool inplace=False) -> Tensor"));
189189
m.def(TORCH_SELECTIVE_SCHEMA("quantized::leaky_relu(Tensor qx, Scalar negative_slope, bool inplace, float output_scale, int output_zero_point) -> Tensor"));
190190
m.def(TORCH_SELECTIVE_SCHEMA("quantized::sigmoid(Tensor qx, float output_scale, int output_zero_point) -> Tensor"));
191-
m.def(TORCH_SELECTIVE_SCHEMA("quantized::softmax(Tensor qx, int dim, float output_scale, int output_zero_point) -> Tensor"));
192191
}
193192

194193
// According to #33294: The "_" prefix registration will be

test/quantization/core/test_quantized_op.py

Lines changed: 0 additions & 34 deletions
Original file line number · Diff line number · Diff line change
@@ -1101,40 +1101,6 @@ def test_qmatmul(self, num_dims, outer_dims, m, k, n, dtypes):
11011101
scale_C,
11021102
zero_point_C)
11031103

1104-
"""Tests the correctness of the quantized softmax op."""
1105-
@given(num_dims=st.integers(2, 4),
1106-
dims=st.lists(st.integers(2, 5), min_size=5, max_size=5))
1107-
def test_qsoftmax(self, num_dims, dims):
1108-
size = dims[:num_dims]
1109-
torch_dtype = torch.quint8
1110-
np_dtype = np.uint8
1111-
dim = num_dims - 1
1112-
1113-
scale_X = 1.3
1114-
zero_point_X = 0
1115-
X = torch.rand(size=size, dtype=torch.float32) * 8 + zero_point_X
1116-
1117-
scale_Y = 1 / 256
1118-
zero_point_Y = 0
1119-
1120-
qX = torch.quantize_per_tensor(X,
1121-
scale=scale_X,
1122-
zero_point=zero_point_X,
1123-
dtype=torch_dtype)
1124-
1125-
1126-
# softmax ground truth
1127-
Y = torch.softmax(qX.dequantize(), dim=dim).numpy()
1128-
qY = _quantize(Y, scale_Y, zero_point_Y, dtype=np_dtype)
1129-
qY_hat = torch.ops.quantized.softmax(qX,
1130-
dim=dim,
1131-
output_scale=scale_Y,
1132-
output_zero_point=zero_point_Y)
1133-
1134-
np.testing.assert_equal(qY, qY_hat.int_repr(),
1135-
"Quantized softmax failed.")
1136-
1137-
11381104
"""Tests the correctness of the mul and mul_relu op."""
11391105
def test_qmul_broadcast(self):
11401106
mul_relu = torch.ops.quantized.mul_relu

tools/build_variables.bzl

Lines changed: 0 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -1222,7 +1222,6 @@ aten_native_source_non_codegen_list = [
12221222
"aten/src/ATen/native/quantized/cpu/qreduction.cpp",
12231223
"aten/src/ATen/native/quantized/cpu/qrelu.cpp",
12241224
"aten/src/ATen/native/quantized/cpu/qsigmoid.cpp",
1225-
"aten/src/ATen/native/quantized/cpu/qsoftmax.cpp",
12261225
"aten/src/ATen/native/quantized/cpu/qsort.cpp",
12271226
"aten/src/ATen/native/quantized/cpu/qtanh.cpp",
12281227
"aten/src/ATen/native/quantized/cpu/qthreshold.cpp",

0 commit comments

Comments (0)