Use real weights and images for classification model tests and relax precision requirements for general model tests #7130
test/test_models.py:
@@ -18,7 +18,7 @@
 from common_utils import cpu_and_gpu, freeze_rng_state, map_nested_tensor_object, needs_cuda, set_rng_seed
 from PIL import Image
 from torchvision import models, transforms
-from torchvision.models import get_model_builder, list_models
+from torchvision.models import get_model_builder, get_model_weights, get_weight, list_models


 ACCEPT = os.getenv("EXPECTTEST_ACCEPT", "0") == "1"
@@ -29,7 +29,7 @@ def list_model_fns(module):
     return [get_model_builder(name) for name in list_models(module)]


-def _get_image(input_shape, real_image, device):
+def _get_image(input_shape, real_image, device, weights=None, dtype=None):
     """This routine loads a real or random image based on `real_image` argument.
     Currently, the real image is utilized for the following list of models:
     - `retinanet_resnet50_fpn`,
@@ -51,19 +51,26 @@ def _get_image(input_shape, real_image, device):

         img = Image.open(GRACE_HOPPER)

-        original_width, original_height = img.size
+        if weights is None:
+            original_width, original_height = img.size
Review comment: should we just pass the weights all the time? What's the reason for having them in only some cases but not all?

Reply: In some cases the weights are really restrictive, for instance if we use … Also, as of now, we don't use real weights for the detection model tests.

Reply: But isn't that a good thing? i.e. if we go below the …

Reply: For test purposes, we might want to use a smaller image even if the output is garbage, since we can still check for consistency (what we did so far with random images and random weights). And in this case, if we set …
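As an illustration of the "restrictive weights" point above (this example is not part of the diff; the weight enum and its preset size come from the torchvision docs), a SWAG preset pins a large input size, which is why `vit_h_14` opts out of real weights further down:

```python
# Sketch, assuming torchvision >= 0.13: some weight presets hard-code a large
# input size, so they cannot be paired with a small, fast test image.
from torchvision.models import ViT_H_14_Weights

preset = ViT_H_14_Weights.IMAGENET1K_SWAG_E2E_V1.transforms()
print(preset.crop_size)  # [518] -- far larger than the (1, 3, 56, 56) test input
```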
-        # make the image square
-        img = img.crop((0, 0, original_width, original_width))
-        img = img.resize(input_shape[1:3])
+            # make the image square
+            img = img.crop((0, 0, original_width, original_width))
+            img = img.resize(input_shape[-2:])

-        convert_tensor = transforms.ToTensor()
-        image = convert_tensor(img)
+            convert_tensor = transforms.ToTensor()
+            image = convert_tensor(img)
+        else:
+            H, W = input_shape[-2:]
+            min_side = min(H, W)
+            preprocess = weights.transforms(resize_size=min_side, crop_size=min_side)
Review comment: we don't need to pass parameters to the …

Reply: We need this if we want to control the size when the test happens; otherwise we rely on the default size of the weight's transforms (for some big models, we would like to use a smaller image size in the test to speed up the runtime). Note: for test purposes, I think it is okay not to use the …
+            image = preprocess(img)
+        if len(input_shape) > len(image.size()):
+            image = image.unsqueeze(0)
         assert tuple(image.size()) == input_shape
-        return image.to(device=device)
+        return image.to(device=device, dtype=dtype)

     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
-    return torch.rand(input_shape).to(device=device)
+    return torch.rand(input_shape).to(device=device, dtype=dtype)


 @pytest.fixture
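To make the preset-parameter thread concrete, here is a hedged sketch of what `_get_image` now does on the weights path; `ResNet50_Weights` and the 64-pixel sizes are example choices, not values from this PR:

```python
# Sketch: call a weights enum's preset transforms with overriding sizes,
# trading a meaningful output for a smaller, faster test input.
from PIL import Image
from torchvision.models import ResNet50_Weights

weights = ResNet50_Weights.IMAGENET1K_V1
preprocess = weights.transforms(resize_size=64, crop_size=64)  # override preset defaults

img = Image.new("RGB", (600, 606))    # stand-in for the GRACE_HOPPER image
image = preprocess(img).unsqueeze(0)  # add the batch dim, as _get_image does
assert tuple(image.size()) == (1, 3, 64, 64)
```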
@@ -195,7 +202,7 @@ def _check_fx_compatible(model, inputs, eager_out=None):
         eager_out = model(inputs)
     with torch.no_grad(), freeze_rng_state():
         fx_out = model_fx(inputs)
-    torch.testing.assert_close(eager_out, fx_out)
+    torch.testing.assert_close(eager_out, fx_out, atol=5e-5, rtol=5e-5)


 def _check_input_backprop(model, inputs):
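A side note on the relaxed tolerances (my illustration, not from the PR): `torch.testing.assert_close` accepts values as equal when `|actual - expected| <= atol + rtol * |expected|`, elementwise, so these settings allow roughly 1e-4 of drift on unit-scale outputs:

```python
import torch

expected = torch.tensor([1.0, -2.0])
actual = expected + 4e-5  # within 5e-5 + 5e-5 * |expected| everywhere
torch.testing.assert_close(actual, expected, atol=5e-5, rtol=5e-5)  # passes
```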
@@ -278,11 +285,15 @@ def _check_input_backprop(model, inputs):
 # tests under test_quantized_classification_model will be skipped for the following models.
 quantized_flaky_models = ("inception_v3", "resnet50")

+# The tests for the following detection models are flaky due to the precision of float32,
+# so we run the tests for these models in float64.
+detection_flaky_models = ("keypointrcnn_resnet50_fpn", "maskrcnn_resnet50_fpn_v2")
+

 # The following contains configuration parameters for all models which are used by
 # the _test_*_model methods.
 _model_params = {
-    "inception_v3": {"input_shape": (1, 3, 299, 299), "init_weights": True},
+    "inception_v3": {"input_shape": (1, 3, 299, 299)},
     "retinanet_resnet50_fpn": {
         "num_classes": 20,
         "score_thresh": 0.01,
@@ -354,6 +365,7 @@ def _check_input_backprop(model, inputs):
     "vit_h_14": {
         "image_size": 56,
         "input_shape": (1, 3, 56, 56),
+        "weight_name": None,
     },
     "mvit_v1_b": {
         "input_shape": (1, 3, 16, 224, 224),
@@ -364,7 +376,8 @@ def _check_input_backprop(model, inputs):
     "s3d": {
         "input_shape": (1, 3, 16, 224, 224),
     },
-    "googlenet": {"init_weights": True},
+    "regnet_y_128gf": {"weight_name": "IMAGENET1K_SWAG_LINEAR_V1"},
Review comment: Could we just get the actual weights from the model name, using the helpers from https://pytorch.org/vision/main/models.html#model-registration-mechanism?

Reply: We can; I actually use the helpers to get the actual weight here. I think I prefer this design, where we don't need to specify the …
+    "vitc_b_16": {"weight_name": None},
 }
 # speeding up slow models:
 slow_models = [
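The helpers the reviewer links to, shown standalone (a sketch assuming torchvision >= 0.14, reusing `regnet_y_128gf` from the diff):

```python
from torchvision.models import get_model_weights, get_weight

weight_enum = get_model_weights("regnet_y_128gf")  # -> RegNet_Y_128GF_Weights
weight = get_weight(f"{weight_enum.__name__}.IMAGENET1K_SWAG_LINEAR_V1")
print(weight)  # RegNet_Y_128GF_Weights.IMAGENET1K_SWAG_LINEAR_V1
```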
@@ -390,7 +403,10 @@ def _check_input_backprop(model, inputs):
     "swin_v2_b",
 ]
 for m in slow_models:
-    _model_params[m] = {"input_shape": (1, 3, 64, 64)}
+    if m not in _model_params:
+        _model_params[m] = {"input_shape": (1, 3, 64, 64)}
+    else:
+        _model_params[m]["input_shape"] = (1, 3, 64, 64)


 # skip big models to reduce memory usage on CI test. We can exclude combinations of (platform-system, device).
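Why the loop now distinguishes the two cases (a hypothetical illustration, not code from the PR): reassigning the whole entry would silently drop per-model settings such as `weight_name`:

```python
# One entry from _model_params, reduced for illustration.
_model_params = {"vit_h_14": {"image_size": 56, "weight_name": None}}

# _model_params["vit_h_14"] = {"input_shape": (1, 3, 64, 64)}  # would drop the other keys
_model_params["vit_h_14"]["input_shape"] = (1, 3, 64, 64)      # keeps them
assert _model_params["vit_h_14"]["weight_name"] is None
```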
@@ -648,6 +664,7 @@ def test_generalizedrcnn_transform_repr():


 def vitc_b_16(**kwargs: Any):
+    kwargs.pop("weights", None)
     return models.VisionTransformer(
         image_size=224,
         patch_size=16,
@@ -671,33 +688,46 @@ def test_vitc_models(model_fn, dev):
 def test_classification_model(model_fn, dev):
     set_rng_seed(0)
     defaults = {
-        "num_classes": 50,
+        "num_classes": 1000,
         "input_shape": (1, 3, 224, 224),
+        "num_expected": 50,
+        "real_image": True,
     }
     model_name = model_fn.__name__
     if SKIP_BIG_MODEL and is_skippable(model_name, dev):
         pytest.skip("Skipped to reduce memory usage. Set env var SKIP_BIG_MODEL=0 to enable test for this model")
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     num_classes = kwargs.get("num_classes")
+    num_expected = kwargs.pop("num_expected")
     input_shape = kwargs.pop("input_shape")
     real_image = kwargs.pop("real_image", False)
+    weight_name = kwargs.pop("weight_name", "IMAGENET1K_V1")
+    weight = None
+    if weight_name is not None:
+        weight_enum = get_model_weights(model_name)
+        weight = get_weight(f"{weight_enum.__name__}.{weight_name}")

-    model = model_fn(**kwargs)
+    model = model_fn(weights=weight, **kwargs)
     model.eval().to(device=dev)
-    x = _get_image(input_shape=input_shape, real_image=real_image, device=dev)
-    out = model(x)
-    _assert_expected(out.cpu(), model_name, prec=1e-3)
+    x = _get_image(input_shape=input_shape, real_image=real_image, device=dev, weights=weight)
+    with torch.no_grad(), freeze_rng_state():
+        out = model(x)
+    expect_out = out[:, :num_expected]
+    _assert_expected(expect_out.cpu(), model_name, prec=3e-2)
     assert out.shape[-1] == num_classes
+    assert expect_out.shape[-1] == num_expected
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)

     if dev == "cuda":
-        with torch.cuda.amp.autocast():
+        with torch.cuda.amp.autocast(), torch.no_grad(), freeze_rng_state():
             model.to(x.device)
             out = model(x)
+            expect_out = out[:, :num_expected]
             # See autocast_flaky_numerics comment at top of file.
             if model_name not in autocast_flaky_numerics:
-                _assert_expected(out.cpu(), model_name, prec=0.1)
-            assert out.shape[-1] == 50
+                _assert_expected(expect_out.cpu(), model_name, prec=0.1)
+            assert expect_out.shape[-1] == num_expected

     _check_input_backprop(model, x)
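One detail worth spelling out (my reading of the diff, not author commentary): with real weights the model emits all 1000 ImageNet logits, but only the first `num_expected` are checked against the stored expected values, which keeps the expectation files at their previous size:

```python
import torch

num_classes, num_expected = 1000, 50
out = torch.randn(1, num_classes)   # stand-in for model(x)
expect_out = out[:, :num_expected]  # only these values are compared
assert out.shape[-1] == num_classes and expect_out.shape[-1] == num_expected
```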
@@ -777,13 +807,17 @@ def test_detection_model(model_fn, dev):
         "input_shape": (3, 300, 300),
     }
     model_name = model_fn.__name__
+    if model_name in detection_flaky_models:
+        dtype = torch.float64
+    else:
+        dtype = torch.get_default_dtype()
     kwargs = {**defaults, **_model_params.get(model_name, {})}
     input_shape = kwargs.pop("input_shape")
     real_image = kwargs.pop("real_image", False)

     model = model_fn(**kwargs)
-    model.eval().to(device=dev)
-    x = _get_image(input_shape=input_shape, real_image=real_image, device=dev)
+    model.eval().to(device=dev, dtype=dtype)
+    x = _get_image(input_shape=input_shape, real_image=real_image, device=dev, dtype=dtype)
     model_input = [x]
     with torch.no_grad(), freeze_rng_state():
         out = model(model_input)
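A minimal standalone sketch of the float64 fallback (my example, not PR code; `weights=None`, `weights_backbone=None`, and the tiny `num_classes` keep it self-contained and download-free):

```python
# Cast both the model and the input to the same dtype for flaky models.
import torch
from torchvision.models.detection import maskrcnn_resnet50_fpn_v2

dtype = torch.float64
model = maskrcnn_resnet50_fpn_v2(weights=None, weights_backbone=None, num_classes=5)
model.eval().to(device="cpu", dtype=dtype)

x = torch.rand(3, 300, 300, dtype=dtype)
with torch.no_grad():
    out = model([x])  # detection models take a list of CHW tensors
```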
@@ -818,7 +852,7 @@ def compute_mean_std(tensor):
         return {"mean": mean, "std": std}

     output = map_nested_tensor_object(out, tensor_map_fn=compact)
-    prec = 0.01
+    prec = 3e-2
     try:
         # We first try to assert the entire output if possible. This is not
         # only the best way to assert results but also handles the cases
@@ -917,7 +951,7 @@ def test_video_model(model_fn, dev):
     # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests
     x = torch.rand(input_shape).to(device=dev)
     out = model(x)
-    _assert_expected(out.cpu(), model_name, prec=1e-5)
+    _assert_expected(out.cpu(), model_name, prec=3e-3)
     assert out.shape[-1] == num_classes
     _check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(model_name, None), eager_out=out)
     _check_fx_compatible(model, x, eager_out=out)