@@ -43,6 +43,9 @@ TEST_F(NVFuserTest, FusionStandaloneFull_CUDA) {
   fusion->addInput(fill_val2);
   fusion->addInput(fill_val3);
   for (auto dtype : dtypes) {
+    if (!isSupportedTypeByDevice(aten_to_data_type(dtype))) {
+      continue;
+    }
     auto out_tv = full({size}, fill_val1, aten_to_data_type(dtype));
     fusion->addOutput(out_tv);
     out_tv = full({size, size}, fill_val2, aten_to_data_type(dtype));
@@ -57,6 +60,9 @@ TEST_F(NVFuserTest, FusionStandaloneFull_CUDA) {
   std::vector<at::Tensor> expect;
   expect.reserve(dtypes.size());
   for (auto dtype : dtypes) {
+    if (!isSupportedTypeByDevice(aten_to_data_type(dtype))) {
+      continue;
+    }
     const auto options =
         at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
     expect.emplace_back(at::full({size}, 11, options));
@@ -94,6 +100,9 @@ TEST_F(NVFuserTest, FusionStandaloneZeros_CUDA) {
   Val* size = IrBuilder::create<Int>();
   fusion->addInput(size);
   for (auto dtype : dtypes) {
+    if (!isSupportedTypeByDevice(aten_to_data_type(dtype))) {
+      continue;
+    }
     auto out_tv = zeros({size}, aten_to_data_type(dtype));
     fusion->addOutput(out_tv);
     out_tv = zeros({size, size}, aten_to_data_type(dtype));
@@ -108,6 +117,9 @@ TEST_F(NVFuserTest, FusionStandaloneZeros_CUDA) {
   std::vector<at::Tensor> expect;
   expect.reserve(dtypes.size());
   for (auto dtype : dtypes) {
+    if (!isSupportedTypeByDevice(aten_to_data_type(dtype))) {
+      continue;
+    }
     const auto options =
         at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
     expect.emplace_back(at::zeros({size}, options));
@@ -145,6 +157,9 @@ TEST_F(NVFuserTest, FusionStandaloneOnes_CUDA) {
   Val* size = IrBuilder::create<Int>();
   fusion->addInput(size);
   for (auto dtype : dtypes) {
+    if (!isSupportedTypeByDevice(aten_to_data_type(dtype))) {
+      continue;
+    }
     auto out_tv = ones({size}, aten_to_data_type(dtype));
     fusion->addOutput(out_tv);
     out_tv = ones({size, size}, aten_to_data_type(dtype));
@@ -159,6 +174,9 @@ TEST_F(NVFuserTest, FusionStandaloneOnes_CUDA) {
   std::vector<at::Tensor> expect;
   expect.reserve(dtypes.size());
   for (auto dtype : dtypes) {
+    if (!isSupportedTypeByDevice(aten_to_data_type(dtype))) {
+      continue;
+    }
     const auto options =
         at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
     expect.emplace_back(at::ones({size}, options));
@@ -183,6 +201,10 @@ TEST_F(NVFuserTest, FusionStandaloneARange_CUDA) {
   auto dtypes = {kFloat, kLong, kDouble};

   for (auto dtype : dtypes) {
+    if (!isSupportedTypeByDevice(aten_to_data_type(dtype))) {
+      continue;
+    }
+
     auto fusion = std::make_unique<Fusion>();
     FusionGuard fg(fusion.get());

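For context, every hunk above applies the same pattern: before building or checking tensors for a dtype, the loop asks whether the current GPU can actually handle that dtype and skips it otherwise. Below is a minimal, standalone sketch of what such a device-support guard can look like when written directly against the CUDA runtime. The names ScalarKind and deviceSupportsDtype are hypothetical, this is not the nvfuser isSupportedTypeByDevice implementation, and it assumes the only capability-gated dtype of interest is bfloat16 (which requires compute capability 8.0 or newer).

#include <cuda_runtime.h>

// Hypothetical dtype enum for the sketch; the real tests pass aten/nvfuser types.
enum class ScalarKind { Float, Double, Long, BFloat16 };

// Returns true when the dtype is usable on CUDA device 0.
bool deviceSupportsDtype(ScalarKind kind) {
  cudaDeviceProp prop{};
  if (cudaGetDeviceProperties(&prop, /*device=*/0) != cudaSuccess) {
    return false;  // No usable device: treat every dtype as unsupported.
  }
  if (kind == ScalarKind::BFloat16) {
    return prop.major >= 8;  // bfloat16 needs compute capability 8.0+ (Ampere).
  }
  return true;  // float/double/long are available on every CUDA device.
}

A test loop would then skip unsupported entries the same way the diff does, e.g. `if (!deviceSupportsDtype(kind)) { continue; }`, so both the fusion outputs and the expected reference tensors stay in sync for the dtypes that remain.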