Skip to content

Commit 1e6bf28

Browse files
authored
Merge pull request #102 from iotamudelta/master
Merge from upstream
2 parents ae0336d + 578f601 commit 1e6bf28

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

74 files changed

+1183
-1009
lines changed

.jenkins/pytorch/build.sh

Lines changed: 1 addition & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ fi
7474
WERROR=1 python setup.py install
7575

7676
# Add the test binaries so that they won't be git clean'ed away
77-
git add -f build/bin
77+
git add -f build/bin build/lib
7878

7979
# Testing ATen install
8080
if [[ "$BUILD_ENVIRONMENT" != *cuda* ]]; then
@@ -101,11 +101,3 @@ if [[ "$BUILD_ENVIRONMENT" == *xenial-cuda8-cudnn6-py3* ]]; then
101101
make html
102102
popd
103103
fi
104-
105-
# Test no-Python build
106-
if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
107-
echo "Building libtorch"
108-
# NB: Install outside of source directory (at the same level as the root
109-
# pytorch folder) so that it doesn't get cleaned away prior to docker push.
110-
WERROR=1 VERBOSE=1 tools/cpp_build/build_caffe2.sh "$PWD/../cpp-build"
111-
fi

.jenkins/pytorch/macos-build.sh

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -61,6 +61,12 @@ export IMAGE_COMMIT_TAG=${BUILD_ENVIRONMENT}-${IMAGE_COMMIT_ID}
6161

6262
python setup.py install
6363

64+
# this is a bit hacky, but not too bad. Bundle the test binaries into
65+
# the installation directory, so they can catch a free ride on the 7z
66+
# train.
67+
mkdir -p ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch/test_binaries/build
68+
mv build/{bin,lib} ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch/test_binaries/build/
69+
6470
# Upload torch binaries when the build job is finished
6571
7z a ${IMAGE_COMMIT_TAG}.7z ${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch*
6672
aws s3 cp ${IMAGE_COMMIT_TAG}.7z s3://ossci-macos-build/pytorch/${IMAGE_COMMIT_TAG}.7z --acl public-read

.jenkins/pytorch/macos-test.sh

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -50,22 +50,13 @@ test_python_all() {
5050
test_cpp_api() {
5151
# C++ API
5252

53-
# NB: Install outside of source directory (at the same level as the root
54-
# pytorch folder) so that it doesn't get cleaned away prior to docker push.
55-
# But still clean it before we perform our own build.
56-
#
57-
CPP_BUILD="$PWD/../cpp-build"
58-
rm -rf $CPP_BUILD
59-
mkdir -p $CPP_BUILD
60-
WERROR=1 VERBOSE=1 tools/cpp_build/build_caffe2.sh "$CPP_BUILD"
61-
6253
python tools/download_mnist.py --quiet -d test/cpp/api/mnist
6354

6455
# Unfortunately it seems like the test can't load from miniconda3
6556
# without these paths being set
6657
export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$PWD/miniconda3/lib"
6758
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PWD/miniconda3/lib"
68-
"$CPP_BUILD"/caffe2/bin/test_api
59+
${PYTORCH_ENV_DIR}/miniconda3/lib/python3.6/site-packages/torch/test_binaries/build/bin/test_api
6960
}
7061

7162
if [ -z "${JOB_BASE_NAME}" ] || [[ "${JOB_BASE_NAME}" == *-test ]]; then

.jenkins/pytorch/test.sh

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -108,14 +108,13 @@ test_torchvision() {
108108
test_libtorch() {
109109
if [[ "$BUILD_TEST_LIBTORCH" == "1" ]]; then
110110
echo "Testing libtorch"
111-
CPP_BUILD="$PWD/../cpp-build"
112111
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
113-
"$CPP_BUILD"/caffe2/bin/test_jit
112+
./build/bin/test_jit
114113
else
115-
"$CPP_BUILD"/caffe2/bin/test_jit "[cpu]"
114+
./build/bin/test_jit "[cpu]"
116115
fi
117116
python tools/download_mnist.py --quiet -d test/cpp/api/mnist
118-
OMP_NUM_THREADS=2 "$CPP_BUILD"/caffe2/bin/test_api
117+
OMP_NUM_THREADS=2 ./build/bin/test_api
119118
fi
120119
}
121120

aten/src/ATen/Declarations.cwrap

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -23,9 +23,9 @@
2323
cpu_half: True
2424
arguments:
2525
- THTensor* self
26-
- arg: THSize* size
26+
- arg: IntListSize size
2727
long_args: True
28-
- CONSTANT NULL
28+
- CONSTANT {}
2929
]]
3030
[[
3131
name: set_
@@ -43,23 +43,23 @@
4343
scalar_check: False
4444
arguments:
4545
- THTensor* self
46-
- CONSTANT NULL, 0, THLongStorageView({0}, THLongStorageViewKind::SIZE), NULL
46+
- CONSTANT NULL, 0, {0}, {}
4747
- cname: setStorage
4848
scalar_check: False
4949
arguments:
5050
- THTensor* self
5151
- THStorage* source
5252
- CONSTANT 0
53-
- CONSTANT __storage_size.get()
54-
- CONSTANT NULL
53+
- CONSTANT {static_cast<int64_t>(source.pImpl()->size())}
54+
- CONSTANT {}
5555
- cname: setStorage
5656
arguments:
5757
- THTensor* self
5858
- THStorage* source
5959
- long storage_offset
60-
- THSize* size
61-
- arg: THStride* stride
62-
default: NULL
60+
- IntListSize size
61+
- arg: IntListStride stride
62+
default: {}
6363
]]
6464
[[
6565
name: _fill_
@@ -171,7 +171,7 @@
171171
return: THTensor*
172172
arguments:
173173
- THTensor* self
174-
- arg: THSize* size
174+
- arg: IntListSize size
175175
long_args: True
176176
]]
177177
[[
@@ -3393,8 +3393,8 @@
33933393
arguments: []
33943394
- cname: newWithSize
33953395
arguments:
3396-
- THSize* size
3397-
- CONSTANT NULL
3396+
- IntListSize size
3397+
- CONSTANT {}
33983398
]]
33993399
[[
34003400
name: tensor
@@ -3404,15 +3404,15 @@
34043404
options:
34053405
- cname: newWithSize
34063406
arguments:
3407-
- THSize* size
3408-
- arg: THStride* stride
3407+
- IntListSize size
3408+
- arg: IntListStride stride
34093409
- cname: newWithStorage
34103410
arguments:
34113411
- THStorage* storage
34123412
- int64_t storageOffset
3413-
- THSize* size
3414-
- arg: THStride* stride
3415-
default: NULL
3413+
- IntListSize size
3414+
- arg: IntListStride stride
3415+
default: {}
34163416
]]
34173417

34183418
# In theory, this could be a part of the above declaration. But in

aten/src/ATen/InferSize.h

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
#pragma once
2+
3+
#include <ATen/optional.h>
4+
#include <ATen/ScalarType.h>
5+
#include <sstream>
6+
#include <vector>
7+
8+
namespace at {
9+
10+
// Infers the size of a dim with size -1, if it exists. Also checks that new
11+
// shape is compatible with the number of elements.
12+
static std::vector<int64_t> infer_size(IntList shape, int64_t numel) {
13+
auto res = shape.vec();
14+
int64_t newsize = 1;
15+
auto infer_dim = at::optional<int64_t>();
16+
for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
17+
if (shape[dim] == -1) {
18+
if (infer_dim) {
19+
throw std::runtime_error("only one dimension can be inferred");
20+
}
21+
infer_dim = dim;
22+
} else if (shape[dim] >= 0) {
23+
newsize *= shape[dim];
24+
} else {
25+
AT_ERROR("invalid shape dimension ", shape[dim]);
26+
}
27+
}
28+
29+
if (numel == newsize || (infer_dim && newsize > 0 && numel % newsize == 0)) {
30+
if (infer_dim) {
31+
// we have a degree of freedom here to select the dimension size; follow NumPy semantics
32+
// and just bail.
33+
AT_CHECK(newsize != 0, "cannot reshape tensor of 0 elements into shape ", shape);
34+
res[*infer_dim] = numel / newsize;
35+
}
36+
return res;
37+
}
38+
39+
std::ostringstream ss;
40+
ss << "shape '" << shape << "' is invalid for input of size " << numel;
41+
throw std::runtime_error(ss.str());
42+
}
43+
44+
}

aten/src/ATen/THLongStorageView.h

Lines changed: 0 additions & 86 deletions
This file was deleted.

aten/src/ATen/THSizeStrideCompat.h

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
#pragma once
2+
3+
#include <ATen/ScalarType.h>
4+
#include <vector>
5+
6+
// NOTE: these functions are for compatibility into TH functions that takes sizes and strides.
7+
// We should just write the TH functions that don't require this, but that involves two steps:
8+
// 1) first class scalar support (for sizes)
9+
// 2) differentiating between nullptr/non-nullptr strides (the former "infers" strides).
10+
11+
namespace at {
12+
13+
static inline at::IntList get_intlist_size_th(IntList sizes) {
14+
static int64_t one = 1;
15+
if (sizes.size() == 0) {
16+
// fake scalar
17+
return IntList(&one, 1);
18+
} else {
19+
return sizes;
20+
}
21+
}
22+
23+
static inline IntList get_intlist_stride_th(IntList strides) {
24+
if (strides.size() == 0) {
25+
// differentiating between nullptr/non-nullptr strides (the former "infers" strides)
26+
return IntList();
27+
} else {
28+
return strides;
29+
}
30+
}
31+
32+
}

aten/src/ATen/function_wrapper.py

Lines changed: 9 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -210,8 +210,8 @@ def __init__(self, reason):
210210
'THDenseIndexTensor*': 'Tensor &',
211211
'THStorage*': 'Storage &',
212212
'THGenerator*': 'Generator *',
213-
'THSize*': 'IntList',
214-
'THStride*': 'IntList',
213+
'IntListSize': 'IntList',
214+
'IntListStride': 'IntList',
215215
'accreal': 'Scalar',
216216
'real': 'Scalar',
217217
'long': 'int64_t',
@@ -227,8 +227,8 @@ def __init__(self, reason):
227227
'THDenseIndexTensor*': 'IndexTensor',
228228
'THStorage*': 'Storage',
229229
'THGenerator*': 'Generator*',
230-
'THSize*': 'IntList',
231-
'THStride*': 'IntList',
230+
'IntListSize': 'IntList',
231+
'IntListStride': 'IntList',
232232
'accreal': 'accreal',
233233
'real': 'real',
234234
'long': 'int64_t',
@@ -297,9 +297,8 @@ def __init__(self, reason):
297297
CodeTemplate(
298298
'check_generator<${Backend}Generator>(${arg_name}, &globalContext().defaultGenerator(backend()))'),
299299
# This is a cast done via direct-construction
300-
'THSize*': CodeTemplate('THLongStorageView ${result_name}(${arg_name}, THLongStorageViewKind::SIZE);'),
301-
# This is a cast done via direct-construction
302-
'THStride*': CodeTemplate('THLongStorageView ${result_name}(${arg_name}, THLongStorageViewKind::STRIDE);'),
300+
'IntListSize': CodeTemplate('at::IntList ${result_name} = get_intlist_size_th(${arg_name});'),
301+
'IntListStride': CodeTemplate('at::IntList ${result_name} = get_intlist_stride_th(${arg_name});'),
303302
'real': CodeTemplate('${arg_name}.to${ScalarName}()'),
304303
'accreal': CodeTemplate('${arg_name}.to${AccScalarName}()'),
305304
'TensorList': CodeTemplate(
@@ -309,7 +308,7 @@ def __init__(self, reason):
309308
'IntList': CodeTemplate('check_intlist<${size}>(${arg_name}, "${arg_name}", ${arg_pos}${,default_init})')
310309
}
311310

312-
DIRECT_CONSTRUCTION_CHECKED_CAST = {'THSize*', 'THStride*'}
311+
DIRECT_CONSTRUCTION_CHECKED_CAST = {'IntListSize', 'IntListStride'}
313312

314313
CHECKED_USE = {
315314
'THTensor*': '{}_->tensor',
@@ -349,8 +348,6 @@ def __init__(self, reason):
349348
# Replacements for constants when calling into TH
350349
CONSTANT_REPLACEMENTS = [
351350
('AS_REAL', '${AS_REAL}'),
352-
('__storage_size.get\\(\\)',
353-
'THLongStorageView(static_cast<int64_t>(source.pImpl()->size()), THLongStorageViewKind::LENGTH)'),
354351
('__last_dim', 'self.ndimension()-1'),
355352
]
356353

@@ -1327,7 +1324,7 @@ def emit_body(env, option):
13271324
output_count = 0
13281325

13291326
# scalar_check is the heuristic conditions when a result may be a scalar_check
1330-
# if there is a THSize* argument, then its dimensions are used to determine scalar.
1327+
# if there is a IntListSize argument, then its dimensions are used to determine scalar.
13311328
# otherwise, it is true if all the input tensors are scalars,
13321329
scalar_check_is_from_size = False
13331330
scalar_check_is_from_option = False
@@ -1343,7 +1340,7 @@ def emit_body(env, option):
13431340
for arg in option['arguments']:
13441341
if is_real_argument_to_wrapper(arg):
13451342
count += 1
1346-
if arg['type'] == 'THSize*' and not scalar_check_is_from_option:
1343+
if arg['type'] == 'IntListSize' and not scalar_check_is_from_option:
13471344
scalar_check_is_from_size = True
13481345
scalar_check = '{}.size() == 0'.format(arg['name'])
13491346
if arg['type'] == 'TensorList':

0 commit comments

Comments (0)