@@ -231,6 +231,17 @@ class Tensor {
231
231
232
232
// Virtual destructor: allows safe deletion of a derived tensor through a
// Tensor pointer. Empty and noexcept — members (e.g. the data_ shared_ptr)
// release their resources via their own destructors.
virtual ~Tensor() noexcept {}
233
233
234
/**
 * @brief Extend the outer-most dimension of this tensor
 * to dimension of `num`.
 *
 * @param num       Target size of the outer-most dimension.
 * @param growthPct Extra capacity (in percent) to over-allocate when the
 *                  underlying storage must grow; must be non-negative.
 * @param context   Required because Extend() may copy the existing data
 *                  into newly allocated storage.
 */
void ExtendTo(TIndex num, float growthPct, BaseContext* context) {
  // The tensor must already have at least one dimension to extend.
  CAFFE_ENFORCE_GE_WITH_CALLER(dims_.size(), 1);
  CAFFE_ENFORCE_GE_WITH_CALLER(growthPct, 0);
  CAFFE_ENFORCE(context != nullptr, "Context must be provided.");
  // Delegate to Extend() with the delta between the target outer dimension
  // and the current one. NOTE(review): if num < dims_[0] the delta is
  // negative and Extend() rejects it via its non-negative `num` enforce —
  // confirm callers only ever grow through this path.
  Extend(num - dims_[0], growthPct, context);
}
244
+
234
245
/* *
235
246
* @brief Extends the outer-most dimension of this tensor by num elements,
236
247
* preserving the existing data.
@@ -242,6 +253,8 @@ class Tensor {
242
253
*/
243
254
void Extend (TIndex num, float growthPct, BaseContext* context) {
244
255
CAFFE_ENFORCE_GE_WITH_CALLER (dims_.size (), 1 );
256
+ CAFFE_ENFORCE_GE_WITH_CALLER (
257
+ num, 0 , " `num` must be non-negative for Extend" );
245
258
auto newDims = dims_;
246
259
newDims[0 ] += num;
247
260
if (!data_) {
@@ -261,30 +274,17 @@ class Tensor {
261
274
auto newCapacity = dims_;
262
275
newCapacity[0 ] = std::max<size_t >(
263
276
newDims[0 ], std::ceil (dims_[0 ] * (growthPct + 100 ) / 100 ));
264
- Reserve (newCapacity, context);
265
- dims_ = newDims;
266
- size_ = newSize;
267
- }
268
-
269
- template <class T >
270
- void Reserve (const std::vector<T>& newCapacity, BaseContext* context) {
271
- auto newSize = std::accumulate (
272
- newCapacity.begin (),
273
- newCapacity.end (),
274
- static_cast <TIndex>(1 ),
275
- std::multiplies<TIndex>());
276
- if (newSize * meta_.itemsize () <= capacity_) {
277
- return ;
278
- }
279
277
auto oldData = std::move (data_);
280
278
auto oldSize = size_;
281
279
auto oldDims = dims_;
282
280
Resize (newCapacity);
283
281
auto * newData = raw_mutable_data (meta_);
282
+ CAFFE_ENFORCE (
283
+ context != nullptr , " Context must be provided to Extend the tensor" );
284
284
context->CopyItemsSameDevice (meta_, oldSize, oldData.get (), newData);
285
- dims_ = oldDims;
286
- size_ = oldSize;
287
285
reserved_ = true ;
286
+ dims_ = newDims;
287
+ size_ = newSize;
288
288
}
289
289
290
290
/* *
@@ -293,7 +293,7 @@ class Tensor {
293
293
* This method guarantees that no re-allocations are carried out, which means
294
294
* that the extra capacity after the end of the shurnk tensor is maintained.
295
295
*/
296
- void Shrink (TIndex outer_dim) {
296
+ void ShrinkTo (TIndex outer_dim) {
297
297
CAFFE_ENFORCE_WITH_CALLER (dims_.size () >= 1 , " Tensor must be at least 1D" );
298
298
CAFFE_ENFORCE_WITH_CALLER (
299
299
outer_dim <= dims_[0 ],
@@ -306,6 +306,38 @@ class Tensor {
306
306
std::multiplies<TIndex>());
307
307
}
308
308
309
/**
 * @brief Reserve space for the underlying tensor.
 *
 * This must be called after Resize(), since we only specify the first
 * dimension. This does not copy over the old data to the newly allocated
 * space.
 */
template <class T>
void ReserveSpace(const T& outer_dim) {
  CAFFE_ENFORCE(
      size_ != -1, "size should be initialized before calling ReserveSpace");
  // Target capacity keeps every inner dimension as-is; only the outer-most
  // dimension is raised to outer_dim.
  auto newCapacity = dims_;
  newCapacity[0] = outer_dim;
  // Element count of the requested capacity (product of all dims).
  auto newSize = std::accumulate(
      newCapacity.begin(),
      newCapacity.end(),
      static_cast<TIndex>(1),
      std::multiplies<TIndex>());
  // Current allocation is already large enough — nothing to do.
  if (newSize * meta_.itemsize() <= capacity_) {
    return;
  }
  // Old data is discarded
  data_.reset();
  // Remember the logical shape/size so they can be restored after the
  // reallocation below overwrites them.
  auto oldSize = size_;
  auto oldDims = dims_;
  Resize(newCapacity);
  // Allocate new memory and don't copy over the data
  raw_mutable_data(meta_);
  // Restore the visible extent; the allocation stays at newCapacity,
  // leaving headroom past the end of the logical tensor.
  dims_ = oldDims;
  size_ = oldSize;
  // Mark as reserved so subsequent Resize() calls will not free this
  // storage (see the comment on the reserved_ member).
  reserved_ = true;
}
340
+
309
341
/* *
310
342
* @brief Resizes a tensor.
311
343
*
@@ -389,7 +421,7 @@ class Tensor {
389
421
capacity_ = 0 ;
390
422
// If reserved is true and we changed tensor memory then it is fine
391
423
// to switch it to false, if Resize is called from Reserve and it triggers
392
- // FreeMemory() then reserved_ will be set to true at end of Reserve ()
424
+ // FreeMemory() then reserved_ will be set to true at end of ReserveSpace ()
393
425
reserved_ = false ;
394
426
}
395
427
@@ -740,6 +772,10 @@ class Tensor {
740
772
TypeMeta meta_;
741
773
std::shared_ptr<void > data_;
742
774
size_t capacity_ = 0 ;
775
+ // we decide to keep reserved and it will
776
+ // live in Tensor after the split
777
+ // The logic is that if Extend() or ReserveSpace() were ever called,
778
+ // then subsequent Resize()s will not free up Storage.
743
779
bool reserved_ = false ;
744
780
DeviceType device_type_ = CPU;
745
781
// In case of chunk load we store how much data was already loaded
0 commit comments