@@ -15,8 +15,7 @@ const SaveLayerOptions SaveLayerOptions::kWithAttributes =
     kNoAttributes.with_renders_with_attributes();
 
 DisplayList::DisplayList()
-    : byte_count_(0),
-      op_count_(0),
+    : op_count_(0),
       nested_byte_count_(0),
       nested_op_count_(0),
       total_depth_(0),
@@ -27,25 +26,13 @@ DisplayList::DisplayList()
       modifies_transparent_black_(false),
       root_has_backdrop_filter_(false),
       root_is_unbounded_(false),
-      max_root_blend_mode_(DlBlendMode::kClear) {}
-
-// Eventually we should rework DisplayListBuilder to compute these and
-// deliver the vector alongside the storage.
-static std::vector<size_t> MakeOffsets(const DisplayListStorage& storage,
-                                       size_t byte_count) {
-  std::vector<size_t> offsets;
-  const uint8_t* start = storage.get();
-  const uint8_t* end = start + byte_count;
-  const uint8_t* ptr = start;
-  while (ptr < end) {
-    offsets.push_back(ptr - start);
-    ptr += reinterpret_cast<const DLOp*>(ptr)->size;
-  }
-  return offsets;
+      max_root_blend_mode_(DlBlendMode::kClear) {
+  FML_DCHECK(offsets_.size() == 0u);
+  FML_DCHECK(storage_.size() == 0u);
 }
 
 DisplayList::DisplayList(DisplayListStorage&& storage,
-                         size_t byte_count,
+                         std::vector<size_t>&& offsets,
                          uint32_t op_count,
                          size_t nested_byte_count,
                          uint32_t nested_op_count,
@@ -59,8 +46,7 @@ DisplayList::DisplayList(DisplayListStorage&& storage,
                          bool root_is_unbounded,
                          sk_sp<const DlRTree> rtree)
     : storage_(std::move(storage)),
-      offsets_(MakeOffsets(storage_, byte_count)),
-      byte_count_(byte_count),
+      offsets_(std::move(offsets)),
       op_count_(op_count),
       nested_byte_count_(nested_byte_count),
       nested_op_count_(nested_op_count),
@@ -73,11 +59,12 @@ DisplayList::DisplayList(DisplayListStorage&& storage,
       root_has_backdrop_filter_(root_has_backdrop_filter),
       root_is_unbounded_(root_is_unbounded),
       max_root_blend_mode_(max_root_blend_mode),
-      rtree_(std::move(rtree)) {}
+      rtree_(std::move(rtree)) {
+  FML_DCHECK(storage_.capacity() == storage_.size());
+}
 
 DisplayList::~DisplayList() {
-  const uint8_t* ptr = storage_.get();
-  DisposeOps(ptr, ptr + byte_count_);
+  DisposeOps(storage_, offsets_);
 }
 
 uint32_t DisplayList::next_unique_id() {
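Note on the MakeOffsets() helper removed above: it re-derived the offset table after the fact by hopping through the buffer with each op's size field, which is work the recorder has already done while writing the ops. The deleted comment pointed at having DisplayListBuilder deliver the vector alongside the storage, which the new constructor signature now accepts. A minimal sketch of that direction, assuming a hypothetical recorder (Recording/Append are illustrative names, not the actual DisplayListBuilder API):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: the recorder already knows where each op starts, so the
// offset table can be captured during recording instead of being recomputed
// by a MakeOffsets()-style walk over the finished buffer.
struct Recording {
  std::vector<uint8_t> bytes;
  std::vector<size_t> offsets;

  // record is a fully serialized op (header plus payload) of record_size bytes.
  void Append(const uint8_t* record, size_t record_size) {
    offsets.push_back(bytes.size());  // the new op starts at the current end
    bytes.insert(bytes.end(), record, record + record_size);
  }
};

A builder along these lines would then pass std::move(offsets) to the new DisplayList constructor alongside the storage, matching the signature change above.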
@@ -132,7 +119,7 @@ void DisplayList::RTreeResultsToIndexVector(
         return;
       }
     }
-    const uint8_t* ptr = storage_.get() + offsets_[index];
+    const uint8_t* ptr = storage_.base() + offsets_[index];
     const DLOp* op = reinterpret_cast<const DLOp*>(ptr);
     switch (GetOpCategory(op->type)) {
       case DisplayListOpCategory::kAttribute:
@@ -193,7 +180,7 @@ void DisplayList::RTreeResultsToIndexVector(
 }
 
 void DisplayList::Dispatch(DlOpReceiver& receiver) const {
-  const uint8_t* base = storage_.get();
+  const uint8_t* base = storage_.base();
   for (size_t offset : offsets_) {
     DispatchOneOp(receiver, base + offset);
   }
@@ -213,7 +200,7 @@ void DisplayList::Dispatch(DlOpReceiver& receiver,
     Dispatch(receiver);
   } else {
     auto op_indices = GetCulledIndices(cull_rect);
-    const uint8_t* base = storage_.get();
+    const uint8_t* base = storage_.base();
     for (DlIndex index : op_indices) {
       DispatchOneOp(receiver, base + offsets_[index]);
     }
@@ -240,11 +227,14 @@ void DisplayList::DispatchOneOp(DlOpReceiver& receiver,
   }
 }
 
-void DisplayList::DisposeOps(const uint8_t* ptr, const uint8_t* end) {
-  while (ptr < end) {
-    auto op = reinterpret_cast<const DLOp*>(ptr);
-    ptr += op->size;
-    FML_DCHECK(ptr <= end);
+void DisplayList::DisposeOps(const DisplayListStorage& storage,
+                             const std::vector<size_t>& offsets) {
+  const uint8_t* base = storage.base();
+  if (!base) {
+    return;
+  }
+  for (size_t offset : offsets) {
+    auto op = reinterpret_cast<const DLOp*>(base + offset);
     switch (op->type) {
 #define DL_OP_DISPOSE(name) \
   case DisplayListOpType::k##name: \
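The rewritten DisposeOps() no longer advances by each op's size field; it assumes the offset table enumerates every record exactly once, in order, which the old size-based walk established implicitly. A small standalone check of that assumed invariant (DLOpHeader below is a stand-in for the engine's DLOp layout, not the real type):

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: minimal record header used to read each op's size.
struct DLOpHeader {
  uint32_t type;
  uint32_t size;  // total byte size of the record, including this header
};

// Walking by stored offsets and walking by each op's size field must visit
// the same records; this verifies the assumed correspondence.
bool OffsetsMatchSizes(const uint8_t* base,
                       size_t byte_count,
                       const std::vector<size_t>& offsets) {
  size_t expected = 0u;
  for (size_t offset : offsets) {
    if (offset != expected) {
      return false;  // gap or overlap between consecutive records
    }
    expected += reinterpret_cast<const DLOpHeader*>(base + offset)->size;
  }
  return expected == byte_count;  // the table must cover the whole buffer
}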
@@ -362,10 +352,9 @@ DisplayListOpType DisplayList::GetOpType(DlIndex index) const {
   }
 
   size_t offset = offsets_[index];
-  FML_DCHECK(offset < byte_count_);
-  auto ptr = storage_.get() + offset;
+  FML_DCHECK(offset < storage_.size());
+  auto ptr = storage_.base() + offset;
   auto op = reinterpret_cast<const DLOp*>(ptr);
-  FML_DCHECK(ptr + op->size <= storage_.get() + byte_count_);
   return op->type;
 }
 
@@ -399,34 +388,32 @@ bool DisplayList::Dispatch(DlOpReceiver& receiver, DlIndex index) const {
   }
 
   size_t offset = offsets_[index];
-  FML_DCHECK(offset < byte_count_);
-  auto ptr = storage_.get() + offset;
-  FML_DCHECK(offset + reinterpret_cast<const DLOp*>(ptr)->size <= byte_count_);
+  FML_DCHECK(offset < storage_.size());
+  auto ptr = storage_.base() + offset;
 
   DispatchOneOp(receiver, ptr);
 
   return true;
 }
 
-static bool CompareOps(const uint8_t* ptrA,
-                       const uint8_t* endA,
-                       const uint8_t* ptrB,
-                       const uint8_t* endB) {
+static bool CompareOps(const DisplayListStorage& storageA,
+                       const std::vector<size_t>& offsetsA,
+                       const DisplayListStorage& storageB,
+                       const std::vector<size_t>& offsetsB) {
+  const uint8_t* base_a = storageA.base();
+  const uint8_t* base_b = storageB.base();
   // These conditions are checked by the caller...
-  FML_DCHECK((endA - ptrA) == (endB - ptrB));
-  FML_DCHECK(ptrA != ptrB);
-  const uint8_t* bulk_start_a = ptrA;
-  const uint8_t* bulk_start_b = ptrB;
-  while (ptrA < endA && ptrB < endB) {
-    auto opA = reinterpret_cast<const DLOp*>(ptrA);
-    auto opB = reinterpret_cast<const DLOp*>(ptrB);
-    if (opA->type != opB->type || opA->size != opB->size) {
+  FML_DCHECK(offsetsA.size() == offsetsB.size());
+  FML_DCHECK(base_a != base_b);
+  size_t bulk_start = 0u;
+  for (size_t i = 0; i < offsetsA.size(); i++) {
+    size_t offset = offsetsA[i];
+    FML_DCHECK(offsetsB[i] == offset);
+    auto opA = reinterpret_cast<const DLOp*>(base_a + offset);
+    auto opB = reinterpret_cast<const DLOp*>(base_b + offset);
+    if (opA->type != opB->type) {
       return false;
     }
-    ptrA += opA->size;
-    ptrB += opB->size;
-    FML_DCHECK(ptrA <= endA);
-    FML_DCHECK(ptrB <= endB);
     DisplayListCompare result;
     switch (opA->type) {
 #define DL_OP_EQUALS(name) \
@@ -451,23 +438,23 @@ static bool CompareOps(const uint8_t* ptrA,
       case DisplayListCompare::kEqual:
         // Check if we have a backlog of bytes to bulk compare and then
         // reset the bulk compare pointers to the address following this op
-        auto bulk_bytes = reinterpret_cast<const uint8_t*>(opA) - bulk_start_a;
-        if (bulk_bytes > 0) {
-          if (memcmp(bulk_start_a, bulk_start_b, bulk_bytes) != 0) {
+        if (bulk_start < offset) {
+          const uint8_t* bulk_start_a = base_a + bulk_start;
+          const uint8_t* bulk_start_b = base_b + bulk_start;
+          if (memcmp(bulk_start_a, bulk_start_b, offset - bulk_start) != 0) {
             return false;
           }
         }
-        bulk_start_a = ptrA;
-        bulk_start_b = ptrB;
+        bulk_start =
+            i + 1 < offsetsA.size() ? offsetsA[i + 1] : storageA.size();
         break;
     }
   }
-  if (ptrA != endA || ptrB != endB) {
-    return false;
-  }
-  if (bulk_start_a < ptrA) {
+  if (bulk_start < storageA.size()) {
     // Perform a final bulk compare if we have remaining bytes waiting
-    if (memcmp(bulk_start_a, bulk_start_b, ptrA - bulk_start_a) != 0) {
+    const uint8_t* bulk_start_a = base_a + bulk_start;
+    const uint8_t* bulk_start_b = base_b + bulk_start;
+    if (memcmp(bulk_start_a, bulk_start_b, storageA.size() - bulk_start) != 0) {
       return false;
     }
   }
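The restructured CompareOps() keeps the original optimization: ops whose comparison resolves to a bulk byte compare stay in a pending run, and only when an op needs (and passes) a structural comparison is the preceding run flushed with a single memcmp and bulk_start advanced past it, with a final memcmp covering whatever run is still pending when the loop ends. A self-contained sketch of that batching pattern over an offset table, with hypothetical predicate/comparison callbacks standing in for the DL_OP_EQUALS switch:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative only: compare two same-layout buffers op by op, batching
// byte-comparable ops into one std::memcmp per run.
template <typename NeedsDeep, typename DeepCompare>
bool CompareByRuns(const uint8_t* a,
                   const uint8_t* b,
                   size_t byte_count,
                   const std::vector<size_t>& offsets,
                   NeedsDeep needs_deep_compare,
                   DeepCompare deep_compare) {
  size_t bulk_start = 0;
  for (size_t i = 0; i < offsets.size(); i++) {
    size_t offset = offsets[i];
    if (!needs_deep_compare(offset)) {
      continue;  // leave this op in the pending bulk-compare run
    }
    // Flush the run of byte-comparable ops that ended just before this op.
    if (bulk_start < offset &&
        std::memcmp(a + bulk_start, b + bulk_start, offset - bulk_start) != 0) {
      return false;
    }
    if (!deep_compare(a + offset, b + offset)) {
      return false;
    }
    // Restart the run at the op that follows this one.
    bulk_start = i + 1 < offsets.size() ? offsets[i + 1] : byte_count;
  }
  // Compare whatever run is still pending at the end of the buffer.
  return bulk_start >= byte_count ||
         std::memcmp(a + bulk_start, b + bulk_start, byte_count - bulk_start) == 0;
}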
@@ -478,15 +465,15 @@ bool DisplayList::Equals(const DisplayList* other) const {
   if (this == other) {
     return true;
   }
-  if (byte_count_ != other->byte_count_ || op_count_ != other->op_count_) {
+  if (offsets_.size() != other->offsets_.size() ||
+      storage_.size() != other->storage_.size() ||
+      op_count_ != other->op_count_) {
     return false;
   }
-  const uint8_t* ptr = storage_.get();
-  const uint8_t* o_ptr = other->storage_.get();
-  if (ptr == o_ptr) {
+  if (storage_.base() == other->storage_.base()) {
     return true;
   }
-  return CompareOps(ptr, ptr + byte_count_, o_ptr, o_ptr + other->byte_count_);
+  return CompareOps(storage_, offsets_, other->storage_, other->offsets_);
 }
 
 }  // namespace flutter