@@ -649,9 +649,13 @@ function serializeThenable(
       // We can no longer accept any resolved values
       request.abortableTasks.delete(newTask);
       newTask.status = ABORTED;
-      const errorId: number = (request.fatalError: any);
-      const model = stringify(serializeByValueID(errorId));
-      emitModelChunk(request, newTask.id, model);
+      if (enableHalt && request.type === PRERENDER) {
+        request.pendingChunks--;
+      } else {
+        const errorId: number = (request.fatalError: any);
+        const model = stringify(serializeByValueID(errorId));
+        emitModelChunk(request, newTask.id, model);
+      }
       return newTask.id;
     }
     if (typeof thenable.status === 'string') {
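The `request.pendingChunks--` in the new prerender branch is what keeps the stream from hanging open: reserving a chunk id creates a pending-chunk debt that is normally paid by emitting a row. A minimal sketch of that accounting, with hypothetical names rather than React's actual internals:

```js
// Hypothetical model of the pendingChunks bookkeeping. Reserving a task slot
// bumps the counter; the stream can only finish once the counter reaches zero.
const request = {pendingChunks: 0, nextChunkId: 0};

function reserveSlot(request) {
  request.pendingChunks++; // we now owe the receiver one row for this id
  return request.nextChunkId++;
}

function emitRow(request, id, payload) {
  request.pendingChunks--; // normal path: the debt is paid by writing the row
}

function haltSlot(request, id) {
  // Halt path: no row will ever be written for this id, so the debt has to be
  // cancelled explicitly or the flush logic would never see a finished stream.
  request.pendingChunks--;
}
```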
@@ -2293,7 +2297,17 @@ function renderModel(
     if (typeof x.then === 'function') {
       if (request.status === ABORTING) {
         task.status = ABORTED;
-        const errorId: number = (request.fatalError: any);
+        let errorId: number;
+        if (enableHalt && request.type === PRERENDER) {
+          // It's unfortunate that we consume an id here; it suggests something
+          // isn't quite right with our model. When halting we don't emit any
+          // chunks, but this slot sits inside a chunk that is still being
+          // emitted, so we have to return something in the model here. We
+          // consume an id and return it knowing it will never resolve.
+          errorId = request.nextChunkId++;
+        } else {
+          errorId = (request.fatalError: any);
+        }
         if (wasReactNode) {
           return serializeLazyID(errorId);
         }
@@ -2346,7 +2360,17 @@ function renderModel(
 
     if (request.status === ABORTING) {
       task.status = ABORTED;
-      const errorId: number = (request.fatalError: any);
+      let errorId: number;
+      if (enableHalt && request.type === PRERENDER) {
+        // It's unfortunate that we consume an id here; it suggests something
+        // isn't quite right with our model. When halting we don't emit any
+        // chunks, but this slot sits inside a chunk that is still being
+        // emitted, so we have to return something in the model here. We
+        // consume an id and return it knowing it will never resolve.
+        errorId = request.nextChunkId++;
+      } else {
+        errorId = (request.fatalError: any);
+      }
       if (wasReactNode) {
         return serializeLazyID(errorId);
       }
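Both `renderModel` hunks make the trade-off the inline comment describes: the enclosing model still needs something serializable in this slot, so a fresh chunk id is consumed even though no row for it will ever be emitted. A rough sketch of the resulting reference, assuming Flight's `$`/`$L` text convention with hexadecimal ids:

```js
// Sketch: serialize a reference to an id that is reserved but never fulfilled.
// The receiver parses it like any other reference and simply waits forever,
// which is the intended halt behavior for a prerender.
function serializeHaltedSlot(request, wasReactNode) {
  const id = request.nextChunkId++; // consumed, never resolved
  return wasReactNode
    ? '$L' + id.toString(16) // lazy reference, as serializeLazyID would produce
    : '$' + id.toString(16); // value reference, as serializeByValueID would produce
}
```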
@@ -3820,6 +3844,22 @@ function retryTask(request: Request, task: Task): void {
     request.abortableTasks.delete(task);
     task.status = COMPLETED;
   } catch (thrownValue) {
+    if (request.status === ABORTING) {
+      request.abortableTasks.delete(task);
+      task.status = ABORTED;
+      if (enableHalt && request.type === PRERENDER) {
+        // When aborting a prerender with halt semantics we don't emit
+        // anything into the slot for a task that aborts; it remains unresolved.
+        request.pendingChunks--;
+      } else {
+        // Otherwise we emit an error chunk into the task slot.
+        const errorId: number = (request.fatalError: any);
+        const model = stringify(serializeByValueID(errorId));
+        emitModelChunk(request, task.id, model);
+      }
+      return;
+    }
+
     const x =
       thrownValue === SuspenseException
         ? // This is a special type of exception used for Suspense. For historical
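The block added above hoists the `ABORTING` check to the top of the `catch`; the two copies it replaces (removed in the next two hunks) were redundant because once the request is aborting it no longer matters whether the thrown value was a suspense thenable or a real error. A self-contained sketch of that control flow, using hypothetical names:

```js
// Why a single check at the top of the catch suffices: every throw funnels
// into the same abort/halt handling once the request status is ABORTING.
const ABORTING = 'aborting';

function retrySketch(request, render) {
  try {
    render();
  } catch (thrownValue) {
    if (request.status === ABORTING) {
      return 'aborted'; // halt (prerender) or emit an error chunk (render)
    }
    if (thrownValue !== null && typeof thrownValue.then === 'function') {
      return 'suspended'; // a thenable: pick the task back up later
    }
    return 'errored'; // a real error: log it and emit an error chunk
  }
  return 'completed';
}
```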
@@ -3832,14 +3872,6 @@ function retryTask(request: Request, task: Task): void {
     if (typeof x === 'object' && x !== null) {
       // $FlowFixMe[method-unbinding]
       if (typeof x.then === 'function') {
-        if (request.status === ABORTING) {
-          request.abortableTasks.delete(task);
-          task.status = ABORTED;
-          const errorId: number = (request.fatalError: any);
-          const model = stringify(serializeByValueID(errorId));
-          emitModelChunk(request, task.id, model);
-          return;
-        }
         // Something suspended again, let's pick it back up later.
         task.status = PENDING;
         task.thenableState = getThenableStateAfterSuspending();
@@ -3856,15 +3888,6 @@ function retryTask(request: Request, task: Task): void {
       }
     }
 
-    if (request.status === ABORTING) {
-      request.abortableTasks.delete(task);
-      task.status = ABORTED;
-      const errorId: number = (request.fatalError: any);
-      const model = stringify(serializeByValueID(errorId));
-      emitModelChunk(request, task.id, model);
-      return;
-    }
-
     request.abortableTasks.delete(task);
     task.status = ERRORED;
     const digest = logRecoverableError(request, x, task);
@@ -3942,6 +3965,17 @@ function abortTask(task: Task, request: Request, errorId: number): void {
   request.completedErrorChunks.push(processedChunk);
 }
 
+function haltTask(task: Task, request: Request): void {
+  if (task.status === RENDERING) {
+    // This task will be halted by the render.
+    return;
+  }
+  task.status = ABORTED;
+  // We don't actually emit anything for this task id because we are
+  // intentionally leaving the reference unfulfilled.
+  request.pendingChunks--;
+}
+
 function flushCompletedChunks(
   request: Request,
   destination: Destination,
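`haltTask` is the halt-path counterpart of `abortTask` above it: instead of pushing an error chunk into the task's slot it only returns the pending-chunk debt, leaving the reference unfulfilled. The `RENDERING` early return exists because a task currently on the stack will hit the hoisted `ABORTING` check in `retryTask` and be halted there instead. In miniature, here is what an unfulfilled slot means for a receiver (hypothetical names, not the Flight client):

```js
// A halted slot parses like any other reference, but its promise never
// settles, so consumers keep waiting instead of erroring.
function createPendingSlot() {
  let resolve;
  const promise = new Promise(res => {
    resolve = res;
  });
  return {promise, resolve};
}

const slot = createPendingSlot();
// An aborted render resolves or rejects the slot via the shared error row.
// A halted prerender does neither: `await slot.promise` never finishes,
// which is what lets a later render pass pick the work back up.
```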
@@ -4087,12 +4121,6 @@ export function abort(request: Request, reason: mixed): void {
   }
   const abortableTasks = request.abortableTasks;
   if (abortableTasks.size > 0) {
-    // We have tasks to abort. If we are rendering, we'll emit one error row and
-    // then emit a reference to that row from every row that's still remaining.
-    // If we are prerendering (and halt semantics are enabled) we refer to an error
-    // row but never actually emit it, so the receiver can halt there rather than error.
-    const errorId = request.nextChunkId++;
-    request.fatalError = errorId;
     if (
       enablePostpone &&
       typeof reason === 'object' &&
@@ -4101,10 +4129,20 @@
     ) {
       const postponeInstance: Postpone = (reason: any);
       logPostpone(request, postponeInstance.message, null);
-      if (!enableHalt || request.type === PRERENDER) {
-        // When prerendering with halt semantics we omit the referred to postpone.
+      if (enableHalt && request.type === PRERENDER) {
+        // When prerendering with halt semantics we simply halt the tasks
+        // and leave their references unfulfilled.
+        abortableTasks.forEach(task => haltTask(task, request));
+        abortableTasks.clear();
+      } else {
+        // When rendering we produce a shared postpone chunk and then
+        // fulfill each task with a reference to that chunk.
+        const errorId = request.nextChunkId++;
+        request.fatalError = errorId;
         request.pendingChunks++;
         emitPostponeChunk(request, errorId, postponeInstance);
+        abortableTasks.forEach(task => abortTask(task, request, errorId));
+        abortableTasks.clear();
       }
     } else {
       const error =
@@ -4120,14 +4158,22 @@
         )
       : reason;
     const digest = logRecoverableError(request, error, null);
-    if (!enableHalt || request.type === RENDER) {
-      // When prerendering with halt semantics we omit the referred to error.
+    if (enableHalt && request.type === PRERENDER) {
+      // When prerendering with halt semantics we simply halt the tasks
+      // and leave their references unfulfilled.
+      abortableTasks.forEach(task => haltTask(task, request));
+      abortableTasks.clear();
+    } else {
+      // When rendering we produce a shared error chunk and then
+      // fulfill each task with a reference to that chunk.
+      const errorId = request.nextChunkId++;
+      request.fatalError = errorId;
       request.pendingChunks++;
       emitErrorChunk(request, errorId, digest, error);
+      abortableTasks.forEach(task => abortTask(task, request, errorId));
+      abortableTasks.clear();
     }
   }
-  abortableTasks.forEach(task => abortTask(task, request, errorId));
-  abortableTasks.clear();
   const onAllReady = request.onAllReady;
   onAllReady();
 }
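From the caller's side, the net effect is that aborting a prerender still drives `onAllReady`, but the output contains references that are never fulfilled instead of error rows. A rough usage sketch, assuming the experimental static prerender entry point (the exact package path and option names vary by bundler and release):

```js
import {prerender} from 'react-server-dom-webpack/static';

async function prerenderWithDeadline(element, clientManifest, ms) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), ms);
  try {
    // With halt semantics enabled, the abort does not inject error rows: the
    // stream closes with some references left unresolved, to be completed by
    // a later render pass instead of surfacing as errors.
    const {prelude} = await prerender(element, clientManifest, {
      signal: controller.signal,
    });
    return prelude;
  } finally {
    clearTimeout(timer);
  }
}
```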