[Fizz] Expose maxBoundarySize as an option #21029

Merged: 3 commits, Mar 19, 2021

This PR renames Fizz's internal maxBoundarySize to progressiveChunkSize, replaces the old hardcoded 1024-byte value with a bandwidth-derived default, and exposes it as an option on the browser, Node, and noop server entry points.
7 changes: 6 additions & 1 deletion packages/react-dom/src/server/ReactDOMFizzServerBrowser.js
@@ -18,6 +18,7 @@ import {
 
 type Options = {
   signal?: AbortSignal,
+  progressiveChunkSize?: number,
 };
 
 function renderToReadableStream(
@@ -35,7 +36,11 @@
   }
   return new ReadableStream({
     start(controller) {
-      request = createRequest(children, controller);
+      request = createRequest(
+        children,
+        controller,
+        options ? options.progressiveChunkSize : undefined,
+      );
       startWork(request);
     },
     pull(controller) {
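
For callers of the browser build, the new option threads straight through to createRequest. A minimal usage sketch, assuming the unstable Fizz entry point of this era ('react-dom/unstable-fizz.browser') and an 8 KB threshold chosen purely for illustration:

```js
import * as React from 'react';
import {renderToReadableStream} from 'react-dom/unstable-fizz.browser';

function App() {
  return <main>Hello</main>;
}

// Boundaries whose HTML exceeds progressiveChunkSize (in bytes) are flushed
// separately so earlier content can paint first. 8 KB is an arbitrary example.
const stream = renderToReadableStream(<App />, {
  progressiveChunkSize: 8 * 1024,
});
```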
11 changes: 10 additions & 1 deletion packages/react-dom/src/server/ReactDOMFizzServerNode.js
@@ -21,6 +21,10 @@ function createDrainHandler(destination, request) {
   return () => startFlowing(request);
 }
 
+type Options = {
+  progressiveChunkSize?: number,
+};
+
 type Controls = {
   // Cancel any pending I/O and put anything remaining into
   // client rendered mode.
@@ -30,8 +34,13 @@ type Controls = {
 function pipeToNodeWritable(
   children: ReactNodeList,
   destination: Writable,
+  options?: Options,
 ): Controls {
-  const request = createRequest(children, destination);
+  const request = createRequest(
+    children,
+    destination,
+    options ? options.progressiveChunkSize : undefined,
+  );
   let hasStartedFlowing = false;
   startWork(request);
   return {
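
The Node entry point takes the same option in its options bag. A hedged sketch, assuming the era's 'react-dom/unstable-fizz' import path, an HTTP server response as the Writable, and a startWriting control method on the returned object (an assumption from this era's API, not shown in this diff):

```js
import * as http from 'http';
import * as React from 'react';
import {pipeToNodeWritable} from 'react-dom/unstable-fizz';

function App() {
  return <main>Hello</main>;
}

http
  .createServer((req, res) => {
    res.setHeader('Content-Type', 'text/html');
    // 16 KB is an illustrative threshold, not a recommendation.
    const {startWriting} = pipeToNodeWritable(<App />, res, {
      progressiveChunkSize: 16 * 1024,
    });
    startWriting(); // assumed control method from this era's API
  })
  .listen(3000);
```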
12 changes: 10 additions & 2 deletions packages/react-noop-renderer/src/ReactNoopServer.js
@@ -210,7 +210,11 @@ const ReactNoopServer = ReactFizzServer({
   },
 });
 
-function render(children: React$Element<any>): Destination {
+type Options = {
+  progressiveChunkSize?: number,
+};
+
+function render(children: React$Element<any>, options?: Options): Destination {
   const destination: Destination = {
     root: null,
     placeholders: new Map(),
@@ -220,7 +224,11 @@ function render(children: React$Element<any>): Destination {
       ReactNoopServer.abort(request);
     },
   };
-  const request = ReactNoopServer.createRequest(children, destination);
+  const request = ReactNoopServer.createRequest(
+    children,
+    destination,
+    options ? options.progressiveChunkSize : undefined,
+  );
   ReactNoopServer.startWork(request);
   ReactNoopServer.startFlowing(request);
   return destination;
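
Exposing the option on the noop renderer lets unit tests force either flushing path without building huge trees. A sketch under the assumption that tests consume this module as react-noop-renderer/server; the 1-byte threshold is contrived:

```js
const ReactNoopServer = require('react-noop-renderer/server');

// With a 1-byte threshold, every Suspense boundary exceeds the chunk size,
// so each one is emitted separately instead of being inlined.
const destination = ReactNoopServer.render(<App />, {
  progressiveChunkSize: 1,
});
```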
24 changes: 21 additions & 3 deletions packages/react-server/src/ReactFizzServer.js
@@ -99,7 +99,7 @@ const CLOSED = 2;
 type Request = {
   +destination: Destination,
   +responseState: ResponseState,
-  +maxBoundarySize: number,
+  +progressiveChunkSize: number,
   status: 0 | 1 | 2,
   nextSegmentId: number,
   allPendingWork: number, // when it reaches zero, we can close the connection.
@@ -113,16 +113,34 @@ type Request = {
   partialBoundaries: Array<SuspenseBoundary>, // Partially completed boundaries that can flush their segments early.
 };
 
+// This is a default heuristic for how to split up the HTML content into progressive
+// loading. Our goal is to be able to display additional new content about every 500ms.
+// Faster than that is unnecessary and should be throttled on the client. It also
+// adds unnecessary overhead to do more splits. We don't know if it's a higher- or
+// lower-end device, but higher-end devices suffer less from the overhead than lower-end
+// devices do from not getting small enough pieces. We err on the side of low end.
+// We base this on low-end 3G speeds, which are about 500 kbits per second. We assume
+// that there can be a reasonable drop-off from max bandwidth, which leaves you with
+// as little as 80%. We can receive half of that each 500ms - at best. In practice,
+// a little bandwidth is lost to processing and contention - e.g. CSS and images that
+// are downloaded along with the main content. So we estimate about half of that to be
+// the lower-end throughput. In other words, we expect that you can at least show
+// about 12.5kb of content per 500ms, not counting starting latency for the first
+// paint.
+// 500 * 1024 / 8 * 0.8 * 0.5 / 2
+const DEFAULT_PROGRESSIVE_CHUNK_SIZE = 12800;
+
 export function createRequest(
   children: ReactNodeList,
   destination: Destination,
+  progressiveChunkSize: number = DEFAULT_PROGRESSIVE_CHUNK_SIZE,
 ): Request {
   const pingedWork = [];
   const abortSet: Set<SuspendedWork> = new Set();
   const request = {
     destination,
     responseState: createResponseState(),
-    maxBoundarySize: 1024,
+    progressiveChunkSize,
     status: BUFFERING,
     nextSegmentId: 0,
     allPendingWork: 0,
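
The 12800 figure is just the comment's arithmetic carried out; a quick check in plain JavaScript, using only the numbers stated above:

```js
const linkSpeedBits = 500 * 1024; // low-end 3G: ~500 kbits per second
const bytesPerSecond = linkSpeedBits / 8; // 64000 bytes/s
const afterDropOff = bytesPerSecond * 0.8; // keep ~80% of max bandwidth
const perHalfSecond = afterDropOff * 0.5; // budget for each 500ms window
const usable = perHalfSecond / 2; // halve again for CSS/images/contention

console.log(usable); // 12800 bytes, i.e. ~12.5kb of content per 500ms
```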
@@ -642,7 +660,7 @@ function flushSegment(
     flushSubtree(request, destination, segment);
 
     return writeEndSuspenseBoundary(destination);
-  } else if (boundary.byteSize > request.progressiveChunkSize) {
+  } else if (boundary.byteSize > request.progressiveChunkSize) {
     // This boundary is large and will be emitted separately so that we can progressively show
     // other content. We add it to the queue during the flush because we have to ensure that
     // the parent flushes first so that there's something to inject it into.
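
For orientation, the renamed field gates a single comparison in flushSegment. A stripped-down restatement of that branch (shouldDeferBoundary is an invented helper for illustration, not code from the PR):

```js
// Invented helper summarizing the branch above: a boundary larger than the
// request's progressiveChunkSize is queued to flush after its parent, so
// smaller content can be shown progressively in the meantime.
function shouldDeferBoundary(request, boundary) {
  return boundary.byteSize > request.progressiveChunkSize;
}
```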