
Commit 8316f96

fix(http1): force always-ready connections to yield after a few spins
1 parent 5019885 commit 8316f96

3 files changed, +81 -14 lines changed

src/common/mod.rs

Lines changed: 2 additions & 0 deletions

@@ -4,8 +4,10 @@ pub(crate) mod exec;
 pub(crate) mod io;
 mod lazy;
 mod never;
+pub(crate) mod task;
 
 pub(crate) use self::buf::StaticBuf;
 pub(crate) use self::exec::Exec;
 pub(crate) use self::lazy::{lazy, Started as Lazy};
 pub use self::never::Never;
+pub(crate) use self::task::YieldNow;

src/common/task.rs

Lines changed: 40 additions & 0 deletions

@@ -0,0 +1,40 @@
+use futures::{Async, Poll, task::Task};
+
+use super::Never;
+
+/// A type to help "yield" a future, such that it is re-scheduled immediately.
+///
+/// Useful for spin counts, so a future doesn't hog too much time.
+#[derive(Debug)]
+pub(crate) struct YieldNow {
+    cached_task: Option<Task>,
+}
+
+impl YieldNow {
+    pub(crate) fn new() -> YieldNow {
+        YieldNow {
+            cached_task: None,
+        }
+    }
+
+    /// Returns `Ok(Async::NotReady)` always, while also notifying the
+    /// current task so that it is rescheduled immediately.
+    ///
+    /// Since it never returns `Async::Ready` or `Err`, those types are
+    /// set to `Never`.
+    pub(crate) fn poll_yield(&mut self) -> Poll<Never, Never> {
+        // Check for a cached `Task` first...
+        if let Some(ref t) = self.cached_task {
+            if t.will_notify_current() {
+                t.notify();
+                return Ok(Async::NotReady);
+            }
+        }
+
+        // No cached task, or not current, so get a new one...
+        let t = ::futures::task::current();
+        t.notify();
+        self.cached_task = Some(t);
+        Ok(Async::NotReady)
+    }
+}
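Note: a minimal standalone sketch (not part of the commit) of the pattern `YieldNow` enables, written against the same futures 0.1 API hyper uses here. A future that is always "ready" to do more work caps how many spins it does per `poll`, then notifies its own task and returns `NotReady`, so the executor re-polls it soon without starving other futures on the same thread. The `AlwaysBusy` type and its `WORK_BUDGET` constant are illustrative names only.

extern crate futures; // futures 0.1

use futures::{task, Async, Future, Poll};

/// Illustrative only: a future that always has more work available,
/// but bounds how much it does per poll before yielding.
struct AlwaysBusy {
    remaining: usize,
}

impl Future for AlwaysBusy {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        const WORK_BUDGET: usize = 16; // mirrors the spin count in this commit

        for _ in 0..WORK_BUDGET {
            if self.remaining == 0 {
                return Ok(Async::Ready(()));
            }
            self.remaining -= 1; // stand-in for poll_read/poll_write/poll_flush
        }

        // Budget spent: ask to be polled again right away, but return
        // NotReady so other futures on the executor get a turn.
        task::current().notify();
        Ok(Async::NotReady)
    }
}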

src/proto/h1/dispatch.rs

Lines changed: 39 additions & 14 deletions

@@ -7,7 +7,7 @@ use tokio_io::{AsyncRead, AsyncWrite};
 
 use body::{Body, Payload};
 use body::internal::FullDataArg;
-use common::Never;
+use common::{Never, YieldNow};
 use proto::{BodyLength, DecodedLength, Conn, Dispatched, MessageHead, RequestHead, RequestLine, ResponseHead};
 use super::Http1Transaction;
 use service::Service;
@@ -18,6 +18,10 @@ pub(crate) struct Dispatcher<D, Bs: Payload, I, T> {
     body_tx: Option<::body::Sender>,
     body_rx: Option<Bs>,
     is_closing: bool,
+    /// If the poll loop reaches its max spin count, it will yield by notifying
+    /// the task immediately. This will cache that `Task`, since it usually is
+    /// the same one.
+    yield_now: YieldNow,
 }
 
 pub(crate) trait Dispatch {
@@ -58,6 +62,7 @@
             body_tx: None,
             body_rx: None,
             is_closing: false,
+            yield_now: YieldNow::new(),
         }
     }
 
@@ -98,7 +103,29 @@
     fn poll_inner(&mut self, should_shutdown: bool) -> Poll<Dispatched, ::Error> {
         T::update_date();
 
-        loop {
+        try_ready!(self.poll_loop());
+
+        if self.is_done() {
+            if let Some(pending) = self.conn.pending_upgrade() {
+                self.conn.take_error()?;
+                return Ok(Async::Ready(Dispatched::Upgrade(pending)));
+            } else if should_shutdown {
+                try_ready!(self.conn.shutdown().map_err(::Error::new_shutdown));
+            }
+            self.conn.take_error()?;
+            Ok(Async::Ready(Dispatched::Shutdown))
+        } else {
+            Ok(Async::NotReady)
+        }
+    }
+
+    fn poll_loop(&mut self) -> Poll<(), ::Error> {
+        // Limit the looping on this connection, in case it is ready far too
+        // often, so that other futures don't starve.
+        //
+        // 16 was chosen arbitrarily, as that is number of pipelined requests
+        // benchmarks often use. Perhaps it should be a config option instead.
+        for _ in 0..16 {
             self.poll_read()?;
             self.poll_write()?;
             self.poll_flush()?;
@@ -112,21 +139,19 @@
             // Using this instead of task::current() and notify() inside
             // the Conn is noticeably faster in pipelined benchmarks.
            if !self.conn.wants_read_again() {
-                break;
+                //break;
+                return Ok(Async::Ready(()));
            }
        }
 
-        if self.is_done() {
-            if let Some(pending) = self.conn.pending_upgrade() {
-                self.conn.take_error()?;
-                return Ok(Async::Ready(Dispatched::Upgrade(pending)));
-            } else if should_shutdown {
-                try_ready!(self.conn.shutdown().map_err(::Error::new_shutdown));
-            }
-            self.conn.take_error()?;
-            Ok(Async::Ready(Dispatched::Shutdown))
-        } else {
-            Ok(Async::NotReady)
+        trace!("poll_loop yielding (self = {:p})", self);
+
+        match self.yield_now.poll_yield() {
+            Ok(Async::NotReady) => Ok(Async::NotReady),
+            // maybe with `!` this can be cleaner...
+            // but for now, just doing this to eliminate branches
+            Ok(Async::Ready(never)) |
+            Err(never) => match never {}
         }
     }
 
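Note: a standalone sketch (also not part of the commit) of the `Never` trick used in the final hunk above. Because `Never` is an uninhabited enum, the `Ok(Async::Ready(never)) | Err(never) => match never {}` arms let the compiler treat the impossible results of `poll_yield` as unreachable without `unreachable!()` or a runtime branch. The local `Never` and the `always_not_ready` helper are illustrative; hyper defines its own `Never` in `src/common/never.rs`.

extern crate futures; // futures 0.1

use futures::{Async, Poll};

/// Illustrative stand-in for hyper's uninhabited `common::Never` type.
enum Never {}

/// Collapse the impossible `Ready`/`Err` branches of a `Poll<Never, Never>`
/// (such as the value returned by `poll_yield`) into plain `NotReady`.
fn always_not_ready(poll: Poll<Never, Never>) -> Async<()> {
    match poll {
        Ok(Async::NotReady) => Async::NotReady,
        // `Never` has no variants, so an empty match proves these arms
        // can never execute.
        Ok(Async::Ready(never)) |
        Err(never) => match never {},
    }
}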
